
Commit

remove extra code
Authored and committed by Rhui Dih Lee on Jul 19, 2024
Commit 7573f85 (1 parent: 808fd63)
Showing 7 changed files with 1 addition and 43 deletions.
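Every deletion below follows the same pattern: each model test class had re-declared test_flash_attention_2_padding_matches_padding_free_with_position_ids only to delegate to the parent class via super(), so the override added nothing. The sketch below illustrates why removing such a pass-through override is safe, assuming the shared definition in the common test mixin already carries the needed decorators; the class names CommonModelTest and FalconStyleModelTest are illustrative, not taken from the repository.

import unittest


class CommonModelTest(unittest.TestCase):
    """Stand-in for the shared mixin that owns the test (illustrative, not the real class)."""

    def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
        # Placeholder body; the real implementation lives in the common transformers test suite.
        self.assertTrue(True)


class FalconStyleModelTest(CommonModelTest):
    # Before this commit, each model test class re-declared the method only to call super():
    #
    #   @require_flash_attn
    #   @require_torch_gpu
    #   @slow
    #   def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
    #       super().test_flash_attention_2_padding_matches_padding_free_with_position_ids()
    #
    # With the override removed, the subclass still inherits and runs the parent's test.
    pass


if __name__ == "__main__":
    unittest.main()

Running this sketch with python -m unittest still executes the inherited test for FalconStyleModelTest even though its class body is empty, which is exactly the behavior the per-model overrides were duplicating.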
7 changes: 0 additions & 7 deletions tests/models/falcon/test_modeling_falcon.py
@@ -29,10 +29,8 @@
from transformers.testing_utils import (
is_flaky,
require_bitsandbytes,
require_flash_attn,
require_torch,
require_torch_sdpa,
require_torch_gpu,
slow,
torch_device,
)
@@ -583,11 +581,6 @@ def test_eager_matches_sdpa_generate(self):

self.assertTrue(torch.allclose(res_eager, res_sdpa))

@require_flash_attn
@require_torch_gpu
@slow
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
super().test_flash_attention_2_padding_matches_padding_free_with_position_ids()

@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
6 changes: 0 additions & 6 deletions tests/models/gemma/test_modeling_gemma.py
@@ -526,12 +526,6 @@ def test_flash_attn_2_equivalence(self):
assert torch.allclose(logits_fa, logits, atol=3e-3)


@require_flash_attn
@require_torch_gpu
@slow
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
super().test_flash_attention_2_padding_matches_padding_free_with_position_ids()

@slow
@require_torch_gpu
class GemmaIntegrationTest(unittest.TestCase):
6 changes: 0 additions & 6 deletions tests/models/llama/test_modeling_llama.py
@@ -555,12 +555,6 @@ def test_use_flash_attention_2_true(self):
if not has_flash:
raise ValueError("The flash model should have flash attention layers")

@require_flash_attn
@require_torch_gpu
@slow
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
super().test_flash_attention_2_padding_matches_padding_free_with_position_ids()

@require_torch_sdpa
@slow
def test_eager_matches_sdpa_generate(self):
7 changes: 1 addition & 6 deletions tests/models/mistral/test_modeling_mistral.py
@@ -490,12 +490,7 @@ def test_flash_attn_2_generate_use_cache(self):
@slow
def test_flash_attn_2_inference_equivalence_right_padding(self):
self.skipTest(reason="Mistral flash attention does not support right padding")

@require_flash_attn
@require_torch_gpu
@slow
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
super().test_flash_attention_2_padding_matches_padding_free_with_position_ids()


@require_torch_gpu
class MistralIntegrationTest(unittest.TestCase):
6 changes: 0 additions & 6 deletions tests/models/mixtral/test_modeling_mixtral.py
@@ -492,12 +492,6 @@ def test_flash_attn_2_generate_use_cache(self):
def test_flash_attn_2_inference_equivalence_right_padding(self):
self.skipTest(reason="Mixtral flash attention does not support right padding")

@require_flash_attn
@require_torch_gpu
@slow
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
super().test_flash_attention_2_padding_matches_padding_free_with_position_ids()

# Ignore copy
def test_load_balancing_loss(self):
r"""
6 changes: 0 additions & 6 deletions tests/models/phi/test_modeling_phi.py
@@ -495,12 +495,6 @@ def test_flash_attn_2_generate_padding_right(self):
self.assertListEqual(output_native, output_fa_2)


@require_flash_attn
@require_torch_gpu
@slow
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
super().test_flash_attention_2_padding_matches_padding_free_with_position_ids()

@slow
@require_torch
class PhiIntegrationTest(unittest.TestCase):
6 changes: 0 additions & 6 deletions tests/models/starcoder2/test_modeling_starcoder2.py
@@ -484,12 +484,6 @@ def test_flash_attn_2_inference_equivalence_right_padding(self):
self.skipTest(reason="Starcoder2 flash attention does not support right padding")


@require_flash_attn
@require_torch_gpu
@slow
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
super().test_flash_attention_2_padding_matches_padding_free_with_position_ids()

@slow
@require_torch_gpu
class Starcoder2IntegrationTest(unittest.TestCase):
