Commit f0435f5

these should run fine on multi-gpu (#8582)
1 parent 36a1991 commit f0435f5
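
The commit drops the temporary `@require_torch_non_multi_gpu_but_fix_me` skip decorator from tests that have since been verified to run on multi-GPU machines. For context, a minimal sketch of what such a skip helper in `transformers.testing_utils` typically looks like; this reconstruction is an assumption, not the library's exact source:

import unittest

import torch


def require_torch_non_multi_gpu_but_fix_me(test_case):
    # Skip the decorated test whenever more than one CUDA device is visible.
    # Sketch only: the real helper lives in transformers.testing_utils.
    if torch.cuda.device_count() > 1:
        return unittest.skip("test requires 0 or 1 GPU")(test_case)
    return test_case

Removing the decorator lets a multi-GPU CI runner execute these tests instead of skipping them.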

File tree

3 files changed: +3 lines, -33 lines

examples/seq2seq/test_bash_script.py

Lines changed: 1 addition & 4 deletions
@@ -13,7 +13,7 @@
 from finetune import SummarizationModule, main
 from transformers import MarianMTModel
 from transformers.file_utils import cached_path
-from transformers.testing_utils import TestCasePlus, require_torch_gpu, require_torch_non_multi_gpu_but_fix_me, slow
+from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
 from utils import load_json
 
 
@@ -32,15 +32,13 @@ def setUp(self):
 
     @slow
     @require_torch_gpu
-    @require_torch_non_multi_gpu_but_fix_me
     def test_model_download(self):
         """This warms up the cache so that we can time the next test without including download time, which varies between machines."""
         MarianMTModel.from_pretrained(MARIAN_MODEL)
 
     # @timeout_decorator.timeout(1200)
     @slow
     @require_torch_gpu
-    @require_torch_non_multi_gpu_but_fix_me
     def test_train_mbart_cc25_enro_script(self):
         env_vars_to_replace = {
             "$MAX_LEN": 64,
@@ -129,7 +127,6 @@ class TestDistilMarianNoTeacher(TestCasePlus):
     @timeout_decorator.timeout(600)
     @slow
     @require_torch_gpu
-    @require_torch_non_multi_gpu_but_fix_me
     def test_opus_mt_distill_script(self):
         data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
         env_vars_to_replace = {
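
The `env_vars_to_replace` mapping visible in the hunks above is how the bash-script tests inject small, test-friendly values into the training script before executing it. A minimal sketch of that substitution pattern; the script text and values here are illustrative, not the repository's actual contents:

env_vars_to_replace = {"$MAX_LEN": 64, "$BS": 8}  # hypothetical subset
bash_script = "python finetune.py --max_source_length $MAX_LEN --train_batch_size $BS"
for placeholder, value in env_vars_to_replace.items():
    # Swap each $PLACEHOLDER token for its small test value.
    bash_script = bash_script.replace(placeholder, str(value))
print(bash_script)  # python finetune.py --max_source_length 64 --train_batch_size 8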

examples/seq2seq/test_fsmt_bleu_score.py

Lines changed: 1 addition & 8 deletions
@@ -19,13 +19,7 @@
 
 from parameterized import parameterized
 from transformers import FSMTForConditionalGeneration, FSMTTokenizer
-from transformers.testing_utils import (
-    get_tests_dir,
-    require_torch,
-    require_torch_non_multi_gpu_but_fix_me,
-    slow,
-    torch_device,
-)
+from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
 from utils import calculate_bleu
 
 
@@ -54,7 +48,6 @@ def get_model(self, mname):
         ]
     )
     @slow
-    @require_torch_non_multi_gpu_but_fix_me
     def test_bleu_scores(self, pair, min_bleu_score):
         # note: this test is not testing the best performance since it only evals a small batch
         # but it should be enough to detect a regression in the output quality
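
`test_bleu_scores` receives its `(pair, min_bleu_score)` arguments from the `@parameterized.expand` decorator whose list is truncated in the hunk above. A minimal sketch of that pattern, with illustrative language pairs and thresholds rather than the repository's actual values:

import unittest

from parameterized import parameterized


class TestFSMTBleu(unittest.TestCase):
    @parameterized.expand(
        [
            ("en-ru", 26.0),  # hypothetical pair and minimum BLEU
            ("ru-en", 22.0),
        ]
    )
    def test_bleu_scores(self, pair, min_bleu_score):
        # Stand-in for calculate_bleu() on a small eval batch.
        score = 27.5
        self.assertGreaterEqual(score, min_bleu_score)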

examples/seq2seq/test_seq2seq_examples.py

Lines changed: 1 addition & 21 deletions
@@ -19,14 +19,7 @@
 from run_eval_search import run_search
 from transformers import AutoConfig, AutoModelForSeq2SeqLM
 from transformers.hf_api import HfApi
-from transformers.testing_utils import (
-    CaptureStderr,
-    CaptureStdout,
-    TestCasePlus,
-    require_torch_gpu,
-    require_torch_non_multi_gpu_but_fix_me,
-    slow,
-)
+from transformers.testing_utils import CaptureStderr, CaptureStdout, TestCasePlus, require_torch_gpu, slow
 from utils import ROUGE_KEYS, label_smoothed_nll_loss, lmap, load_json
 
 
@@ -135,7 +128,6 @@ def setUpClass(cls):
 
     @slow
     @require_torch_gpu
-    @require_torch_non_multi_gpu_but_fix_me
     def test_hub_configs(self):
         """I put require_torch_gpu cause I only want this to run with self-scheduled."""
 
@@ -153,12 +145,10 @@ def test_hub_configs(self):
                 failures.append(m)
         assert not failures, f"The following models could not be loaded through AutoConfig: {failures}"
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_no_teacher(self):
         updates = dict(student_encoder_layers=2, student_decoder_layers=1, no_teacher=True)
         self._test_distiller_cli(updates)
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_checkpointing_with_teacher(self):
         updates = dict(
             student_encoder_layers=2,
@@ -183,7 +173,6 @@ def test_distill_checkpointing_with_teacher(self):
         convert_pl_to_hf(ckpts[0], transformer_ckpts[0].parent, out_path_new)
         assert os.path.exists(os.path.join(out_path_new, "pytorch_model.bin"))
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_loss_fn(self):
         model = AutoModelForSeq2SeqLM.from_pretrained(BART_TINY)
         input_ids, mask = model.dummy_inputs["input_ids"], model.dummy_inputs["attention_mask"]
@@ -204,7 +193,6 @@ def test_loss_fn(self):
         # TODO: understand why this breaks
         self.assertEqual(nll_loss, model_computed_loss)
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_mbart(self):
         updates = dict(
             student_encoder_layers=2,
@@ -229,7 +217,6 @@ def test_distill_mbart(self):
         assert len(all_files) > 2
         self.assertEqual(len(transformer_ckpts), 2)
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_t5(self):
         updates = dict(
             student_encoder_layers=1,
@@ -241,7 +228,6 @@ def test_distill_t5(self):
         )
         self._test_distiller_cli(updates)
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_different_base_models(self):
         updates = dict(
             teacher=T5_TINY,
@@ -321,21 +307,18 @@ def run_eval_tester(self, model):
 
     # test one model to quickly (no-@slow) catch simple problems and do an
     # extensive testing of functionality with multiple models as @slow separately
-    @require_torch_non_multi_gpu_but_fix_me
     def test_run_eval(self):
         self.run_eval_tester(T5_TINY)
 
     # any extra models should go into the list here - can be slow
     @parameterized.expand([BART_TINY, MBART_TINY])
     @slow
-    @require_torch_non_multi_gpu_but_fix_me
     def test_run_eval_slow(self, model):
         self.run_eval_tester(model)
 
     # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
     @parameterized.expand([T5_TINY, MBART_TINY])
     @slow
-    @require_torch_non_multi_gpu_but_fix_me
     def test_run_eval_search(self, model):
         input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
         output_file_name = input_file_name.parent / "utest_output.txt"
@@ -386,7 +369,6 @@ def test_run_eval_search(self, model):
     @parameterized.expand(
         [T5_TINY, BART_TINY, MBART_TINY, MARIAN_TINY, FSMT_TINY],
     )
-    @require_torch_non_multi_gpu_but_fix_me
     def test_finetune(self, model):
         args_d: dict = CHEAP_ARGS.copy()
         task = "translation" if model in [MBART_TINY, MARIAN_TINY, FSMT_TINY] else "summarization"
@@ -438,7 +420,6 @@ def test_finetune(self, model):
         assert isinstance(example_batch, dict)
         assert len(example_batch) >= 4
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_finetune_extra_model_args(self):
         args_d: dict = CHEAP_ARGS.copy()
 
@@ -489,7 +470,6 @@ def test_finetune_extra_model_args(self):
         model = main(args)
         assert str(excinfo.value) == f"model config doesn't have a `{unsupported_param}` attribute"
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_finetune_lr_schedulers(self):
         args_d: dict = CHEAP_ARGS.copy()
 
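
With the skip decorators gone, all three touched files can be exercised on a multi-GPU box. A sketch of one way to run them, assuming the usual transformers convention that RUN_SLOW=1 enables `@slow` tests; the exact invocation on the project's CI may differ:

import os

import pytest

# Enable @slow tests (transformers convention); expose multiple GPUs
# beforehand with e.g. CUDA_VISIBLE_DEVICES=0,1 in the shell.
os.environ["RUN_SLOW"] = "1"
pytest.main(
    [
        "examples/seq2seq/test_bash_script.py",
        "examples/seq2seq/test_fsmt_bleu_score.py",
        "examples/seq2seq/test_seq2seq_examples.py",
    ]
)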
