@@ -19,14 +19,7 @@
 from run_eval_search import run_search
 from transformers import AutoConfig, AutoModelForSeq2SeqLM
 from transformers.hf_api import HfApi
-from transformers.testing_utils import (
-    CaptureStderr,
-    CaptureStdout,
-    TestCasePlus,
-    require_torch_gpu,
-    require_torch_non_multi_gpu_but_fix_me,
-    slow,
-)
+from transformers.testing_utils import CaptureStderr, CaptureStdout, TestCasePlus, require_torch_gpu, slow
 from utils import ROUGE_KEYS, label_smoothed_nll_loss, lmap, load_json
 
 
@@ -135,7 +128,6 @@ def setUpClass(cls):
 
     @slow
     @require_torch_gpu
-    @require_torch_non_multi_gpu_but_fix_me
     def test_hub_configs(self):
         """I put require_torch_gpu cause I only want this to run with self-scheduled."""
 
@@ -153,12 +145,10 @@ def test_hub_configs(self):
                 failures.append(m)
         assert not failures, f"The following models could not be loaded through AutoConfig: {failures}"
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_no_teacher(self):
         updates = dict(student_encoder_layers=2, student_decoder_layers=1, no_teacher=True)
         self._test_distiller_cli(updates)
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_checkpointing_with_teacher(self):
         updates = dict(
             student_encoder_layers=2,
@@ -183,7 +173,6 @@ def test_distill_checkpointing_with_teacher(self):
         convert_pl_to_hf(ckpts[0], transformer_ckpts[0].parent, out_path_new)
         assert os.path.exists(os.path.join(out_path_new, "pytorch_model.bin"))
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_loss_fn(self):
         model = AutoModelForSeq2SeqLM.from_pretrained(BART_TINY)
         input_ids, mask = model.dummy_inputs["input_ids"], model.dummy_inputs["attention_mask"]
@@ -204,7 +193,6 @@ def test_loss_fn(self):
         # TODO: understand why this breaks
         self.assertEqual(nll_loss, model_computed_loss)
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_mbart(self):
         updates = dict(
             student_encoder_layers=2,
@@ -229,7 +217,6 @@ def test_distill_mbart(self):
         assert len(all_files) > 2
         self.assertEqual(len(transformer_ckpts), 2)
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_t5(self):
         updates = dict(
             student_encoder_layers=1,
@@ -241,7 +228,6 @@ def test_distill_t5(self):
         )
         self._test_distiller_cli(updates)
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_different_base_models(self):
         updates = dict(
             teacher=T5_TINY,
@@ -321,21 +307,18 @@ def run_eval_tester(self, model):
 
     # test one model to quickly (no-@slow) catch simple problems and do an
     # extensive testing of functionality with multiple models as @slow separately
-    @require_torch_non_multi_gpu_but_fix_me
     def test_run_eval(self):
         self.run_eval_tester(T5_TINY)
 
     # any extra models should go into the list here - can be slow
     @parameterized.expand([BART_TINY, MBART_TINY])
     @slow
-    @require_torch_non_multi_gpu_but_fix_me
     def test_run_eval_slow(self, model):
         self.run_eval_tester(model)
 
     # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
     @parameterized.expand([T5_TINY, MBART_TINY])
     @slow
-    @require_torch_non_multi_gpu_but_fix_me
     def test_run_eval_search(self, model):
         input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
         output_file_name = input_file_name.parent / "utest_output.txt"
@@ -386,7 +369,6 @@ def test_run_eval_search(self, model):
     @parameterized.expand(
         [T5_TINY, BART_TINY, MBART_TINY, MARIAN_TINY, FSMT_TINY],
     )
-    @require_torch_non_multi_gpu_but_fix_me
     def test_finetune(self, model):
         args_d: dict = CHEAP_ARGS.copy()
         task = "translation" if model in [MBART_TINY, MARIAN_TINY, FSMT_TINY] else "summarization"
@@ -438,7 +420,6 @@ def test_finetune(self, model):
         assert isinstance(example_batch, dict)
         assert len(example_batch) >= 4
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_finetune_extra_model_args(self):
         args_d: dict = CHEAP_ARGS.copy()
 
@@ -489,7 +470,6 @@ def test_finetune_extra_model_args(self):
             model = main(args)
         assert str(excinfo.value) == f"model config doesn't have a `{unsupported_param}` attribute"
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_finetune_lr_schedulers(self):
         args_d: dict = CHEAP_ARGS.copy()
 