Skip to content

Commit 52503f2

Browse files
committed
Lint
1 parent 0b4c279 commit 52503f2

File tree

3 files changed

+3
-9
lines changed

3 files changed

+3
-9
lines changed

src/transformers/trainer.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2198,8 +2198,8 @@ def torchdynamo_smart_context_manager(self):
21982198
ctx_manager = torchdynamo.optimize("eager")
21992199
elif self.args.torchdynamo == "nvfuser":
22002200
ctx_manager = torchdynamo.optimize(aot_autograd_speedup_strategy)
2201-
else:
2202-
ctx_manager = contextlib.nullcontext()
2201+
elif self.args.torchdynamo is not None:
2202+
raise ValueError("torchdynamo training arg can be eager/nvfuser")
22032203
return ctx_manager
22042204

22052205
def autocast_smart_context_manager(self):

src/transformers/utils/import_utils.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -377,12 +377,7 @@ def is_torch_tpu_available():
377377

378378

379379
def is_torchdynamo_available():
380-
try:
381-
import torchdynamo
382-
383-
return True
384-
except ImportError:
385-
return False
380+
return importlib.util.find_spec("torchdynamo") is not None
386381

387382

388383
def is_datasets_available():

tests/trainer/test_trainer.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1598,7 +1598,6 @@ def test_fp16_full_eval(self):
15981598
@require_torch_gpu
15991599
@require_torchdynamo
16001600
def test_torchdynamo_full_eval(self):
1601-
debug = 0
16021601
n_gpus = get_gpu_count()
16031602

16041603
bs = 8

0 commit comments

Comments (0)