Make test_lr_scheduler CPU/GPU agnostic and also fix assertion (pytorch#268)

Summary:
Pull Request resolved: pytorch#268

The assertion should be `assertEqual`, not `assertTrue`.
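
For reference, `unittest`'s `assertTrue` treats its second argument as the failure message, so a two-argument call never compares the values: the old assertion passed whenever `call_count` was truthy, regardless of the expected count. A minimal sketch of the difference (the `ExampleTest` name is illustrative, not taken from the changed file):

    import unittest
    from unittest.mock import MagicMock


    class ExampleTest(unittest.TestCase):
        def test_assertion_difference(self) -> None:
            scheduler = MagicMock()
            scheduler.step()  # step() has now been called exactly once

            # Passes even though 1 != 5: assertTrue only checks that the first
            # argument is truthy; the 5 is silently used as the failure message.
            self.assertTrue(scheduler.step.call_count, 5)

            # Actually compares the two values and fails when they differ.
            self.assertEqual(scheduler.step.call_count, 1)


    if __name__ == "__main__":
        unittest.main()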

Reviewed By: ananthsub

Differential Revision: D41049628

fbshipit-source-id: 8f977d6b16a2f41ea1c8b76a1d1b7f5cf39bacfa
daniellepintz authored and facebook-github-bot committed Nov 4, 2022
1 parent cfa36f9 commit d088a4e
Showing 1 changed file with 6 additions and 10 deletions.
tests/runner/test_auto_unit.py (6 additions, 10 deletions)

@@ -58,14 +58,12 @@ def test_app_state_mixin(self) -> None:
         for key in ("module", "optimizer", "lr_scheduler", "grad_scaler"):
             self.assertTrue(key in auto_train_unit.app_state())
 
-    @unittest.skipUnless(
-        condition=(not cuda_available), reason="This test shouldn't run on a GPU host."
-    )
     def test_lr_scheduler_step(self) -> None:
         """
         Test that the lr scheduler is stepped every optimizer step when step_lr_interval="step"
         """
-        my_module = torch.nn.Linear(2, 2)
+        device = init_from_env()
+        my_module = torch.nn.Linear(2, 2, device=device)
         my_optimizer = torch.optim.SGD(my_module.parameters(), lr=0.01)
         my_lr_scheduler = MagicMock()
         auto_train_unit = DummyAutoTrainUnit(
@@ -84,16 +82,14 @@ def test_lr_scheduler_step(self) -> None:
         train_dl = generate_random_dataloader(dataset_len, input_dim, batch_size)
         state = init_train_state(dataloader=train_dl, max_epochs=max_epochs)
         train(state, auto_train_unit)
-        self.assertTrue(my_lr_scheduler.step.call_count, expected_steps_per_epoch)
+        self.assertEqual(my_lr_scheduler.step.call_count, expected_steps_per_epoch)
 
-    @unittest.skipUnless(
-        condition=(not cuda_available), reason="This test shouldn't run on a GPU host."
-    )
     def test_lr_scheduler_epoch(self) -> None:
         """
         Test that the lr scheduler is stepped every epoch when step_lr_interval="epoch"
         """
-        my_module = torch.nn.Linear(2, 2)
+        device = init_from_env()
+        my_module = torch.nn.Linear(2, 2, device=device)
         my_optimizer = torch.optim.SGD(my_module.parameters(), lr=0.01)
         my_lr_scheduler = MagicMock()
         auto_train_unit = DummyAutoTrainUnit(
@@ -112,7 +108,7 @@ def test_lr_scheduler_epoch(self) -> None:
 
         state = init_train_state(dataloader=train_dl, max_epochs=max_epochs)
         train(state, auto_train_unit)
-        self.assertTrue(my_lr_scheduler.step.call_count, max_epochs)
+        self.assertEqual(my_lr_scheduler.step.call_count, max_epochs)
 
     @unittest.skipUnless(
         condition=cuda_available, reason="This test needs a GPU host to run."
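
The device-agnostic part of the change drops the `skipUnless` guards and instead places the module on whatever device `init_from_env()` reports, so the same test runs on CPU-only and GPU hosts. A rough sketch of the pattern, assuming `init_from_env` is imported from `torchtnt.utils` (the import statement is outside the hunks shown above):

    import torch
    from torchtnt.utils import init_from_env

    # init_from_env() returns the device inferred from the environment
    # (e.g. a CUDA device when one is available, otherwise CPU), so the
    # test body needs no CPU/GPU-specific branching or skip decorator.
    device = init_from_env()
    my_module = torch.nn.Linear(2, 2, device=device)
    my_optimizer = torch.optim.SGD(my_module.parameters(), lr=0.01)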
