diff --git a/tests/helpers.py b/tests/helpers.py
index 31147262..d34ad41e 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -174,8 +174,8 @@ def assert_all_close(
         from torch.testing._comparison import get_tolerances
 
         rtol, atol = get_tolerances(actual, expected, rtol=rtol, atol=atol)
-        rtol *= 10 * NUM_UPDATES
-        atol *= 10 * NUM_UPDATES
+        rtol *= 4 * NUM_UPDATES
+        atol *= 4 * NUM_UPDATES
 
     torch.testing.assert_close(
         actual,
diff --git a/tests/test_alias.py b/tests/test_alias.py
index b75202a5..4a0b8473 100644
--- a/tests/test_alias.py
+++ b/tests/test_alias.py
@@ -88,7 +88,7 @@ def test_sgd(
 
 
 @helpers.parametrize(
-    dtype=[torch.float64, torch.float32],
+    dtype=[torch.float64],
     lr=[1e-2, 1e-3, 1e-4],
     betas=[(0.9, 0.999), (0.95, 0.9995)],
     eps=[1e-8],
@@ -146,7 +146,7 @@ def test_adam(
 
 
 @helpers.parametrize(
-    dtype=[torch.float64, torch.float32],
+    dtype=[torch.float64],
     lr=[1e-2, 1e-3, 1e-4],
     betas=[(0.9, 0.999), (0.95, 0.9995)],
     eps=[1e-8],
@@ -206,7 +206,7 @@ def test_adam_accelerated_cpu(
 
 @pytest.mark.skipif(not torch.cuda.is_available(), reason='No CUDA device available.')
 @helpers.parametrize(
-    dtype=[torch.float64, torch.float32],
+    dtype=[torch.float64],
     lr=[1e-2, 1e-3, 1e-4],
     betas=[(0.9, 0.999), (0.95, 0.9995)],
     eps=[1e-8],
@@ -267,7 +267,7 @@ def test_adam_accelerated_cuda(
 
 
 @helpers.parametrize(
-    dtype=[torch.float64, torch.float32],
+    dtype=[torch.float64],
     lr=[1e-2, 1e-3, 1e-4],
     alpha=[0.9, 0.99],
     eps=[1e-8],
diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py
index 34063a70..b2a99152 100644
--- a/tests/test_optimizer.py
+++ b/tests/test_optimizer.py
@@ -84,7 +84,7 @@ def test_SGD(
 
 
 @helpers.parametrize(
-    dtype=[torch.float64, torch.float32],
+    dtype=[torch.float64],
     lr=[1e-2, 1e-3, 1e-4],
     betas=[(0.9, 0.999), (0.95, 0.9995)],
     eps=[1e-8],
@@ -139,7 +139,7 @@ def test_Adam(
 
 
 @helpers.parametrize(
-    dtype=[torch.float64, torch.float32],
+    dtype=[torch.float64],
     lr=[1e-2, 1e-3, 1e-4],
     betas=[(0.9, 0.999), (0.95, 0.9995)],
     eps=[1e-8],
@@ -196,7 +196,7 @@ def test_Adam_accelerated_cpu(
 
 @pytest.mark.skipif(not torch.cuda.is_available(), reason='No CUDA device available.')
 @helpers.parametrize(
-    dtype=[torch.float64, torch.float32],
+    dtype=[torch.float64],
     lr=[1e-2, 1e-3, 1e-4],
     betas=[(0.9, 0.999), (0.95, 0.9995)],
     eps=[1e-8],
@@ -254,7 +254,7 @@ def test_Adam_accelerated_cuda(
 
 
 @helpers.parametrize(
-    dtype=[torch.float64, torch.float32],
+    dtype=[torch.float64],
     lr=[1e-2, 1e-3, 1e-4],
     alpha=[0.9, 0.99],
     eps=[1e-8],