```diff
@@ -6,22 +6,47 @@
 
 import unittest
 
+import torch
+from parameterized import parameterized
+
 from torchao.testing.model_architectures import create_model_and_input_data
+from torchao.utils import get_available_devices
 
 
 class TestModels(unittest.TestCase):
-    def test_toy_linear_model(self):
-        model, input_data = create_model_and_input_data("linear", 10, 64, 32)
+    @parameterized.expand([(device,) for device in get_available_devices()])
+    def test_toy_linear_model(self, device):
+        # Skip if device is not available
+        if device == "cuda" and not torch.cuda.is_available():
+            self.skipTest("CUDA not available")
+
+        model, input_data = create_model_and_input_data(
+            "linear", 10, 64, 32, device=device
+        )
         output = model(input_data)
         self.assertEqual(output.shape, (10, 32))
 
-    def test_ln_linear_activation_model(self):
-        model, input_data = create_model_and_input_data("ln_linear_sigmoid", 10, 64, 32)
+    @parameterized.expand([(device,) for device in get_available_devices()])
+    def test_ln_linear_activation_model(self, device):
+        # Skip if device is not available
+        if device == "cuda" and not torch.cuda.is_available():
+            self.skipTest("CUDA not available")
+
+        model, input_data = create_model_and_input_data(
+            "ln_linear_sigmoid", 10, 64, 32, device=device
+        )
         output = model(input_data)
         self.assertEqual(output.shape, (10, 32))
 
-    def test_transformer_block(self):
-        model, input_data = create_model_and_input_data("transformer_block", 10, 64, 32)
+    @parameterized.expand([(device,) for device in get_available_devices()])
+    def test_transformer_block(self, device):
+        # Skip if device is not available
+        if device == "cuda" and not torch.cuda.is_available():
+            self.skipTest("CUDA not available")
+
+        model, input_data = create_model_and_input_data(
+            "transformer_block", 10, 64, 32, device=device
+        )
         output = model(input_data)
         self.assertEqual(output.shape, (10, 16, 64))
 
```
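For reference, a minimal sketch of the pattern this diff introduces: `parameterized.expand` takes a list of argument tuples at class-definition time and generates one test method per tuple, so each test runs once per device string. The `_available_devices` helper and the `TestDeviceParam`/`test_matmul_shape` names below are hypothetical stand-ins for `torchao.utils.get_available_devices` and the real test class; only the parameterization mechanics are the point.

```python
import unittest

import torch
from parameterized import parameterized


def _available_devices():
    # Hypothetical stand-in for torchao.utils.get_available_devices:
    # assume it returns a device string for every usable backend.
    devices = ["cpu"]
    if torch.cuda.is_available():
        devices.append("cuda")
    return devices


class TestDeviceParam(unittest.TestCase):
    # expand() is evaluated when the class body executes, so one
    # test method is generated per device string in the list
    # (e.g. test_matmul_shape_0_cpu, test_matmul_shape_1_cuda).
    @parameterized.expand([(device,) for device in _available_devices()])
    def test_matmul_shape(self, device):
        x = torch.randn(10, 64, device=device)
        w = torch.randn(64, 32, device=device)
        self.assertEqual((x @ w).shape, (10, 32))


if __name__ == "__main__":
    unittest.main()
```

Because `expand` fixes the device list at import time, the in-test `skipTest` guard in the diff presumably serves as a defensive double-check in case CUDA is listed but unusable when the test actually runs.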