diff --git a/tests/py/dynamo/lowering/test_aten_lowering_passes.py b/tests/py/dynamo/lowering/test_aten_lowering_passes.py
index 2d7a4731f5..0dd9a8de1c 100644
--- a/tests/py/dynamo/lowering/test_aten_lowering_passes.py
+++ b/tests/py/dynamo/lowering/test_aten_lowering_passes.py
@@ -1,9 +1,11 @@
+import sys
 import unittest
 
 import torch
-import torch_tensorrt
 from torch.testing._internal.common_utils import TestCase, run_tests
 
+import torch_tensorrt
+
 from ..testing_utilities import DECIMALS_OF_AGREEMENT, lower_graph_testing
 
 
@@ -273,6 +275,10 @@ def forward(self, q, k, v):
     torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8,
     "GPU compute capability is too low to run flash attention, need Ampere (8.0) or greater",
 )
+@unittest.skipIf(
+    sys.platform.startswith("win"),
+    "Test not supported on Windows",
+)
 class TestLowerFlashAttention(TestCase):
     def test_lower_flash_attention(self):
         class FlashAttention(torch.nn.Module):
diff --git a/tests/py/dynamo/runtime/test_hw_compat.py b/tests/py/dynamo/runtime/test_hw_compat.py
index fa87c9947c..6106c96512 100644
--- a/tests/py/dynamo/runtime/test_hw_compat.py
+++ b/tests/py/dynamo/runtime/test_hw_compat.py
@@ -2,9 +2,10 @@
 import unittest
 
 import torch
-import torch_tensorrt
 from torch.testing._internal.common_utils import TestCase, run_tests
 
+import torch_tensorrt
+
 
 class TestHardwareCompatibility(TestCase):
     @unittest.skipIf(
@@ -63,8 +64,9 @@ def forward(self, x):
         self.assertIn("Hardware Compatibility: Disabled", cpp_repr)
 
     @unittest.skipIf(
-        torch.ops.tensorrt.ABI_VERSION() != "5",
-        "Detected incorrect ABI version, please update this test case",
+        not torch_tensorrt.ENABLED_FEATURES.torch_tensorrt_runtime
+        or torch.ops.tensorrt.ABI_VERSION() != "5",
+        "Torch-TensorRT runtime is not available or ABI Version is incompatible",
     )
     @unittest.skipIf(
         not torch_tensorrt.ENABLED_FEATURES.torch_tensorrt_runtime,