From 688c81e2b842d60b820c85b6cf56f28d6ead5751 Mon Sep 17 00:00:00 2001 From: Alexander Dokuchaev Date: Wed, 30 Oct 2024 12:20:22 +0200 Subject: [PATCH] Fix test examples on windows (#3045) ### Changes - `setuptools<=72.1.0` to avoid issue `Could not compile CPU quantization extensions. Falling back on torch native operations - CPU quantization fine-tuning may be slower than expected.` on windows. - Add catch `BackendCompilerFailed` exception for `post_training_quantization_torch_fx_resnet18` - Bump version of anomalib to 1.0.1 to fix `quantization_aware_training_torch_resnet18` tests on windows ### Related tickets 155866 --- .../torch/mobilenet_v2/requirements.txt | 1 + .../torch/ssd300_vgg16/requirements.txt | 1 + .../torch_fx/resnet18/main.py | 13 ++++++++++++- .../torch/anomalib/requirements.txt | 3 ++- .../torch/resnet18/requirements.txt | 1 + 5 files changed, 17 insertions(+), 2 deletions(-) diff --git a/examples/post_training_quantization/torch/mobilenet_v2/requirements.txt b/examples/post_training_quantization/torch/mobilenet_v2/requirements.txt index 5a04422318d..76c9d079603 100644 --- a/examples/post_training_quantization/torch/mobilenet_v2/requirements.txt +++ b/examples/post_training_quantization/torch/mobilenet_v2/requirements.txt @@ -3,3 +3,4 @@ openvino==2024.4 scikit-learn torch==2.4.0 torchvision==0.19.0 +setuptools<=72.1.0 diff --git a/examples/post_training_quantization/torch/ssd300_vgg16/requirements.txt b/examples/post_training_quantization/torch/ssd300_vgg16/requirements.txt index 2b593802576..d7d818c53cd 100644 --- a/examples/post_training_quantization/torch/ssd300_vgg16/requirements.txt +++ b/examples/post_training_quantization/torch/ssd300_vgg16/requirements.txt @@ -6,3 +6,4 @@ torch==2.4.0 torchmetrics==1.0.1 torchvision==0.19.0 numpy<2 +setuptools<=72.1.0 diff --git a/examples/post_training_quantization/torch_fx/resnet18/main.py b/examples/post_training_quantization/torch_fx/resnet18/main.py index b4f45422922..22858ac20f2 100644 --- 
a/examples/post_training_quantization/torch_fx/resnet18/main.py +++ b/examples/post_training_quantization/torch_fx/resnet18/main.py @@ -24,11 +24,13 @@ import torchvision.models as models import torchvision.transforms as transforms from fastdownload import FastDownload +from torch._dynamo.exc import BackendCompilerFailed import nncf import nncf.torch from nncf.common.logging.track_progress import track from nncf.common.utils.helpers import create_table +from nncf.common.utils.os import is_windows from nncf.torch import disable_patching IMAGE_SIZE = 64 @@ -205,7 +207,16 @@ def transform_fn(data_item): print("Benchmark FP32 model compiled with default backend ...") with disable_patching(): compiled_model = torch.compile(model) - fp32_latency = measure_latency(compiled_model, example_inputs=example_input) + try: + fp32_latency = measure_latency(compiled_model, example_inputs=example_input) + except BackendCompilerFailed as exp: + if not is_windows(): + raise exp + print( + "WARNING: Torch Inductor is currently unavailable on Windows. 
" + "For more information, visit https://github.com/pytorch/pytorch/issues/135954" + ) + fp32_latency = float("nan") print(f"{fp32_latency:.3f} ms") print("Benchmark FP32 model compiled with openvino backend ...") diff --git a/examples/quantization_aware_training/torch/anomalib/requirements.txt b/examples/quantization_aware_training/torch/anomalib/requirements.txt index 16bfe3c7f2c..6372c123d36 100644 --- a/examples/quantization_aware_training/torch/anomalib/requirements.txt +++ b/examples/quantization_aware_training/torch/anomalib/requirements.txt @@ -1 +1,2 @@ -anomalib[core,openvino]==1.0.0 \ No newline at end of file +anomalib[core,openvino]==1.0.1 +setuptools<=72.1.0 diff --git a/examples/quantization_aware_training/torch/resnet18/requirements.txt b/examples/quantization_aware_training/torch/resnet18/requirements.txt index 040eb6d97dd..aa0f037c182 100644 --- a/examples/quantization_aware_training/torch/resnet18/requirements.txt +++ b/examples/quantization_aware_training/torch/resnet18/requirements.txt @@ -2,3 +2,4 @@ fastdownload==0.0.7 openvino==2024.4 torch==2.4.0 torchvision==0.19.0 +setuptools<=72.1.0