diff --git a/examples/post_training_quantization/torch/mobilenet_v2/requirements.txt b/examples/post_training_quantization/torch/mobilenet_v2/requirements.txt
index 5a04422318d..76c9d079603 100644
--- a/examples/post_training_quantization/torch/mobilenet_v2/requirements.txt
+++ b/examples/post_training_quantization/torch/mobilenet_v2/requirements.txt
@@ -3,3 +3,4 @@ openvino==2024.4
 scikit-learn
 torch==2.4.0
 torchvision==0.19.0
+setuptools<=72.1.0
diff --git a/examples/post_training_quantization/torch/ssd300_vgg16/requirements.txt b/examples/post_training_quantization/torch/ssd300_vgg16/requirements.txt
index 2b593802576..d7d818c53cd 100644
--- a/examples/post_training_quantization/torch/ssd300_vgg16/requirements.txt
+++ b/examples/post_training_quantization/torch/ssd300_vgg16/requirements.txt
@@ -6,3 +6,4 @@ torch==2.4.0
 torchmetrics==1.0.1
 torchvision==0.19.0
 numpy<2
+setuptools<=72.1.0
diff --git a/examples/post_training_quantization/torch_fx/resnet18/main.py b/examples/post_training_quantization/torch_fx/resnet18/main.py
index b4f45422922..22858ac20f2 100644
--- a/examples/post_training_quantization/torch_fx/resnet18/main.py
+++ b/examples/post_training_quantization/torch_fx/resnet18/main.py
@@ -24,11 +24,13 @@
 import torchvision.models as models
 import torchvision.transforms as transforms
 from fastdownload import FastDownload
+from torch._dynamo.exc import BackendCompilerFailed
 
 import nncf
 import nncf.torch
 from nncf.common.logging.track_progress import track
 from nncf.common.utils.helpers import create_table
+from nncf.common.utils.os import is_windows
 from nncf.torch import disable_patching
 
 IMAGE_SIZE = 64
@@ -205,7 +207,16 @@ def transform_fn(data_item):
 print("Benchmark FP32 model compiled with default backend ...")
 with disable_patching():
     compiled_model = torch.compile(model)
-    fp32_latency = measure_latency(compiled_model, example_inputs=example_input)
+    try:
+        fp32_latency = measure_latency(compiled_model, example_inputs=example_input)
+    except BackendCompilerFailed as exp:
+        if not is_windows():
+            raise exp
+        print(
+            "WARNING: Torch Inductor is currently unavailable on Windows. "
+            "For more information, visit https://github.com/pytorch/pytorch/issues/135954"
+        )
+        fp32_latency = float("nan")
 print(f"{fp32_latency:.3f} ms")
 
 print("Benchmark FP32 model compiled with openvino backend ...")
diff --git a/examples/quantization_aware_training/torch/anomalib/requirements.txt b/examples/quantization_aware_training/torch/anomalib/requirements.txt
index 16bfe3c7f2c..6372c123d36 100644
--- a/examples/quantization_aware_training/torch/anomalib/requirements.txt
+++ b/examples/quantization_aware_training/torch/anomalib/requirements.txt
@@ -1 +1,2 @@
-anomalib[core,openvino]==1.0.0
\ No newline at end of file
+anomalib[core,openvino]==1.0.1
+setuptools<=72.1.0
diff --git a/examples/quantization_aware_training/torch/resnet18/requirements.txt b/examples/quantization_aware_training/torch/resnet18/requirements.txt
index 040eb6d97dd..aa0f037c182 100644
--- a/examples/quantization_aware_training/torch/resnet18/requirements.txt
+++ b/examples/quantization_aware_training/torch/resnet18/requirements.txt
@@ -2,3 +2,4 @@ fastdownload==0.0.7
 openvino==2024.4
 torch==2.4.0
 torchvision==0.19.0
+setuptools<=72.1.0