
Commit 4b45106

Commit message: lint

Parent: 1727bde

8 files changed: +29 -22 lines

backends/cortex_m/test/test_replace_quant_nodes.py

Lines changed: 5 additions & 5 deletions
@@ -16,15 +16,15 @@
     ReplaceQuantNodesPass,
 )
 from executorch.exir.dialects._ops import ops as exir_ops
-from torch.ao.quantization.observer import HistogramObserver
-from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
-from torch.ao.quantization.quantizer.quantizer import (
+from torch.export import export, export_for_training
+from torch.fx import GraphModule
+from torchao.quantization.pt2e.observer import HistogramObserver
+from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
+from torchao.quantization.pt2e.quantizer import (
     QuantizationAnnotation,
     QuantizationSpec,
     Quantizer,
 )
-from torch.export import export, export_for_training
-from torch.fx import GraphModule


 @dataclass(eq=True, frozen=True)
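
For orientation, the imports touched above feed the standard PT2E quantization flow, which now lives under torchao rather than torch.ao. The sketch below shows that flow using the new import paths; the helper name quantize_with_pt2e, the toy usage, and the concrete Quantizer instance are assumptions for illustration, not part of this commit.

import torch
from torch.export import export_for_training
from torch.fx import GraphModule
from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
from torchao.quantization.pt2e.quantizer import Quantizer


def quantize_with_pt2e(model: torch.nn.Module,
                       quantizer: Quantizer,
                       example_inputs: tuple) -> GraphModule:
    # Capture the eager model as an FX graph (pre-autograd export).
    graph_module = export_for_training(model, example_inputs).module()
    # Insert observers/fake-quants according to the quantizer's annotations.
    prepared = prepare_pt2e(graph_module, quantizer)
    # Calibrate on representative inputs.
    prepared(*example_inputs)
    # Fold observers into quantize/dequantize ops.
    return convert_pt2e(prepared)

Any torchao PT2E Quantizer subclass (for example, a backend-specific quantizer) can be passed as the quantizer argument.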

backends/nxp/tests/executorch_pipeline.py

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@
     to_edge_transform_and_lower,
 )
 from torch import nn
-from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
+from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e


 def _quantize_model(model, calibration_inputs: list[tuple[torch.Tensor]]):

backends/vulkan/test/test_vulkan_delegate.py

Lines changed: 3 additions & 3 deletions
@@ -23,11 +23,11 @@
     EdgeProgramManager,
     ExecutorchProgramManager,
 )
+from torch.export import Dim, export, export_for_training, ExportedProgram

-from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
+from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e

-from torch.ao.quantization.quantizer import Quantizer
-from torch.export import Dim, export, export_for_training, ExportedProgram
+from torchao.quantization.pt2e.quantizer import Quantizer

 ctypes.CDLL("libvulkan.so.1")

backends/xnnpack/test/quantizer/test_pt2e_quantization.py

Lines changed: 2 additions & 2 deletions
@@ -28,7 +28,6 @@
     NodeSpec as ns,
     TestHelperModules,
 )
-from torchao.testing.pt2e.utils import PT2EQuantizationTestCase
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     TemporaryFileName,
@@ -52,6 +51,7 @@
 from torchao.quantization.pt2e.quantizer import Quantizer
 from torchao.quantization.pt2e.quantizer.composable_quantizer import ComposableQuantizer
 from torchao.quantization.pt2e.quantizer.embedding_quantizer import EmbeddingQuantizer
+from torchao.testing.pt2e.utils import PT2EQuantizationTestCase


 class TestQuantizePT2E(PT2EQuantizationTestCase):
@@ -798,7 +798,7 @@ def test_extract_results_from_loggers(self) -> None:
         ref_results = extract_results_from_loggers(m_ref_logger)
         quant_results = extract_results_from_loggers(m_quant_logger)
         comparison_results = compare_results(
-            ref_results, # pyre-ignore[6]
+            ref_results,  # pyre-ignore[6]
             quant_results,  # pyre-ignore[6]
         )
         for node_summary in comparison_results.values():

backends/xnnpack/test/quantizer/test_xnnpack_quantizer.py

Lines changed: 7 additions & 2 deletions
@@ -9,7 +9,12 @@
     XNNPACKQuantizer,
 )
 from torch.ao.ns.fx.utils import compute_sqnr
-from torch.ao.quantization import default_dynamic_qconfig, default_dynamic_fake_quant, QConfig, QConfigMapping
+from torch.ao.quantization import (
+    default_dynamic_fake_quant,
+    default_dynamic_qconfig,
+    QConfig,
+    QConfigMapping,
+)
 from torch.ao.quantization.backend_config import get_qnnpack_backend_config
 from torch.ao.quantization.qconfig import (
     default_per_channel_symmetric_qnnpack_qconfig,
@@ -29,9 +34,9 @@
     skipIfNoQNNPACK,
     TestHelperModules,
 )
-from torchao.testing.pt2e.utils import PT2EQuantizationTestCase
 from torch.testing._internal.common_quantized import override_quantized_engine
 from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
+from torchao.testing.pt2e.utils import PT2EQuantizationTestCase


 @skipIfNoQNNPACK

exir/backend/test/demos/test_xnnpack_qnnpack.py

Lines changed: 5 additions & 1 deletion
@@ -28,6 +28,10 @@
     _load_for_executorch_from_buffer,
 )
 from executorch.extension.pytree import tree_flatten
+from torch.ao.quantization import (
+    default_dynamic_quant_observer,
+    default_per_channel_weight_observer,
+)
 from torch.ao.quantization.backend_config.executorch import (
     get_executorch_backend_config,
 )
@@ -36,7 +40,7 @@
     _convert_to_reference_decomposed_fx,
     prepare_fx,
 )
-from torch.ao.quantization import default_dynamic_quant_observer, default_per_channel_weight_observer
+

 class TestXnnQnnBackends(unittest.TestCase):
     def test_add_xnnpack_and_dqlinear_qnn(self):
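
As background, the two observers consolidated into the new import block are typically combined into a dynamic-quantization QConfig for the FX flow imported by this file. Below is a minimal, self-contained sketch of that pattern; the toy model, example inputs, and the exact QConfig wiring are illustrative assumptions rather than a transcription of the test.

import torch
from torch.ao.quantization import (
    QConfig,
    QConfigMapping,
    default_dynamic_quant_observer,
    default_per_channel_weight_observer,
)
from torch.ao.quantization.backend_config.executorch import (
    get_executorch_backend_config,
)
from torch.ao.quantization.quantize_fx import (
    _convert_to_reference_decomposed_fx,
    prepare_fx,
)

# Toy module and inputs, purely for illustration.
model = torch.nn.Sequential(torch.nn.Linear(8, 8)).eval()
example_inputs = (torch.randn(1, 8),)

# Dynamic activation observer + per-channel weight observer for Linear layers.
dynamic_qconfig = QConfig(
    activation=default_dynamic_quant_observer,
    weight=default_per_channel_weight_observer,
)
qconfig_mapping = QConfigMapping().set_object_type(torch.nn.Linear, dynamic_qconfig)

# Insert observers, then lower to a reference decomposed quantized graph.
prepared = prepare_fx(
    model,
    qconfig_mapping,
    example_inputs,
    backend_config=get_executorch_backend_config(),
)
converted = _convert_to_reference_decomposed_fx(
    prepared,
    backend_config=get_executorch_backend_config(),
)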

exir/tests/test_quantization.py

Lines changed: 4 additions & 6 deletions
@@ -19,15 +19,13 @@
 from executorch.exir.passes.quant_fusion_pass import QuantFusionPass
 from executorch.exir.passes.spec_prop_pass import SpecPropPass
 from torch.ao.ns.fx.utils import compute_sqnr
+from torch.ao.quantization.backend_config import get_executorch_backend_config
+from torch.ao.quantization.qconfig import default_per_channel_symmetric_qnnpack_qconfig
+from torch.ao.quantization.qconfig_mapping import QConfigMapping
 from torch.ao.quantization.quantize_fx import (  # @manual
     _convert_to_reference_decomposed_fx,
+    prepare_fx,
 )
-from torch.ao.quantization.qconfig_mapping import (
-    QConfigMapping,
-)
-from torch.ao.quantization.backend_config import get_executorch_backend_config
-from torch.ao.quantization.qconfig import default_per_channel_symmetric_qnnpack_qconfig
-from torch.ao.quantization.quantize_fx import prepare_fx
 from torch.export import export
 from torch.testing import FileCheck
 from torch.testing._internal.common_quantized import override_quantized_engine

export/export.py

Lines changed: 2 additions & 2 deletions
@@ -13,10 +13,10 @@
 from executorch.runtime import Runtime, Verification
 from tabulate import tabulate
 from torch import nn
-from torch.ao.quantization import allow_exported_model_train_eval
-from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
 from torch.export import ExportedProgram
 from torchao.quantization import quantize_
+from torchao.quantization.pt2e import allow_exported_model_train_eval
+from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
 from torchao.utils import unwrap_tensor_subclass

 from .recipe import ExportRecipe
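
Alongside the PT2E helpers moved here, export.py also imports torchao's source-transform path (quantize_ plus unwrap_tensor_subclass). The snippet below is a generic sketch of how those two imports are commonly used before torch.export; the toy model, inputs, and the int8_weight_only config choice are assumptions for illustration, not taken from this file.

import torch
from torch.export import export
from torchao.quantization import int8_weight_only, quantize_
from torchao.utils import unwrap_tensor_subclass

# Toy module and inputs, purely for illustration.
model = torch.nn.Sequential(torch.nn.Linear(16, 16)).eval()
example_inputs = (torch.randn(1, 16),)

# Apply a torchao quantization config in place (weights become tensor subclasses).
quantize_(model, int8_weight_only())
# Flatten the tensor subclasses so torch.export can trace the quantized model.
model = unwrap_tensor_subclass(model)
exported_program = export(model, example_inputs)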
