File tree: backends/cadence/aot/quantizer
5 files changed: +11 -11 lines changed

Summary of the change: the Cadence AOT quantizer is migrated from the torch.ao.quantization PT2E quantizer APIs to their torchao.quantization.pt2e counterparts, the dependency on the XNNPACK quantizer utilities is dropped, and the directory is removed from an exclude_patterns list so it is no longer skipped by that tooling.

File 1 of 5 (exclude_patterns configuration):

@@ -390,7 +390,6 @@ exclude_patterns = [
     "backends/arm/test/ops/**",
     "backends/vulkan/quantizer/**",
     "backends/vulkan/test/**",
-    "backends/cadence/aot/quantizer/**",
     "backends/qualcomm/quantizer/**",
     "examples/qualcomm/**",
     "backends/xnnpack/quantizer/**",
File 2 of 5 (build targets):

@@ -9,6 +9,7 @@ python_library(
     ],
     deps = [
         "//caffe2:torch",
+        "//pytorch/ao:torchao",
     ],
 )
 
@@ -34,7 +35,6 @@ python_library(
         ":patterns",
         ":utils",
         "//caffe2:torch",
-        "//executorch/backends/xnnpack/quantizer:xnnpack_quantizer_utils",
     ],
 )
 
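The new //pytorch/ao:torchao dependency backs the import moves in the Python files below. As a quick sanity check, not part of the PR, the following snippet assumes a torchao build that ships the pt2e flow and simply verifies that the symbols this change starts importing actually resolve:

# Hedged smoke test (an assumption, not part of the diff): import every symbol
# that the files below start pulling from torchao.
from torchao.quantization.pt2e import (
    HistogramObserver,
    MinMaxObserver,
    ObserverOrFakeQuantize,
)
from torchao.quantization.pt2e.quantizer import (
    ComposableQuantizer,
    DerivedQuantizationSpec,
    OperatorConfig,
    QuantizationAnnotation,
    QuantizationConfig,
    QuantizationSpec,
    Quantizer,
    SharedQuantizationSpec,
)

print("torchao pt2e quantizer imports resolved")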
File 3 of 5:

@@ -15,7 +15,7 @@
 
 from torch import fx
 from torch._ops import OpOverload
-from torch.ao.quantization.quantizer import (
+from torchao.quantization.pt2e.quantizer import (
     DerivedQuantizationSpec,
     SharedQuantizationSpec,
 )
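For context, here is a hedged sketch of how the two relocated specs are typically used when annotating a matched pattern; it is not code from this PR. The helper and node names (derive_bias_qparams, example_specs, node, input_act, weight) are illustrative assumptions, and the qparam arithmetic is the conventional bias derivation, not necessarily what the Cadence patterns do.

from typing import List

import torch
from torchao.quantization.pt2e import ObserverOrFakeQuantize
from torchao.quantization.pt2e.quantizer import (
    DerivedQuantizationSpec,
    SharedQuantizationSpec,
)


def derive_bias_qparams(obs_or_fqs: List[ObserverOrFakeQuantize]):
    # Conventional bias derivation: scale = act_scale * weight_scale, zero_point = 0.
    act_scale, _ = obs_or_fqs[0].calculate_qparams()
    weight_scale, _ = obs_or_fqs[1].calculate_qparams()
    bias_scale = act_scale * weight_scale
    return bias_scale, torch.zeros_like(bias_scale, dtype=torch.int64)


def example_specs(node: torch.fx.Node, input_act: torch.fx.Node, weight: torch.fx.Node):
    # Bias qparams are derived from the observers sitting on the listed edges.
    bias_qspec = DerivedQuantizationSpec(
        derived_from=[(input_act, node), (weight, node)],
        derive_qparams_fn=derive_bias_qparams,
        dtype=torch.int32,
        quant_min=-(2**31),
        quant_max=2**31 - 1,
        qscheme=torch.per_tensor_affine,
    )
    # The output edge reuses whatever qparams were observed on the input edge.
    out_qspec = SharedQuantizationSpec((input_act, node))
    return bias_qspec, out_qspec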
File 4 of 5:

@@ -29,19 +29,20 @@
     is_annotated,
     no_outside_users,
 )
-from executorch.backends.xnnpack.quantizer.xnnpack_quantizer_utils import (
+
+from torch import fx
+
+from torchao.quantization.pt2e import HistogramObserver, MinMaxObserver
+from torchao.quantization.pt2e.quantizer import (
+    ComposableQuantizer,
+    DerivedQuantizationSpec,
     OperatorConfig,
     QuantizationAnnotation,
     QuantizationConfig,
     QuantizationSpec,
+    Quantizer,
 )
 
-from torch import fx
-
-from torch.ao.quantization.observer import HistogramObserver, MinMaxObserver
-from torch.ao.quantization.quantizer import DerivedQuantizationSpec, Quantizer
-from torch.ao.quantization.quantizer.composable_quantizer import ComposableQuantizer
-
 
 act_qspec_asym8s = QuantizationSpec(
     dtype=torch.int8,
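A hedged sketch of how these relocated classes fit together, assuming they behave like their former torch.ao.quantization counterparts: only dtype=torch.int8 is visible in the diff context above, so the remaining spec fields and the CadenceLikeQuantizer class below are illustrative assumptions, not code from this repository.

import torch
from torchao.quantization.pt2e import HistogramObserver
from torchao.quantization.pt2e.quantizer import (
    ComposableQuantizer,
    QuantizationSpec,
    Quantizer,
)

# Asymmetric int8 activation spec; field values beyond dtype are illustrative.
act_qspec_asym8s = QuantizationSpec(
    dtype=torch.int8,
    quant_min=-128,
    quant_max=127,
    qscheme=torch.per_tensor_affine,
    is_dynamic=False,
    observer_or_fake_quant_ctr=HistogramObserver.with_args(eps=2**-12),
)


class CadenceLikeQuantizer(Quantizer):
    # Hypothetical Quantizer subclass: annotate() would attach QuantizationSpec
    # objects to matched nodes; validate() can sanity-check the annotations.
    def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
        return model

    def validate(self, model: torch.fx.GraphModule) -> None:
        pass


# ComposableQuantizer applies each sub-quantizer's annotate() in order, which is
# how per-operator quantizers are usually combined into one quantizer object.
composed = ComposableQuantizer([CadenceLikeQuantizer(), CadenceLikeQuantizer()])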
File 5 of 5:

@@ -14,13 +14,13 @@
 import torch
 from torch import fx
 from torch._ops import OpOverload
-from torch.ao.quantization import ObserverOrFakeQuantize
 
 from torch.fx import GraphModule
 from torch.fx.passes.utils.source_matcher_utils import (
     check_subgraphs_connected,
     SourcePartition,
 )
+from torchao.quantization.pt2e import ObserverOrFakeQuantize
 
 
 def quantize_tensor_multiplier(
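ObserverOrFakeQuantize is just a type alias covering observer and fake-quantize modules, so only its import location changes here. A small hedged sketch of the kind of helper it types (read_scale is hypothetical, not from this file):

import torch
from torchao.quantization.pt2e import MinMaxObserver, ObserverOrFakeQuantize


def read_scale(obs_or_fq: ObserverOrFakeQuantize) -> torch.Tensor:
    # Both observers and fake-quantize modules expose calculate_qparams().
    scale, _zero_point = obs_or_fq.calculate_qparams()
    return scale


obs = MinMaxObserver()
obs(torch.randn(4, 4))   # record min/max statistics
print(read_scale(obs))   # per-tensor scale derived from the recorded range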