
Commit c9b3d09

metascroy authored and facebook-github-bot committed
Add backward compatible types to pt2e prepare (#2244)
Summary: Pull Request resolved: pytorch/ao#2244
With backward-compatible parameter types on prepare_pt2e upstream, the pyre-ignore[6] (incompatible parameter type) suppressions at these ExecuTorch call sites are no longer needed and are removed here.

Differential Revision: D75248288
1 parent df5e7df, commit c9b3d09

File tree: 3 files changed (+3, -3 lines)

  backends/cadence/aot/compiler.py
  examples/xnnpack/quantization/utils.py
  extension/llm/export/builder.py

backends/cadence/aot/compiler.py

Lines changed: 1 addition & 1 deletion
@@ -123,7 +123,7 @@ def prepare_and_convert_pt2(
     assert isinstance(model_gm, torch.fx.GraphModule)
 
     # Prepare
-    prepared_model = prepare_pt2e(model_gm, quantizer)  # pyre-ignore[6]
+    prepared_model = prepare_pt2e(model_gm, quantizer)
 
     # Calibrate
     # If no calibration data is provided, use the inputs

examples/xnnpack/quantization/utils.py

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@ def quantize(
         is_dynamic=is_dynamic,
     )
     quantizer.set_global(operator_config)
-    m = prepare_pt2e(model, quantizer)  # pyre-ignore[6]
+    m = prepare_pt2e(model, quantizer)
     # calibration
     m(*example_inputs)
     m = convert_pt2e(m)

extension/llm/export/builder.py

Lines changed: 1 addition & 1 deletion
@@ -373,7 +373,7 @@ def pt2e_quantize(self, quantizers: Optional[List[Quantizer]]) -> "LLMEdgeManage
         ), "Please run export() first"
         m = prepare_pt2e(
             self.pre_autograd_graph_module,  # pyre-ignore[6]
-            composed_quantizer,  # pyre-ignore[6]
+            composed_quantizer,
         )
         logging.info(
             f"Calibrating with tasks: {self.calibration_tasks}, limit: {self.calibration_limit}, calibration_data: {self.calibration_data}, tokenizer_path: {self.tokenizer_path}, seq_length: {self.calibration_seq_length}"

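For context, all three call sites follow the same pt2e quantization flow: export the model to a torch.fx.GraphModule, prepare it with a quantizer, calibrate, then convert. Below is a minimal sketch of that flow, assuming the torchao import path for prepare_pt2e/convert_pt2e and ExecuTorch's XNNPACKQuantizer; the toy model and example inputs are hypothetical and not taken from this commit.

import torch
from torch.export import export_for_training
from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
from executorch.backends.xnnpack.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)

# Hypothetical toy model and inputs, for illustration only.
model = torch.nn.Linear(16, 16).eval()
example_inputs = (torch.randn(1, 16),)

# Export to a torch.fx.GraphModule, the input type prepare_pt2e expects.
model_gm = export_for_training(model, example_inputs).module()

quantizer = XNNPACKQuantizer()
quantizer.set_global(get_symmetric_quantization_config())

# With the backward-compatible types from pytorch/ao#2244, this call
# type-checks without a pyre-ignore[6] suppression.
prepared = prepare_pt2e(model_gm, quantizer)
prepared(*example_inputs)  # calibration pass over example inputs
quantized = convert_pt2e(prepared)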