Skip to content

Commit

Permalink
Add missing ops to ArmQuantizer (#6780)
Browse files Browse the repository at this point in the history
Add missing ops to quantizer

- Add missing ops such as chunk to quantizer.
- Remove faulty ensures() from match_arg_ranks pass.
- Remove sum annotator and move sum annotation to one-to-one annotator.

Signed-off-by: Oscar Andersson <oscar.andersson@arm.com>
  • Loading branch information
oscarandersson8218 authored Nov 12, 2024
1 parent dc41596 commit 995c2bf
Show file tree
Hide file tree
Showing 6 changed files with 4 additions and 70 deletions.
11 changes: 0 additions & 11 deletions backends/arm/_passes/match_arg_ranks_pass.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,14 +113,3 @@ def call(self, graph_module: GraphModule) -> PassResult:
graph_module.recompile()
graph_module = super().call(graph_module).graph_module
return PassResult(graph_module, True)

def ensures(self, graph_module):
for node in graph_module.graph.nodes:
if node.op != "call_function" or node.target not in self.targeted_ops:
continue
arg0_rank = node.args[0].meta["val"].dim()
arg1_rank = node.args[1].meta["val"].dim()
if arg0_rank != arg1_rank:
raise ValueError(
"Arguments of arithmetic operators need to have the same rank!"
)
1 change: 0 additions & 1 deletion backends/arm/quantizer/arm_quantizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -270,7 +270,6 @@ class ArmQuantizer(Quantizer):
"mm",
"one_to_one",
"generic",
"sum",
]

def __init__(self) -> None:
Expand Down
1 change: 0 additions & 1 deletion backends/arm/quantizer/quantization_annotation/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,5 +59,4 @@ def decorator(annotator: AnnotatorType):
mul_annotator,
one_to_one_annotator,
sub_annotator,
sum_annotator,
)
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@
# torch.ops.aten.view_as_real.default,
# torch.ops.aten.view_as_real_copy.default,
torch.ops.aten.view.default,
torch.ops.aten.view_as.default,
torch.ops.aten.view_copy.default,
torch.ops.aten.select.int,
torch.ops.aten.select_copy.int,
Expand All @@ -53,6 +54,8 @@
torch.ops.aten.flip.default,
torch.ops.aten.cat.default,
torch.ops.aten.stack.default,
torch.ops.aten.chunk.default,
torch.ops.aten.contiguous.default,
]


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ def _annotate_one_to_one(
torch.ops.aten.rsqrt.default,
torch.ops.aten.sigmoid.default,
torch.ops.aten.tanh.default,
torch.ops.aten.sum.dim_IntList,
)
for node in gm.graph.nodes:
if node.op != "call_function" or node.target not in one_to_one_ops:
Expand Down
57 changes: 0 additions & 57 deletions backends/arm/quantizer/quantization_annotation/sum_annotator.py

This file was deleted.

0 comments on commit 995c2bf

Please sign in to comment.