# Copyright 2025 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import torch
from executorch.exir.dialects._ops import ops as exir_ops
from executorch.exir.pass_base import ExportPass, PassResult


class ConvertMinMaxPass(ExportPass):
    """
    Converts min/max to amin/amax and unrolls multi-dimensional reductions and the
    keep-dims arg to be TOSA compliant.

    The difference between max/min and amax/amin is (from the PyTorch docs):
    - amax/amin supports reducing over multiple dimensions,
    - amax/amin does not return indices,
    - amax/amin evenly distributes the gradient between equal values, while
      max(dim)/min(dim) propagates the gradient only to a single index in the
      source tensor.
    Since we do not care about gradients post-training, min/max ops can be
    converted to amin/amax as long as the indices they return are not used.

    Original:
        amax([dim1, dim2], keepdim=False)
    After pass:
        amax(dim1, keepdim=True)
        amax(dim2, keepdim=True)
        squeeze(dim=[dim1, dim2])
    """

    def check_argmax(self, node):
        """
        Raises a RuntimeError if the indices returned by the min/max op are used
        in the graph.
        """
        if node.target in [torch.ops.aten.max.dim, torch.ops.aten.min.dim]:
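            # max.dim/min.dim return a (values, indices) tuple whose users are
            # getitem nodes: either only the values are extracted, or an indices
            # getitem exists but has no users of its own.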
            no_argmax = len(node.users) == 1
            no_argmax_users = (len(node.users) == 2) and (
                len(list(node.users)[1].users) == 0
            )
            if not (no_argmax or no_argmax_users):
                raise RuntimeError("Argmax is not supported by the arm_quantizer")

    def get_variables(self, node):
        """Returns the variables specific to each op handled by the pass."""
        if node.target in [
            exir_ops.edge.aten.amax.default,
            exir_ops.edge.aten.amin.default,
        ]:
            replace_node = node
            op = node.target
            squeeze_op = exir_ops.edge.aten.squeeze_copy.dims
        elif node.target == exir_ops.edge.aten.max.dim:
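            # For the .dim variants, downstream consumers read the values through
            # a getitem node; that getitem is the node to replace.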
            replace_node = list(node.users)[0]
            op = exir_ops.edge.aten.amax.default
            squeeze_op = exir_ops.edge.aten.squeeze_copy.dims
        elif node.target == exir_ops.edge.aten.min.dim:
            replace_node = list(node.users)[0]
            op = exir_ops.edge.aten.amin.default
            squeeze_op = exir_ops.edge.aten.squeeze_copy.dims
        elif node.target == torch.ops.aten.max.dim:
            replace_node = list(node.users)[0]
            op = torch.ops.aten.amax.default
            squeeze_op = torch.ops.aten.squeeze.dims
        elif node.target == torch.ops.aten.min.dim:
            replace_node = list(node.users)[0]
            op = torch.ops.aten.amin.default
            squeeze_op = torch.ops.aten.squeeze.dims
        else:
            raise RuntimeError(
                f"{node.name} is not an accepted target for ConvertMinMaxPass()"
            )

        return (replace_node, op, squeeze_op)

    def call(self, graph_module: torch.fx.GraphModule):
        modified = False
        for node in graph_module.graph.nodes:
            if node.op != "call_function":
                continue
            if node.target not in [
                exir_ops.edge.aten.amax.default,
                exir_ops.edge.aten.amin.default,
                exir_ops.edge.aten.max.dim,
                exir_ops.edge.aten.min.dim,
                torch.ops.aten.max.dim,
                torch.ops.aten.min.dim,
            ]:
                continue

            # TODO: MLETORCH-718: Quantization of indices in arm_quantizer
            self.check_argmax(node)
            replace_node, op, squeeze_op = self.get_variables(node)

            # Unwrap args
            if len(node.args) == 2:
                input_node, dims = node.args
                keepdims = False
            elif len(node.args) == 3:
                input_node, dims, keepdims = node.args
            else:
                raise RuntimeError(f"Unexpected arg size in {node.name}")

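            # dims may be a bare int (as in max.dim/min.dim) or a sequence of
            # ints; normalize it to a list either way.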
            try:
                iter(dims)
            except TypeError:
                dims = [dims]
            else:
                dims = list(dims)

            # Unroll multi-dimensional reduction and keep-dims arg
            with graph_module.graph.inserting_before(node):
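                # Reduce one dim at a time with keepdim=True, so the remaining
                # dim indices stay valid for the following reductions.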
                for dim in dims:
                    args = (input_node, dim, True)
                    input_node = graph_module.graph.create_node(
                        "call_function", op, args, node.kwargs
                    )

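                # The original op reduced all dims at once; if keepdim was not
                # requested, squeeze away every reduced dim in a single op.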
                if not keepdims:
                    input_node = graph_module.graph.create_node(
                        "call_function",
                        squeeze_op,
                        (input_node, dims),
                    )

                replace_node.replace_all_uses_with(input_node)
                modified = True

        if modified:
            graph_module.graph.eliminate_dead_code()
            graph_module.recompile()
            graph_module = super().call(graph_module).graph_module

        return PassResult(graph_module, modified)
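

# A minimal usage sketch (not part of the pass), assuming an exported program
# whose graph contains aten.max.dim, e.g. from a module calling torch.max with
# a dim argument. The module and input shapes here are illustrative only.
if __name__ == "__main__":

    class MaxModule(torch.nn.Module):
        def forward(self, x):
            values, _ = torch.max(x, dim=1)  # indices are never used
            return values

    exported = torch.export.export(MaxModule(), (torch.randn(2, 3, 4),))
    result = ConvertMinMaxPass()(exported.graph_module)
    # The graph should now contain amax + squeeze instead of max.dim + getitem.
    print(result.graph_module.graph)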