Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
105 changes: 0 additions & 105 deletions backends/nxp/_passes/remove_getitem_pass.py

This file was deleted.

6 changes: 3 additions & 3 deletions backends/nxp/backend/ir/converter/conversion/common.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
#
# Copyright 2023 Martin Pavella
# Copyright 2023-2025 NXP
# Copyright 2023-2026 NXP
#
# License: MIT
# See the LICENSE_MIT for more details.
Expand All @@ -12,7 +12,7 @@
'conversion/builtin/' directory.
"""

from typing import List, MutableSequence, Optional
from typing import List, MutableSequence, Optional, Sequence

import executorch.backends.nxp.backend.ir.logger as logger
from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model
Expand Down Expand Up @@ -71,7 +71,7 @@ def extend_1d_dilation_to_2d(tflite_1d_dilation: MutableSequence):
)


def assign_2d_strides(options: StridedOptions, strides: Optional[List[int]]):
def assign_2d_strides(options: StridedOptions, strides: Sequence[int] | None):
"""Assign to 'obj' the attributes 'stride_h' and 'stride_w' from 'strides'.
If 'strides' is None, assign 1s.

Expand Down
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
# Copyright 2024-2025 NXP
# Copyright 2024-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np

from executorch.backends.nxp.backend.edge_helper import try_get_arg
from executorch.backends.nxp.backend.ir.converter.conversion import (
aten_translator,
common,
Expand All @@ -14,13 +16,19 @@
NodeConverter,
)
from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType
from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model
from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import (
max_pool_2d_options,
from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options.max_pool_2d_options import (
MaxPool2D,
)
from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec
from torch.fx import Node
from torch.nn import Parameter

KernelSize = tuple[int, int]
Stride = tuple[int, int]
Padding = tuple[int, int]
Dilation = tuple[int, int]
CeilMode = bool


class MaxPool2dConverter(NodeConverter):
"""Convert 'max_pool2d' operator to TFLite 'MaxPool2D'.
Expand All @@ -33,22 +41,66 @@ def _is_supported_in_IR(
parameters_mapping: dict[str, Parameter],
custom_delegation_options: CustomDelegationOptions,
) -> bool:
n_args = len(node.args)
kernel_size, stride, padding, dilation, ceil_mode = (
MaxPool2dConverter._get_node_args(node)
)

dilation = node.args[4] if n_args >= 5 else [1, 1]
ceil_mode = node.args[5] if n_args == 6 else False
if dilation != (1, 1):
# The Neutron IR MaxPool2D does not support dilation.
return False

if any(dil != 1 for dil in dilation) or ceil_mode:
if ceil_mode:
# This argument affects how the output shape is computed. Neutron IR only supports the default `False`.
return False

if not NodeConverter._has_shared_q_params_if_quantized(node):
return False

return True

def _get_pad_constant_value(self, input_type: TensorType) -> np.ndarray:
"""
Get scalar NumPy array with constant value used as constant value for 'Pad' operator.
@staticmethod
def _is_supported_on_target(
    node: Node,
    neutron_target_spec: NeutronTargetSpec,
    parameters_mapping: dict[str, Parameter],
    custom_delegation_options: CustomDelegationOptions,
) -> bool:
    """Check the Neutron-target specific restrictions for `aten.max_pool2d`.

    :param node: The `aten.max_pool2d` node to check.
    :param neutron_target_spec: Target specification (provides the MAC count).
    :param parameters_mapping: Mapping of parameter names to values (unused here).
    :param custom_delegation_options: Custom delegation options (unused here).
    :return: True iff the node satisfies all Neutron restrictions.
    """
    kernel, stride, padding, _dilation, _ceil_mode = (
        MaxPool2dConverter._get_node_args(node)
    )
    out_shape = node.meta["val"].shape

    # Only batch size 1 is supported.
    # /neutron-converter/src/OperatorC/MaxPoolPlugin.cpp?at=NEUTRON_SOFTWARE_2.2.2#106
    if out_shape[0] != 1:
        return False

    # Only `stride_h` is restricted by Neutron; `stride_w` may be anything.
    # /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#901
    # /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#923
    if stride[0] not in (1, 2):
        return False

    # The channel count must be a multiple of the target's number of MACs.
    # /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#903
    # /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#925
    if out_shape[1] % neutron_target_spec.get_num_macs() != 0:
        return False

    # Padding must not exceed the kernel size in either dimension. This branch is
    # untestable in practice: PyTorch itself enforces an even stricter limit
    # (padding at most half the effective kernel size) and crashes first.
    # https://github.com/pytorch/pytorch/blob/449b1768410104d3ed79d3bcfe4ba1d65c7f22c0/torch/_meta_registrations.py#L4483-L4489
    # /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#904-907
    # /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#926-929
    for pad_dim, kernel_dim in zip(padding, kernel):
        if pad_dim > kernel_dim:
            return False

    return True

@staticmethod
def _get_pad_constant_value(input_type: TensorType) -> np.ndarray:
"""Get scalar NumPy array with constant value used as constant value for 'Pad' operator.

:param input_type: Input tensor type.
:return: Scalar array with single minimum value of given type.
Expand All @@ -62,43 +114,64 @@ def _get_pad_constant_value(self, input_type: TensorType) -> np.ndarray:
case TensorType.FLOAT32:
return np.asarray([np.finfo(np.float32).min], dtype=np.float32)
case _:
raise RuntimeError("Unexpected input type for MaxPool operator.")
# Should never happen.
raise RuntimeError(
f"Unexpected input type '{input_type}' for MaxPool operator."
)

# noinspection PyMethodMayBeStatic
def _convert_2d_max_pool(
self, kernel_size, stride, padding, t_op: tflite_model.Operator
) -> list[tflite_model.Operator]:
x = t_op.tmp_inputs[0]
@staticmethod
def _get_node_args(
    node: Node,
) -> tuple[KernelSize, Stride, Padding, Dilation, CeilMode]:
    """Extract the `aten.max_pool2d` arguments of `node`, filling in defaults.

    :param node: The node representing the `aten.max_pool2d` operation.
    :return: Tuple of (kernel_size, stride, padding, dilation, ceil_mode).
    """
    # `kernel_size` and `stride` are always present: the schema default for the
    # stride equals the kernel size, so the stride slot is never empty here.
    kernel_size, stride = node.args[1], node.args[2]

    # The trailing arguments are optional; fall back to the schema defaults.
    padding, dilation, ceil_mode = (
        try_get_arg(node, index) or default
        for index, default in ((3, (0, 0)), (4, (1, 1)), (5, False))
    )

    return kernel_size, stride, padding, dilation, ceil_mode

def convert(self, node: Node):
"""Convert the `aten.max_pool2d.default` operator to Neutron IR `MaxPool2D`.
The schema is:
aten::max_pool2d(
Tensor self,
int[2] kernel_size,
int[2] stride=[], # The default value is equal to the kernel_size.
int[2] padding=0,
int[2] dilation=1,
bool ceil_mode=False
) -> Tensor
"""
self.assert_convertible(node)

kernel_size, stride, padding, dilation, ceil_mode = self._get_node_args(node)

t_op = self._create_tflite_op_with_io_tensors(node)
ops = OpsList(middle_op=t_op)
t_op.builtin_options = max_pool_2d_options.MaxPool2D()
t_op.builtin_options.filter_h = kernel_size[0]
t_op.builtin_options.filter_w = kernel_size[1]

x = t_op.tmp_inputs[0]

t_op.builtin_options = MaxPool2D()
t_op.builtin_options.filter_h, t_op.builtin_options.filter_w = kernel_size
common.assign_2d_strides(t_op.builtin_options, stride)

t_op.builtin_options.padding, explicit_padding = (
aten_translator.convert_padding(padding)
aten_translator.convert_padding(list(padding))
)

if explicit_padding is not None:
# Need to prepend a 'Pad' operator, which adds min values for type.
constant_value = self._get_pad_constant_value(x.type)
pre_pad_op = self.builder.create_pad_operator_before(
pad_op = self.builder.create_pad_operator_before(
t_op, 0, explicit_padding, constant_value=constant_value
)
ops.add_pre(pre_pad_op)

return ops.flatten()

# Maxpool2d Node format: (Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False)
def convert(self, node: Node):
self.assert_convertible(node)

n_args = len(node.args)
ops.add_pre(pad_op)

kernel_size = node.args[1]
stride = node.args[2]
padding = node.args[3] if n_args >= 4 else [0, 0]

t_op = self._create_tflite_op_with_io_tensors(node)
ops_to_add = self._convert_2d_max_pool(kernel_size, stride, padding, t_op)
self.builder.append_operators(ops_to_add)
self.builder.append_operators(ops.flatten())
Loading