
ValueError: Torch var stride_width.1 not found in context #1788

Closed
@ivyas21

Description

When converting a traced torchvision detection model, an expected input to a mul operation is never registered in the conversion context, so conversion fails with: ValueError: Torch var stride_width.1 not found in context

Stack Trace

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
/tmp/ipykernel_16536/1090456604.py in <module>
      5     traced_model = torch.jit.trace(model_to_trace, example_image).eval()
      6 
----> 7 detector_mlmodel = ct.convert(traced_model, inputs=[ct.ImageType(shape=(1, 3, 224, 224))])
      8 detector_mlmodel.save("segmenter.mlmodel")

/opt/conda/lib/python3.7/site-packages/coremltools/converters/_converters_entry.py in convert(model, source, inputs, outputs, classifier_config, minimum_deployment_target, convert_to, compute_precision, skip_model_load, compute_units, package_dir, debug)
    454         package_dir=package_dir,
    455         debug=debug,
--> 456         specification_version=specification_version,
    457     )
    458 

/opt/conda/lib/python3.7/site-packages/coremltools/converters/mil/converter.py in mil_convert(model, convert_from, convert_to, compute_units, **kwargs)
    185         See `coremltools.converters.convert`
    186     """
--> 187     return _mil_convert(model, convert_from, convert_to, ConverterRegistry, MLModel, compute_units, **kwargs)
    188 
    189 

/opt/conda/lib/python3.7/site-packages/coremltools/converters/mil/converter.py in _mil_convert(model, convert_from, convert_to, registry, modelClass, compute_units, **kwargs)
    214                             convert_to,
    215                             registry,
--> 216                             **kwargs
    217                          )
    218 

/opt/conda/lib/python3.7/site-packages/coremltools/converters/mil/converter.py in mil_convert_to_proto(model, convert_from, convert_to, converter_registry, **kwargs)
    279     frontend_converter = frontend_converter_type()
    280 
--> 281     prog = frontend_converter(model, **kwargs)
    282 
    283     if convert_to.lower() != "neuralnetwork":

/opt/conda/lib/python3.7/site-packages/coremltools/converters/mil/converter.py in __call__(self, *args, **kwargs)
    107         from .frontend.torch import load
    108 
--> 109         return load(*args, **kwargs)
    110 
    111 

/opt/conda/lib/python3.7/site-packages/coremltools/converters/mil/frontend/torch/load.py in load(model_spec, inputs, specification_version, debug, outputs, cut_at_symbols, **kwargs)
     55     inputs = _convert_to_torch_inputtype(inputs)
     56     converter = TorchConverter(torchscript, inputs, outputs, cut_at_symbols, specification_version)
---> 57     return _perform_torch_convert(converter, debug)
     58 
     59 

/opt/conda/lib/python3.7/site-packages/coremltools/converters/mil/frontend/torch/load.py in _perform_torch_convert(converter, debug)
     94 def _perform_torch_convert(converter, debug):
     95     try:
---> 96         prog = converter.convert()
     97     except RuntimeError as e:
     98         if debug and "convert function" in str(e):

/opt/conda/lib/python3.7/site-packages/coremltools/converters/mil/frontend/torch/converter.py in convert(self)
    279 
    280             # Add the rest of the operations
--> 281             convert_nodes(self.context, self.graph)
    282 
    283             graph_outputs = [self.context[name] for name in self.graph.outputs]

/opt/conda/lib/python3.7/site-packages/coremltools/converters/mil/frontend/torch/ops.py in convert_nodes(context, graph)
     87 
     88         context.prepare_for_conversion(node)
---> 89         add_op(context, node)
     90 
     91         # We've generated all the outputs the graph needs, terminate conversion.

/opt/conda/lib/python3.7/site-packages/coremltools/converters/mil/frontend/torch/ops.py in mul(context, node)
   1388 @register_torch_op
   1389 def mul(context, node):
-> 1390     inputs = _get_inputs(context, node, expected=2)
   1391     x, y = promote_input_dtypes(inputs)
   1392     res = mb.mul(x=x, y=y, name=node.name)

/opt/conda/lib/python3.7/site-packages/coremltools/converters/mil/frontend/torch/ops.py in _get_inputs(context, node, expected, min_expected)
    187     value of @expected.
    188     """
--> 189     inputs = [context[name] for name in node.inputs]
    190     if expected is not None:
    191         expected = [expected] if not isinstance(expected, (list, tuple)) else expected

/opt/conda/lib/python3.7/site-packages/coremltools/converters/mil/frontend/torch/ops.py in <listcomp>(.0)
    187     value of @expected.
    188     """
--> 189     inputs = [context[name] for name in node.inputs]
    190     if expected is not None:
    191         expected = [expected] if not isinstance(expected, (list, tuple)) else expected

/opt/conda/lib/python3.7/site-packages/coremltools/converters/mil/frontend/torch/converter.py in __getitem__(self, torch_name)
     87                 return self._current_graph[idx][torch_name]
     88         raise ValueError(
---> 89             "Torch var {} not found in context {}".format(torch_name, self.name)
     90         )
     91 

ValueError: Torch var stride_width.1 not found in context 
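For reference, the final frame above is the converter's name-to-var lookup: as each TorchScript node is converted, its outputs are registered in a context under their TorchScript value names, and later nodes look their inputs up by name. The mul node asks for stride_width.1, but no earlier node ever registered that name. A minimal sketch of that lookup path (illustration only, simplified from the traceback; the real class in coremltools/converters/mil/frontend/torch/converter.py also handles nested graph scopes):

# Simplified, hypothetical sketch of the lookup that raises above --
# not the actual coremltools implementation.
class Context:
    def __init__(self, name=""):
        self.name = name
        self._vars = {}  # TorchScript value name -> converted MIL var

    def add(self, torch_name, var):
        # Called as each node's outputs are converted.
        self._vars[torch_name] = var

    def __getitem__(self, torch_name):
        if torch_name in self._vars:
            return self._vars[torch_name]
        # Error path hit during this conversion: the mul node's input
        # "stride_width.1" was never registered by any earlier node.
        raise ValueError(
            "Torch var {} not found in context {}".format(torch_name, self.name)
        )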

Steps To Reproduce

import coremltools as ct
import torch, torchvision

detector_model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(weights="DEFAULT")
detector_model = detector_model.eval()

class FasterRCNN_MobileNetV3_AdapterModel(torch.nn.Module):
    """This adapter is only here to unbox the first output."""
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, x):
        result = self.model(x)
        return result[0]['boxes'], result[0]['labels'], result[0]['scores']

adapted_detector_model = FasterRCNN_MobileNetV3_AdapterModel(detector_model)

model_to_trace = adapted_detector_model
with torch.inference_mode():
    example_image = torch.rand(1, 3, 224, 224)
    out = model_to_trace(example_image)
    traced_model = torch.jit.trace(model_to_trace, example_image).eval()
    
detector_mlmodel = ct.convert(traced_model, inputs=[ct.ImageType(shape=(1, 3, 224, 224))])
detector_mlmodel.save("segmenter.mlmodel")
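
One way to narrow this down (a hypothetical debugging step, not from the original report) is to convert just the backbone: if that succeeds, the missing stride_width.1 var most likely originates in the detection-specific code (the RPN/ROI heads or the GeneralizedRCNNTransform pre/post-processing) rather than in the MobileNetV3 feature extractor. A hedged sketch, assuming the standalone backbone is traceable; BackboneAdapter is an illustrative helper, not part of torchvision:

import coremltools as ct
import torch, torchvision

class BackboneAdapter(torch.nn.Module):
    """Hypothetical helper: the FPN backbone returns an OrderedDict of
    feature maps, so unbox it into a tuple of tensors for tracing."""
    def __init__(self, backbone):
        super().__init__()
        self.backbone = backbone

    def forward(self, x):
        features = self.backbone(x)  # Dict[str, Tensor]
        return tuple(features.values())

detector_model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(weights="DEFAULT").eval()
backbone_only = BackboneAdapter(detector_model.backbone).eval()

with torch.inference_mode():
    example_image = torch.rand(1, 3, 224, 224)
    traced_backbone = torch.jit.trace(backbone_only, example_image)

# If this converts cleanly, the failure is specific to the detection head.
backbone_mlmodel = ct.convert(traced_backbone, inputs=[ct.ImageType(shape=(1, 3, 224, 224))])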

System environment:

  • coremltools version: 6.2
  • OS: Linux (Linux foohostname 4.19.0-23-cloud-amd64 #1 SMP Debian 4.19.269-1 (2022-12-20) x86_64 GNU/Linux)
  • Any other relevant version information (e.g. PyTorch or TensorFlow version):
    • Python: 3.7
    • PyTorch: 1.12.1+cu102
    • Other libraries installed as dependencies of coremltools:
Requirement already satisfied: coremltools==6.2 in /opt/conda/lib/python3.7/site-packages (6.2)
Requirement already satisfied: tqdm in /opt/conda/lib/python3.7/site-packages (from coremltools==6.2) (4.64.1)
Requirement already satisfied: protobuf<=4.0.0,>=3.1.0 in /home/jupyter/.local/lib/python3.7/site-packages (from coremltools==6.2) (3.20.1)
Requirement already satisfied: packaging in /opt/conda/lib/python3.7/site-packages (from coremltools==6.2) (21.3)
Requirement already satisfied: numpy>=1.14.5 in /opt/conda/lib/python3.7/site-packages (from coremltools==6.2) (1.21.6)
Requirement already satisfied: sympy in /opt/conda/lib/python3.7/site-packages (from coremltools==6.2) (1.10.1)
Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging->coremltools==6.2) (3.0.9)
Requirement already satisfied: mpmath>=0.19 in /opt/conda/lib/python3.7/site-packages (from sympy->coremltools==6.2) (1.2.1)

Please advise. Thank you!

Metadata

Labels: PyTorch (traced) · bug (unexpected behaviour that should be corrected) · triaged (reviewed and examined; release has been assigned if applicable)
