Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

[v1.x] Update onnx support to work with onnx 1.7.0 with most CV models #19017

Merged
merged 35 commits into from
Sep 11, 2020
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
51715d7
fix pooling_convention warning when convert model to onnx (#18529)
HaoLiuHust Aug 10, 2020
7b7141b
Prevent uninitialized variable error.
Aug 18, 2020
aa1515b
Initial work to get Dropout to work with onnx 1.7
Aug 26, 2020
77fb75f
Remove trailing whitespace for pylint.
Aug 26, 2020
ae1e74d
Fix tensor initialization for Dropout operator input.
Aug 27, 2020
0faeeef
Update Clip operator to support latest ONNX opset versions by moving …
Aug 27, 2020
e9453c5
Fix whitespace.
Aug 27, 2020
1d5b664
Add support for importing Dropout operator in ONNX opset version >= 12.
Aug 28, 2020
9c5c034
Add support for importing ONNX opsets >= 11 to the clip operator.
Aug 28, 2020
aabcdd5
Add optional opset_version parameter that defaults to latest opset ve…
Aug 28, 2020
edd6f53
Add optional parameter to create_model() that allows user to specify …
Aug 28, 2020
2dfa22f
Use opset_version argument to determine operator format.
Aug 28, 2020
6c4e555
Add an opset_version parameter to from_onnx() so at operator conversio…
Aug 28, 2020
7305b9d
For Clip and Dropout operators, use opset version from passed proto_o…
Aug 28, 2020
39da0fc
Use same tolerances that are in master.
Aug 31, 2020
e36c200
Change Pad operator to use inputs instead of attributes for newer ops…
Sep 1, 2020
e4a9318
Add documentation opset_version parameter.
Sep 1, 2020
85a0ea6
Add opset_version parameters to unit tests.
Sep 1, 2020
0738620
Add test script for testing inference with onnxruntime on CV models f…
Sep 1, 2020
885862d
Add license and clean up imports.
Sep 2, 2020
9bb2b47
Install onnxruntime in docker container for unit tests.
Sep 2, 2020
50d929c
Add onnxruntime to test dependencies.
Sep 2, 2020
a6e6967
Install onnxruntime into CentOS docker image.
Sep 2, 2020
0bfec8e
Disable testing squeezenet models for now.
Sep 2, 2020
26708e3
Update onnx version.
Sep 2, 2020
d620548
Fix typo.
Sep 2, 2020
c7b55c1
Use mx.image.imread instead of PIL module.
Sep 2, 2020
f49e47a
ONNX import: use Conv pad attribute for symmetrical padding (#18675)
Kh4L Jul 24, 2020
36d92ca
Install onnx in CentOS containers when installing python.
Sep 2, 2020
b102f78
Update import and export of some ONNX ops to support newer opset vers…
Sep 3, 2020
8bd6a64
Re-enable squeezenet model testings in onnxruntime.
Sep 3, 2020
a3ea851
Run the onnxruntime inference tests in the ONNX pipeline instead of n…
Sep 3, 2020
a5246fe
Add missed return value.
Sep 3, 2020
29dcdf3
Refactor code based on review comment.
Sep 8, 2020
d597b5a
Since the onnx tests are only run on ubuntu_cpu images, we don't need…
Sep 8, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Update import and export of some ONNX ops to support newer opset vers…
…ions - this gets all ONNX unit tests to pass with onnx 1.7.
  • Loading branch information
Joe Evans committed Sep 3, 2020
commit b102f78a2a75ced5dbd62f2548cd0b43862b6fef
78 changes: 59 additions & 19 deletions python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
Original file line number Diff line number Diff line change
Expand Up @@ -471,7 +471,7 @@ def convert_pad(node, **kwargs):
pad_mode = attrs.get("mode")

if pad_mode == "constant":
pad_value = float(attrs.get("constant_value", 0.0))
pad_value = np.float32(attrs.get("constant_value", 0.0))
if opset_version >= 11:
# starting with opset 11, pads and constant_value are inputs instead of attributes
from onnx.helper import make_tensor, make_tensor_value_info
Expand All @@ -494,6 +494,7 @@ def convert_pad(node, **kwargs):
"Pad",
input_nodes,
[name],
mode='constant',
name=name
)
return [pads_value_node, const_value_node, pad_node]
Expand All @@ -508,16 +509,35 @@ def convert_pad(node, **kwargs):
name=name
)
else:
node = onnx.helper.make_node(
'Pad',
inputs=input_nodes,
outputs=[name],
mode=pad_mode,
pads=onnx_pad_width,
name=name
)

return [node]
if opset_version >= 11:
# starting with opset 11, pads and constant_value are inputs instead of attributes
from onnx.helper import make_tensor, make_tensor_value_info
initializer = kwargs["initializer"]
pads_input_name = name + "_pads"
pads_input_type = onnx.TensorProto.INT64
pads_input_shape = np.shape(np.array(onnx_pad_width))
pads_value_node = make_tensor_value_info(pads_input_name, pads_input_type, pads_input_shape)
pads_tensor_node = make_tensor(pads_input_name, pads_input_type, pads_input_shape, onnx_pad_width)
initializer.append(pads_tensor_node)
input_nodes.append(pads_input_name)
pad_node = onnx.helper.make_node(
"Pad",
input_nodes,
[name],
mode=pad_mode,
name=name
)
return [pads_value_node, pad_node]
else:
node = onnx.helper.make_node(
'Pad',
inputs=input_nodes,
outputs=[name],
mode=pad_mode,
pads=onnx_pad_width,
name=name
)
return [node]


def create_helper_trans_node(op_name, input_node, node_name):
Expand Down Expand Up @@ -2155,14 +2175,34 @@ def convert_topk(node, **kwargs):
else:
raise NotImplementedError("ONNX expects both value and indices as output")

topk_node = onnx.helper.make_node(
"TopK",
input_nodes,
outputs,
axis=axis,
k=k,
name=name
)
opset_version = kwargs['opset_version']
if opset_version >= 10:
from onnx.helper import make_tensor, make_tensor_value_info
initializer = kwargs["initializer"]
k_input_name = name + "_k"
k_input_type = onnx.TensorProto.INT64
k_value_node = make_tensor_value_info(k_input_name, k_input_type, ())
k_tensor_node = make_tensor(k_input_name, k_input_type, (), k)
initializer.append(k_tensor_node)
input_nodes.append(k_input_name)

topk_node = onnx.helper.make_node(
"TopK",
input_nodes,
outputs,
axis=axis,
name=name
)
return [k_value_node, topk_node]
else:
topk_node = onnx.helper.make_node(
"TopK",
input_nodes,
outputs,
axis=axis,
k=k,
name=name
)

return [topk_node]

Expand Down
66 changes: 50 additions & 16 deletions python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
Original file line number Diff line number Diff line change
Expand Up @@ -240,11 +240,24 @@ def relu(attrs, inputs, proto_obj):

def pad(attrs, inputs, proto_obj):
""" Add padding to input tensor"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'pads' : 'pad_width',
'value' : 'constant_value'
})
new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width'))
return 'pad', new_attrs, inputs
opset_version = proto_obj.opset_version
if 'mode' not in attrs.keys():
attrs['mode'] = 'constant'
if opset_version >= 11:
pads = list(proto_obj._params[inputs[1].name].asnumpy())
pads = tuple([int(i) for i in pads])
new_attrs = translation_utils._add_extra_attributes(attrs, {'pad_width': pads})
if len(inputs) == 3:
const = proto_obj._params[inputs[2].name].asnumpy()[0]
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'constant_value': const})
new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width'))
return 'pad', new_attrs, inputs[0]
else:
new_attrs = translation_utils._fix_attribute_names(attrs, {'pads' : 'pad_width',
'value' : 'constant_value'
})
new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width'))
return 'pad', new_attrs, inputs

def matrix_multiplication(attrs, inputs, proto_obj):
"""Performs general matrix multiplication"""
Expand Down Expand Up @@ -367,7 +380,7 @@ def deconv(attrs, inputs, proto_obj):
new_attrs = translation_utils._fix_bias('Deconvolution', new_attrs, len(inputs))

new_attrs = translation_utils._fix_channels('Deconvolution', new_attrs, inputs, proto_obj)
kernel = new_attrs['kernel']
kernel = new_attrs['kernel'] if 'kernel' in new_attrs else []
stride = new_attrs['stride'] if 'stride' in new_attrs else []
padding = new_attrs['pad'] if 'pad' in new_attrs else []
dilations = new_attrs['dilate'] if 'dilate' in new_attrs else []
Expand Down Expand Up @@ -522,15 +535,30 @@ def _slice(attrs, inputs, proto_obj):
"""Returns a slice of the input tensor along multiple axes."""
input_tensor_data = proto_obj.model_metadata.get('input_tensor_data')[0]
input_shape = input_tensor_data[1]
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis',
'ends' : 'end',
'starts' : 'begin'})
# onnx slice provides slicing on multiple axis. Adding multiple slice_axis operator
# for multiple axes from mxnet
begin = new_attrs.get('begin')
end = list(new_attrs.get('end'))
axes = new_attrs.get('axis', tuple(range(len(begin))))

if proto_obj.opset_version >= 10:
begin = proto_obj._params[inputs[1].name].asnumpy()
end = proto_obj._params[inputs[2].name].asnumpy()
if len(inputs) >= 4:
axes = list(proto_obj._params[inputs[3].name].asnumpy())
axes = tuple([int(i) for i in axes])
else:
axes = tuple(range(len(begin)))
new_attrs = translation_utils._add_extra_attributes(attrs, {'axes' : axes,
'begin' : begin,
'end' : end
})
else:
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis',
'ends' : 'end',
'starts' : 'begin'})
# onnx slice provides slicing on multiple axis. Adding multiple slice_axis operator
# for multiple axes from mxnet
begin = new_attrs.get('begin')
end = list(new_attrs.get('end'))
axes = new_attrs.get('axis', tuple(range(len(begin))))

for i, axis in enumerate(axes):
end[i] = None if end[i] >= input_shape[axis] else end[i]
slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
Expand Down Expand Up @@ -826,4 +854,10 @@ def topk(attrs, inputs, proto_obj):
new_attrs = translation_utils._add_extra_attributes(attrs,
{'ret_typ': 'both',
'dtype': 'int64'})
return 'topk', new_attrs, inputs
opset_version = proto_obj.opset_version
if opset_version >= 10:
k_vals = proto_obj._params[inputs[1].name].asnumpy()
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'k': k_vals})
return 'topk', new_attrs, inputs[0]
else:
return 'topk', new_attrs, inputs