Skip to content

Commit 66ba19a

Browse files
Lily Orth-Smith and Matthew Brookhart
authored and committed
Dynamic ONNX importer: Upsampling and Pad (#2)
1 parent 569ed7f commit 66ba19a

File tree

2 files changed

+54
-42
lines changed

2 files changed

+54
-42
lines changed

python/tvm/relay/frontend/onnx.py

Lines changed: 46 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,6 @@
4141

4242
__all__ = ['from_onnx']
4343

44-
4544
class onnx_input():
4645
""" Dual purpose list or dictionary access object."""
4746
def __init__(self):
@@ -127,7 +126,6 @@ def revert_caffe2_pad(pads):
127126
raise tvm.error.OpAttributeInvalid('Number of pads must be either 2 or 4.')
128127
return pads
129128

130-
131129
def get_pad_pair(input1d, kernel1d, stride1d):
132130
"""infer pad size"""
133131
if input1d % stride1d == 0:
@@ -641,26 +639,22 @@ def _impl_v2(cls, inputs, attr, params):
641639

642640
@classmethod
643641
def _impl_v11(cls, inputs, attr, params):
644-
pad_width = []
645-
pads = infer_value_simulated(inputs[1], params).asnumpy()
642+
pads = inputs[1]
646643
if len(inputs) == 3:
647-
value = infer_value_simulated(inputs[2], params).asnumpy().item()
644+
value = _op.take(inputs[2], _op.const(0))
648645
else:
649646
value = 0
650-
attr["pad_value"] = value
651-
dims = int(len(pads) / 2)
652-
for i in range(dims):
653-
pad_width.append((pads[i], pads[i + dims]))
654-
attr['pad_width'] = pad_width
647+
648+
pads_shape = infer_shape(pads)
649+
dims = int(pads_shape[0] / 2)
650+
pad_width_expr = _op.transpose(_op.reshape(pads, (2, dims)))
655651
pad_mode = attr.get('mode', b'constant').decode('utf-8')
656-
if pad_mode in ['constant', 'edge', 'reflect']:
657-
attr['pad_mode'] = pad_mode
658-
attr.pop('mode', None)
659-
else:
652+
653+
if not pad_mode in ['constant', 'edge', 'reflect']:
660654
raise tvm.error.OpAttributeInvalid('Value ' + pad_mode +
661655
' in attribute "mode" is invalid for operator Pad.')
662656

663-
return AttrCvt('pad')(inputs[:1], attr, params)
657+
return _op.nn.pad(inputs[0], pad_width_expr, value, pad_mode=pad_mode)
664658

665659

666660
class ParametricSoftPlus(OnnxOpConverter):
@@ -868,17 +862,24 @@ class Upsample(OnnxOpConverter):
868862
@classmethod
869863
def _impl_v9(cls, inputs, attr, params):
870864
scales = attr.get('scales')
865+
866+
input_shape = infer_shape(inputs[0])
867+
dims = len(input_shape)
868+
871869
if not scales:
872870
#Here we are going to higher OPSET version.
873-
assert len(inputs) == 2, "Upsample op take 2 inputs, {} given".format(len(inputs))
871+
assert len(inputs) == 2, "Upsample op takes 2 inputs, {} given".format(len(inputs))
872+
874873
if get_name(inputs[1]) in params:
875874
scales = params[inputs[1].name_hint].asnumpy()
876-
else:
875+
elif dims == 5:
877876
scales = infer_value_simulated(inputs[1], params).asnumpy()
878-
inputs = inputs[:1]
879-
assert scales[0] == 1.0 and scales[1] == 1.0
880-
input_shape = infer_shape(inputs[0])
881-
dims = len(input_shape)
877+
else:
878+
scales = inputs[1]
879+
880+
if not isinstance(scales, Call):
881+
assert scales[0] == 1.0 and scales[1] == 1.0
882+
882883
mode = attr.get('mode')
883884
if mode == b'nearest':
884885
method = "nearest_neighbor"
@@ -887,21 +888,31 @@ def _impl_v9(cls, inputs, attr, params):
887888
else:
888889
raise tvm.error.OpAttributeInvalid(
889890
'Value {} in attribute "mode" of operator Upsample is not valid.'.format(mode))
890-
attr = {'scale_h': scales[-2], 'scale_w': scales[-1], 'method': method}
891+
892+
if method == 'nearest_neighbor':
893+
align_corners=False
894+
else:
895+
align_corners=True
896+
# in 3d case, we use the purely static op
891897
if dims == 5:
892-
assert len(scales) == 5
893-
attr['scale_d'] = scales[-3]
894-
attr['layout'] = 'NCDHW'
895-
op_name = 'upsampling3d'
898+
scale_h = scales[-2]
899+
scale_w = scales[-1]
900+
scale_d = scales[-3]
901+
layout = 'NCDHW'
902+
return _op.nn.upsampling3d(inputs[0], scale_d, scale_h, scale_w,
903+
layout=layout, method=method)
904+
# in 2d case, use dynamic op
896905
else:
897-
assert len(scales) == 4
898-
attr['layout'] = 'NCHW'
899-
if method == 'nearest_neighbor':
900-
attr['align_corners'] = False
906+
if isinstance(scales, Call):
907+
scale_h = _op.take(scales, _op.const(3))
908+
scale_w = _op.take(scales, _op.const(4))
901909
else:
902-
attr['align_corners'] = True
903-
op_name = 'upsampling'
904-
return AttrCvt(op_name)(inputs, attr)
910+
assert len(scales) == 4
911+
scale_h = scales[-2]
912+
scale_w = scales[-1]
913+
layout = 'NCHW'
914+
915+
return _op.nn.upsampling(inputs[0], scale_h, scale_w, layout=layout, method=method, align_corners=align_corners)
905916

906917

907918
class Shape(OnnxOpConverter):
@@ -2289,3 +2300,5 @@ def from_onnx(model, shape=None, dtype="float32", opset=None, freeze_params=Fals
22892300
opset = 1
22902301
mod, params = g.from_onnx(graph, opset, freeze_params)
22912302
return mod, params
2303+
2304+

tests/python/frontend/onnx/test_forward.py

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -988,11 +988,9 @@ def _test_upsample_bilinear_opset9():
988988
graph, producer_name='upsample_bilinear_opset9_test')
989989

990990
for target, ctx in tvm.testing.enabled_targets():
991-
tvm_out = get_tvm_output(
992-
model, in_array, target, ctx, out_shape, 'float32')
991+
tvm_out = get_tvm_output_with_vm(model, [in_array], target, ctx, opset=9, freeze_params=True)
993992
tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
994993

995-
996994
def _test_upsample3d_trilinear():
997995
scale = 2
998996
in_shape = (1, 1, 3, 3, 3)
@@ -1026,7 +1024,8 @@ def _test_upsample3d_trilinear():
10261024
model, in_array, target, ctx, out_shape, 'float32')
10271025
tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
10281026

1029-
@tvm.testing.uses_gpu
1027+
# TODO(mbrookhart): enable once VM supports heterogenous execution
1028+
# @tvm.testing.uses_gpu
10301029
def test_upsample():
10311030
_test_upsample_nearest()
10321031
_test_upsample_bilinear()
@@ -1419,7 +1418,7 @@ def verify_pad_v11(indata, pads, mode='constant', value=0.0):
14191418
outputs=[helper.make_tensor_value_info("output",
14201419
TensorProto.FLOAT, list(outdata.shape))])
14211420
else:
1422-
inputs = [indata, pads, np.array([value])]
1421+
inputs = [indata, pads, np.array([value]).astype("float32")]
14231422
outdata = np.pad(indata, pad_width=np_pads,
14241423
mode='constant', constant_values=value)
14251424
node = helper.make_node(
@@ -1435,7 +1434,7 @@ def verify_pad_v11(indata, pads, mode='constant', value=0.0):
14351434
helper.make_tensor_value_info("pads",
14361435
TensorProto.INT64,(len(pads),)),
14371436
helper.make_tensor_value_info("constant_value",
1438-
TensorProto.INT64,(1,)),
1437+
TensorProto.FLOAT,(1,)),
14391438
],
14401439
initializer=[helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads),
14411440
helper.make_tensor("constant_value", TensorProto.FLOAT, (1,), [value])],
@@ -1444,12 +1443,12 @@ def verify_pad_v11(indata, pads, mode='constant', value=0.0):
14441443
model = helper.make_model(graph, producer_name='pad_test')
14451444
# tvm result
14461445
for target, ctx in tvm.testing.enabled_targets():
1447-
tvm_out = get_tvm_output(
1448-
model, inputs, target, ctx, outdata.shape, 'float32', opset=11)
1446+
tvm_out = get_tvm_output_with_vm(model, inputs, target, ctx, opset=11, freeze_params=False)
14491447
tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
14501448

14511449

1452-
@tvm.testing.uses_gpu
1450+
# TODO(mbrookhart): enable once VM supports heterogenous execution
1451+
# @tvm.testing.uses_gpu
14531452
def test_pad():
14541453
verify_pad(np.random.randn(2, 2).astype(
14551454
np.float32), [0, 1, 0, 0], 'constant', 0.0)

0 commit comments

Comments (0)