100 changes: 81 additions & 19 deletions tests/test_backend.py
@@ -54,6 +54,7 @@

if is_tf2():
conv2d_backprop_input = tf.compat.v1.nn.conv2d_backprop_input
conv3d_transpose = tf.compat.v1.nn.conv3d_transpose
multinomial = tf.compat.v1.random.multinomial
space_to_batch_nd = tf.compat.v1.space_to_batch_nd
batch_to_space_nd = tf.compat.v1.batch_to_space_nd
@@ -73,6 +74,7 @@
fake_quant_with_min_max_args = tf.quantization.fake_quant_with_min_max_args
elif LooseVersion(tf.__version__) >= "1.13":
conv2d_backprop_input = tf.compat.v1.nn.conv2d_backprop_input
conv3d_transpose = tf.compat.v1.nn.conv3d_transpose
multinomial = tf.compat.v1.random.multinomial
space_to_batch_nd = tf.compat.v1.space_to_batch_nd
batch_to_space_nd = tf.compat.v1.batch_to_space_nd
@@ -93,6 +95,7 @@
fake_quant_with_min_max_args = tf.compat.v1.quantization.fake_quant_with_min_max_args
else:
conv2d_backprop_input = tf.nn.conv2d_backprop_input
conv3d_transpose = tf.nn.conv3d_transpose
multinomial = tf.multinomial
space_to_batch_nd = tf.space_to_batch_nd
batch_to_space_nd = tf.batch_to_space_nd
@@ -3136,45 +3139,38 @@ def func(x):
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_const(self):
input_sizes_val_ = np.array([1, 10, 10, 3], dtype=np.int32)
filter_val_ = np.random.randint(low=0, high=256, size=[3, 3, 3, 5])
out_backprop_val_ = np.random.randint(low=0, high=256, size=[1, 10, 10, 5])
def func():
def func(filter_val, out_backprop_val):
input_sizes_val = tf.constant(input_sizes_val_, dtype=tf.int32)
filter_val = tf.constant(filter_val_, dtype=tf.float32)
out_backprop_val = tf.constant(out_backprop_val_, dtype=tf.float32)
return conv2d_backprop_input(input_sizes=input_sizes_val, filter=filter_val,
out_backprop=out_backprop_val, strides=[1, 1, 1, 1],
padding='SAME', name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {})
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
out_backprop_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: filters_val, _INPUT1: out_backprop_val})

@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_const_strided(self):
input_sizes_val_ = np.array([1, 10, 10, 3], dtype=np.int32)
filter_val_ = np.random.randint(low=0, high=256, size=[3, 3, 3, 5])
out_backprop_val_ = np.random.randint(low=0, high=256, size=[1, 5, 5, 5])

def func():
def func(filter_val, out_backprop_val):
input_sizes_val = tf.constant(input_sizes_val_, dtype=tf.int32)
filter_val = tf.constant(filter_val_, dtype=tf.float32)
out_backprop_val = tf.constant(out_backprop_val_, dtype=tf.float32)
return conv2d_backprop_input(input_sizes=input_sizes_val, filter=filter_val,
out_backprop=out_backprop_val, strides=[1, 2, 2, 1],
padding='SAME', name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {})
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
out_backprop_val = np.random.randint(low=0, high=256, size=[1, 5, 5, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: filters_val, _INPUT1: out_backprop_val})

@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_const_valid(self):
input_sizes_val_ = np.array([1, 12, 12, 3], dtype=np.int32)
filter_val_ = np.random.randint(low=0, high=256, size=[3, 3, 3, 5])
out_backprop_val_ = np.random.randint(low=0, high=256, size=[1, 10, 10, 5])
def func():
def func(filter_val, out_backprop_val):
input_sizes_val = tf.constant(input_sizes_val_, dtype=tf.int32)
filter_val = tf.constant(filter_val_, dtype=tf.float32)
out_backprop_val = tf.constant(out_backprop_val_, dtype=tf.float32)
return conv2d_backprop_input(input_sizes=input_sizes_val, filter=filter_val,
out_backprop=out_backprop_val, strides=[1, 1, 1, 1],
padding='VALID', name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {})
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
out_backprop_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: filters_val, _INPUT1: out_backprop_val})

@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput(self):
@@ -3206,6 +3202,72 @@ def func(input_sizes, filters, out_backprop):
out_backprop_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_sizes_val, _INPUT1: filters_val, _INPUT2: out_backprop_val})

@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_const(self):
output_shape_val_ = np.array([1, 10, 10, 10, 3], dtype=np.int32)
def func(value, filters):
output_shape_val = tf.constant(output_shape_val_, dtype=tf.int32)
return conv3d_transpose(value, filters, output_shape_val, strides=[1, 1, 1, 1, 1],
padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
value_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 10, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: filters_val}, rtol=1e-6)

@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_const_strided(self):
output_shape_val_ = np.array([1, 10, 10, 10, 3], dtype=np.int32)
def func(value, filters):
output_shape_val = tf.constant(output_shape_val_, dtype=tf.int32)
return conv3d_transpose(value, filters, output_shape_val, strides=[1, 2, 2, 2, 1],
padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
value_val = np.random.randint(low=0, high=256, size=[1, 5, 5, 5, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: filters_val}, rtol=1e-6)

@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_const_valid(self):
output_shape_val_ = np.array([1, 12, 12, 12, 3], dtype=np.int32)
def func(value, filters):
output_shape_val = tf.constant(output_shape_val_, dtype=tf.int32)
return conv3d_transpose(value, filters, output_shape_val, strides=[1, 1, 1, 1, 1],
padding='VALID', data_format="NDHWC", name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
value_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 10, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: filters_val}, rtol=1e-6)

@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2(self):
def func(value, filters, output_shape):
return conv3d_transpose(value, filters, output_shape, strides=[1, 1, 1, 1, 1],
padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[2, 3, 4, 4, 5]).astype(np.float32)
value_val = np.random.randint(low=0, high=256, size=[2, 7, 8, 9, 5]).astype(np.float32)
output_shape_val = np.array([2, 7, 8, 9, 4], dtype=np.int32)
self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: filters_val, _INPUT2: output_shape_val},
rtol=1e-6)

@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_strided(self):
def func(value, filters, output_shape):
return conv3d_transpose(value, filters, output_shape, strides=[1, 2, 2, 2, 1],
padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
value_val = np.random.randint(low=0, high=256, size=[1, 5, 5, 5, 5]).astype(np.float32)
output_shape_val = np.array([1, 10, 10, 10, 3], dtype=np.int32)
self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: filters_val, _INPUT2: output_shape_val},
rtol=1e-6)

@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_valid(self):
def func(value, filters, output_shape):
return conv3d_transpose(value, filters, output_shape, strides=[1, 1, 1, 1, 1],
padding='VALID', data_format="NDHWC", name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
value_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 10, 5]).astype(np.float32)
output_shape_val = np.array([1, 12, 12, 12, 3], dtype=np.int32)
self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: filters_val, _INPUT2: output_shape_val},
rtol=1e-6)

@check_opset_min_version(8, "CategoryMapper")
@skip_tf2()
def test_hashtable_lookup(self):
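The new Conv3DBackpropInputV2 tests above feed `value` and `filters` as graph inputs; the `_const` variants additionally bake the requested output shape in as a `tf.constant`. The chosen shapes follow the usual transposed-convolution size rule: with SAME padding the spatial output is `in * stride`, and with VALID padding it is `(in - 1) * stride + kernel`. A minimal sketch of that arithmetic (the helper name is illustrative, not part of the PR):

```python
# Sketch: expected spatial size of a transposed convolution output.
def conv_transpose_dim(in_dim, kernel, stride, padding):
    if padding == "SAME":
        return in_dim * stride
    return (in_dim - 1) * stride + kernel  # VALID

# *_valid tests: a 10^3 input with a 3x3x3 kernel and stride 1 expects a 12^3 output
assert conv_transpose_dim(10, 3, 1, "VALID") == 12
# *_strided tests: a 5^3 input with stride 2 and SAME padding expects a 10^3 output
assert conv_transpose_dim(5, 3, 2, "SAME") == 10
```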
2 changes: 2 additions & 0 deletions tf2onnx/graph.py
@@ -146,6 +146,8 @@ def data_format(self, val):

def is_nhwc(self):
"""Return True if node is in NHWC format."""
utils.make_sure('D' not in self.data_format, "is_nhwc called on %s with spatial=2 but data_format=%s",
self.name, self.data_format)
return self.data_format == "NHWC"

def is_const(self):
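`is_nhwc()` only makes sense for rank-4 (NHWC vs. NCHW) nodes, so the added `make_sure` turns a call on a 3D (NDHWC) node into an explicit error; the Conv3D conversion below switches to an `is_channels_last` check instead. That helper is not shown in this diff; a minimal sketch of what a rank-agnostic channels-last test could look like, assuming `data_format` is one of NHWC/NCHW/NDHWC/NCDHW:

```python
# Hypothetical sketch only -- the real is_channels_last helper is not part of this diff.
def is_channels_last(node):
    # NHWC and NDHWC both end in "C"; NCHW and NCDHW do not.
    return node.data_format.endswith("C")
```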
62 changes: 45 additions & 17 deletions tf2onnx/onnx_opset/nn.py
@@ -362,8 +362,7 @@ def version_11(cls, ctx, node, **kwargs):
# No change.
cls.version_1(ctx, node, **kwargs)


@tf_op("Conv2DBackpropInput")
@tf_op(["Conv2DBackpropInput", "Conv3DBackpropInputV2"])
class ConvTranspose:
@classmethod
def version_1(cls, ctx, node, **kwargs):
@@ -372,24 +371,36 @@ def version_1(cls, ctx, node, **kwargs):
# T Y = ConvTranspose(T X, T W, T B, @STRING auto_pad, @INTS dilations,
# @INT group, @INTS kernel_shape, @INTS output_shape, @INTS pads, @INTS strides)

if node.type == "Conv3DBackpropInputV2":
spatial = 3
else:
spatial = 2
node.type = "ConvTranspose"
# Note: inputs are reversed from what one would expect.
conv_kernel_shape(ctx, node, 1)
conv_kernel_shape(ctx, node, 1, spatial=spatial)
input_shape = ctx.get_shape(node.input[2])
output_shape_orig = node.output_shapes

# output_shape is explicitly specified here; in this case the pads values are auto-generated/calculated.
if node.inputs[0].is_const():
output_shape = ctx.get_shape(node.output[0])
if node.is_nhwc():
if is_channels_last(node):
new_output_shape = [output_shape[1], output_shape[2]]
input_hw = [input_shape[1], input_shape[2]]
input_dims = [input_shape[1], input_shape[2]]
if spatial == 3:
new_output_shape.append(output_shape[3])
input_dims.append(input_shape[3])
else:
new_output_shape = [output_shape[2], output_shape[3]]
input_hw = [input_shape[2], input_shape[3]]
utils.make_sure(new_output_shape.count(-1) <= 0, "output h and w need to be known")
utils.make_sure(new_output_shape[0] >= input_hw[0] and new_output_shape[1] >= input_hw[1],
"output h and w cannot be smaller than input h and w.")
input_dims = [input_shape[2], input_shape[3]]
if spatial == 3:
new_output_shape.append(output_shape[4])
input_dims.append(input_shape[4])

utils.make_sure(new_output_shape.count(-1) <= 0, "output dims need to be known")
utils.make_sure(all(new_output_shape[i] >= input_dims[i] for i in range(spatial)),
"output dims cannot be smaller than input dims.")

node.set_attr("output_shape", new_output_shape)
else:
input_shape = ctx.make_node("Cast", [node.input[0]], attr={'to': TensorProto.INT64})
@@ -409,20 +420,37 @@ def version_1(cls, ctx, node, **kwargs):
start_w = ctx.make_node("Div", [diff_w.output[0], const_two.output[0]])
end_h = ctx.make_node("Add", [start_h.output[0], expect_h])
end_w = ctx.make_node("Add", [start_w.output[0], expect_w])
starts = ctx.make_node("Concat", [start_h.output[0], start_w.output[0]], attr={"axis": 0})
ends = ctx.make_node("Concat", [end_h.output[0], end_w.output[0]], attr={"axis": 0})
const_one_two = ctx.make_const(utils.make_name(node.name + "_const_one_two"),
np.array([1, 2], dtype=np.int64))
if spatial == 3:
output_d = GraphBuilder(ctx).make_slice(
{"data": output_shape.output[0], "ends": [4], "starts": [3], "axes": [0]})
expect_d = GraphBuilder(ctx).make_slice(
{"data": input_shape.output[0], "ends": [4], "starts": [3], "axes": [0]})
diff_d = ctx.make_node("Sub", [output_d, expect_d])
start_d = ctx.make_node("Div", [diff_d.output[0], const_two.output[0]])
end_d = ctx.make_node("Add", [start_d.output[0], expect_d])

starts = ctx.make_node("Concat", [start_h.output[0], start_w.output[0], start_d.output[0]],
attr={"axis": 0})
ends = ctx.make_node("Concat", [end_h.output[0], end_w.output[0], end_d.output[0]], attr={"axis": 0})
slice_axes = ctx.make_const(utils.make_name(node.name + "_const_slice_axes"),
np.array([1, 2, 3], dtype=np.int64))
else:
starts = ctx.make_node("Concat", [start_h.output[0], start_w.output[0]], attr={"axis": 0})
ends = ctx.make_node("Concat", [end_h.output[0], end_w.output[0]], attr={"axis": 0})
slice_axes = ctx.make_const(utils.make_name(node.name + "_const_slice_axes"),
np.array([1, 2], dtype=np.int64))

slice_node = ctx.make_node("Slice",
[node.output[0], starts.output[0], ends.output[0], const_one_two.output[0]],
[node.output[0], starts.output[0], ends.output[0], slice_axes.output[0]],
shapes=output_shape_orig)

downstream_nodes = ctx.find_output_consumers(node.output[0])
downstream_nodes.remove(output_shape)
downstream_nodes.remove(slice_node)
ctx.replace_all_inputs(downstream_nodes, node.output[0], slice_node.output[0])

conv_dims_attr(node, "strides")
conv_dims_attr(node, "dilations")
conv_dims_attr(node, "strides", spatial=spatial)
conv_dims_attr(node, "dilations", spatial=spatial)

# remove output_shapes input
ctx.remove_input(node, node.input[0])
@@ -431,7 +459,7 @@ def version_1(cls, ctx, node, **kwargs):
node.input[0] = node.input[1]
node.input[1] = t

conv_convert_inputs(ctx, node, with_kernel=True)
conv_convert_inputs(ctx, node, with_kernel=True, spatial=spatial)

@classmethod
def version_11(cls, ctx, node, **kwargs):
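When the requested output shape is not a constant, the converter cannot set ConvTranspose's `output_shape` attribute, so the op is emitted without it and a `Slice` then crops the centered window of the requested size: per spatial axis, `start = (produced - requested) // 2` and `end = start + requested`. A small numeric sketch of that cropping arithmetic (the numbers are illustrative, not taken from the tests):

```python
# Sketch: centered crop applied when the TF output shape is only known at runtime.
produced, requested = 12, 10          # ConvTranspose output dim vs. dim requested by TF
start = (produced - requested) // 2   # 1
end = start + requested               # 11
assert (start, end) == (1, 11)        # Slice [1:11] recovers the 10-wide TF output
```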