[Relay][ConvertLayout] Support deformable conv2d (#7087)
* add test case

* fix

* support

* test case

* fix

* fix test

* fix bug

* add comment
comaniac authored Dec 15, 2020
1 parent 1da038e commit 7a20b4a
Showing 6 changed files with 235 additions and 8 deletions.
86 changes: 86 additions & 0 deletions python/tvm/relay/op/nn/_nn.py
@@ -29,6 +29,8 @@
from .._tensor import elemwise_shape_func
from ..strategy.generic import is_depthwise_conv2d
from ...transform import LayoutConfig
from ....ir import container
from ....tir import expr

# relu
reg.register_broadcast_schedule("nn.relu")
@@ -636,6 +638,90 @@ def compute_contrib_conv2d_winograd_nnpack_weight_transform(attrs, inputs, out_d
reg.register_pattern("nn.deformable_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)


@reg.register_alter_op_layout("nn.deformable_conv2d")
def alter_op_layout_deformable_conv2d(attrs, inputs, tinfos, out_type):
"""Alternate the layout of deformable conv2d"""
return None


@reg.register_legalize("nn.deformable_conv2d")
def legalize_deformable_conv2d(attrs, inputs, types):
"""Legalize deformable conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return None


@reg.register_convert_op_layout("nn.deformable_conv2d")
def convert_deformable_conv2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for deformable conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay

data, offset, weight = inputs
new_attrs = dict(attrs)
for attr in new_attrs:
if isinstance(new_attrs[attr], container.Array):
new_attrs[attr] = list(new_attrs[attr])
elif isinstance(new_attrs[attr], expr.IntImm):
new_attrs[attr] = new_attrs[attr].value

# First check if there is a LayoutConfig scope, and if so, whether
# it indicates we should ignore this layer or not.
layout_config = LayoutConfig.current
if layout_config is not None:
skip_layer = layout_config.check_skip()
if skip_layer:
return relay.nn.deformable_conv2d(data, offset, weight, **new_attrs)

# Prepare new layout.
assert len(desired_layouts) == 2, "A desired layout is expected for data and kernel"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout

if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.deformable_conv2d(data, offset, weight, **new_attrs)

# Handle default kernel layouts
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "OIHW"
elif desired_data_layout == "NHWC":
new_attrs["kernel_layout"] = "HWIO"
else:
raise ValueError("Layout %s is not yet supported." % desired_data_layout)

return relay.nn.deformable_conv2d(data, offset, weight, **new_attrs)


# bitpack
@reg.register_compute("nn.bitpack")
def compute_bitpack(attrs, inputs, out_dtype):
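For reference, a minimal usage sketch (not part of this commit) showing how the registration above is exercised; the Relay module `mod`, assumed to contain an NHWC nn.deformable_conv2d, is hypothetical:

    from tvm.relay.transform import ConvertLayout, LayoutConfig

    # Request NCHW data layout; "default" picks the matching kernel layout (OIHW).
    desired = {"nn.deformable_conv2d": ["NCHW", "default"]}
    mod = ConvertLayout(desired)(mod)

    # To exempt particular layers, wrap the pass in a LayoutConfig scope,
    # which the check_skip() call above consults:
    with LayoutConfig(skip_layers=[0]):  # skip the first matching conv
        mod = ConvertLayout(desired)(mod)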
22 changes: 16 additions & 6 deletions python/tvm/relay/op/strategy/cuda.py
@@ -462,13 +462,23 @@ def conv2d_winograd_without_weight_transfrom_strategy_cuda(attrs, inputs, out_ty
def deformable_conv2d_strategy_cuda(attrs, inputs, out_type, target):
"""deformable_conv2d cuda strategy"""
layout = attrs.data_layout
-    assert layout == "NCHW"
    strategy = _op.OpStrategy()
-    strategy.add_implementation(
-        wrap_compute_deformable_conv2d(topi.cuda.deformable_conv2d_nchw),
-        wrap_topi_schedule(topi.cuda.schedule_deformable_conv2d_nchw),
-        name="deformable_conv2d_nchw.cuda",
-    )
+
+    if layout == "NCHW":
+        strategy.add_implementation(
+            wrap_compute_deformable_conv2d(topi.cuda.deformable_conv2d_nchw),
+            wrap_topi_schedule(topi.cuda.schedule_deformable_conv2d_nchw),
+            name="deformable_conv2d_nchw.cuda",
+        )
+    elif layout == "NHWC":
+        # This implementation should never be picked by autotvm
+        strategy.add_implementation(
+            wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nhwc),
+            naive_schedule,
+            name="deformable_conv2d_nhwc.cuda",
+        )
+    else:
+        raise RuntimeError("Layout %s is not supported in deformable conv2d on CUDA" % layout)
return strategy


2 changes: 1 addition & 1 deletion python/tvm/relay/op/strategy/generic.py
@@ -408,7 +408,7 @@ def deformable_conv2d_strategy(attrs, inputs, out_type, target):
# This implementation should never be picked by autotvm
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nhwc),
-            wrap_topi_schedule(naive_schedule),
+            naive_schedule,
name="deformable_conv2d_nhwc.generic",
)
else:
4 changes: 3 additions & 1 deletion src/relay/op/nn/convolution.cc
@@ -563,7 +563,9 @@ by concating all the *g* results.
.add_argument("offset", "Tensor", "The offset tensor.")
.add_argument("weight", "Tensor", "The weight tensor.")
.set_support_level(5)
.add_type_rel("DeformableConv2D", DeformableConv2DRel<DeformableConv2DAttrs>);
.add_type_rel("DeformableConv2D", DeformableConv2DRel<DeformableConv2DAttrs>)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
DeformableConvInferCorrectLayout<DeformableConv2DAttrs>);

// Positional relay function to create deformable_conv2d operator
// used by frontend FFI.
12 changes: 12 additions & 0 deletions src/relay/op/nn/convolution.h
@@ -1211,6 +1211,18 @@ bool DeformableConv2DRel(const Array<Type>& types, int num_inputs, const Attrs&
return true;
}

template <typename AttrType>
Array<Array<Layout> > DeformableConvInferCorrectLayout(
const Attrs& attrs, const Array<Layout>& new_in_layouts, const Array<Layout>& old_in_layouts,
const Array<tvm::relay::Type>& old_in_types) {
const AttrType* params = attrs.as<AttrType>();

  // Layout of {data, offset, kernel}, {out}
return Array<Array<Layout> >{
{params->data_layout, params->data_layout, params->kernel_layout},
{params->out_layout == "" ? params->data_layout : params->out_layout}};
}

template <typename T>
Array<Array<Layout> > ConvInferCorrectLayout(const Attrs& attrs,
const Array<Layout>& new_in_layouts,
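To see the rule this template encodes (the offset tensor shares data_layout, and out_layout falls back to data_layout when unset), here is a hedged Python check with illustrative shapes:

    import tvm
    from tvm import relay

    data = relay.var("data", shape=(1, 4, 16, 16), dtype="float32")       # NCHW
    offset = relay.var("offset", shape=(1, 18, 14, 14), dtype="float32")  # shares NCHW
    weight = relay.var("weight", shape=(8, 4, 3, 3), dtype="float32")     # OIHW
    y = relay.nn.deformable_conv2d(
        data, offset, weight, kernel_size=(3, 3), channels=8,
        data_layout="NCHW", kernel_layout="OIHW",
    )
    mod = tvm.IRModule.from_expr(relay.Function([data, offset, weight], y))
    mod = relay.transform.InferType()(mod)
    print(mod["main"].ret_type)  # Tensor[(1, 8, 14, 14), float32]: NCHW, like the data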
117 changes: 117 additions & 0 deletions tests/python/relay/test_pass_convert_op_layout.py
@@ -292,6 +292,122 @@ def expected():
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)


def test_deformable_conv_bias_pool_convert_layout():
def before(N, CI, H, W, CO, KH, KW, layout):
if layout == "NCHW":
data_shape = (N, CI, H, W)
weight_shape = (CO, CI, KH, KW)
kernel_layout = "OIHW"
else:
data_shape = (N, H, W, CI)
weight_shape = (KH, KW, CI, CO)
kernel_layout = "HWIO"
bias_shape = (CO,)

data = relay.var("data", shape=data_shape, dtype="float32")
offset = relay.var("offset")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
bias = relay.var("bias", shape=bias_shape, dtype="float32")

y = relay.nn.deformable_conv2d(
data,
offset,
weight,
kernel_size=(KH, KW),
channels=CO,
data_layout=layout,
kernel_layout=kernel_layout,
)
y = relay.nn.bias_add(y, bias, axis=-1 if layout == "NHWC" else 1)
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout=layout)
y = relay.cast(y, "int32")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y

def expected(N, CI, H, W, CO, KH, KW, OH, OW, src_layout, dst_layout):
layout_map = {"src": {}, "dst": {}}
if src_layout == "NCHW":
nchw = layout_map["src"]
nhwc = layout_map["dst"]
else:
nchw = layout_map["dst"]
nhwc = layout_map["src"]

nchw["data_layout"] = "NCHW"
nchw["data_shape"] = (N, CI, H, W)
nchw["offset_shape"] = (N, KH * KW * 2, OH, OW)
nchw["weight_shape"] = (CO, CI, KH, KW)
nchw["kernel_layout"] = "OIHW"

nhwc["data_layout"] = "NHWC"
nhwc["data_shape"] = (N, H, W, CI)
nhwc["offset_shape"] = (N, OH, OW, KH * KW * 2)
nhwc["weight_shape"] = (KH, KW, CI, CO)
nhwc["kernel_layout"] = "HWIO"

bias_shape = (CO,)

data = relay.var("data", shape=layout_map["src"]["data_shape"], dtype="float32")
offset = relay.var("offset", shape=layout_map["src"]["offset_shape"], dtype="float32")
weight = relay.var("weight", shape=layout_map["src"]["weight_shape"], dtype="float32")
bias = relay.var("bias", shape=bias_shape, dtype="float32")

data = relay.layout_transform(
data, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
)
offset = relay.layout_transform(
offset, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
)
weight = relay.layout_transform(
weight, layout_map["src"]["kernel_layout"], layout_map["dst"]["kernel_layout"]
)
y = relay.nn.deformable_conv2d(
data,
offset,
weight,
kernel_size=(KH, KW),
channels=CO,
data_layout=layout_map["dst"]["data_layout"],
kernel_layout=layout_map["dst"]["kernel_layout"],
)
if layout_map["src"]["data_layout"] == "NHWC":
bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
else:
bias = relay.expand_dims(bias, axis=1, num_newaxis=2)
bias = relay.expand_dims(bias, axis=0)
bias = relay.layout_transform(
bias, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
)
y = relay.add(y, bias)
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout=layout_map["dst"]["data_layout"])
y = relay.cast(y, "int32")
y = relay.layout_transform(
y, layout_map["dst"]["data_layout"], layout_map["src"]["data_layout"]
)
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y

# NHWC -> NCHW
a = before(1, 3, 224, 224, 32, 3, 3, "NHWC")
a = run_opt_pass(a, transform.ConvertLayout({"nn.deformable_conv2d": ["NCHW", "default"]}))
b = run_opt_pass(
expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NHWC", "NCHW"), transform.InferType()
)
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

# NCHW -> NHWC
a = before(1, 3, 224, 224, 32, 3, 3, "NCHW")
a = run_opt_pass(a, transform.ConvertLayout({"nn.deformable_conv2d": ["NHWC", "default"]}))
b = run_opt_pass(
expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NCHW", "NHWC"), transform.InferType()
)
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)


def test_dual_path_convert_layout():
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
@@ -1308,6 +1424,7 @@ def expected():
test_conv_roi_align_convert_layout()
test_conv_roi_pool_convert_layout()
test_conv_strided_slice_convert_layout()
test_deformable_conv_bias_pool_convert_layout()
test_default_keyword()
test_different_ops_convert_layout()
test_no_desired_layout()
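A quick sanity check of the offset shapes the test constructs (stride 1, no padding, so OH = H - KH + 1 = 222):

    N, CI, H, W, CO, KH, KW = 1, 3, 224, 224, 32, 3, 3
    OH, OW = H - KH + 1, W - KW + 1                        # 222, 222
    assert (N, 2 * KH * KW, OH, OW) == (1, 18, 222, 222)   # NCHW offset shape
    assert (N, OH, OW, 2 * KH * KW) == (1, 222, 222, 18)   # NHWC offset shape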
