Skip to content

Commit f7c2fd6

Browse files
Xingyu Zhou authored and zhiics committed
[Relay] Conv2D padding representation (apache#4787)
* enforce 4-way padding
* add util with get_pad_tuple
* delete unnecessary arguments
* fix lint
* add container.Array case
* fix cudnn conv2d asymmetric padding logic
* rename get_pad_tuple to get_pad_tuple2d
* revert change for topi/python/topi/nn/conv2d.py
* add get_pad_tuple2d for several contrib conv2d ops
* add get_pad_tuple2d for all conv2d ops
1 parent ca92d17 commit f7c2fd6

File tree

5 files changed

+90
-16
lines changed

5 files changed

+90
-16
lines changed

python/tvm/relay/op/nn/nn.py

Lines changed: 20 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
from __future__ import absolute_import as _abs
2020
from ...expr import TupleWrapper
2121
from . import _make
22+
from .util import get_pad_tuple2d
2223

2324

2425
def conv1d(data,
@@ -200,8 +201,9 @@ def conv2d(data,
200201
strides = (strides, strides)
201202
if isinstance(dilation, int):
202203
dilation = (dilation, dilation)
203-
if isinstance(padding, int):
204-
padding = (padding, padding)
204+
# TODO enforce 4-way padding in topi/nn/conv2d after #4644 merged
205+
# convert 2-way padding to 4-way padding
206+
padding = get_pad_tuple2d(padding)
205207

206208
return _make.conv2d(data, weight, strides, padding, dilation,
207209
groups, channels, kernel_size, data_layout,
@@ -363,6 +365,8 @@ def conv2d_transpose(data,
363365
result : tvm.relay.Expr
364366
The computed result.
365367
"""
368+
# convert 2-way padding to 4-way padding
369+
padding = get_pad_tuple2d(padding)
366370
return _make.conv2d_transpose(data, weight, strides, padding, dilation,
367371
groups, channels, kernel_size, data_layout,
368372
kernel_layout, out_layout, output_padding, out_dtype)
@@ -1758,6 +1762,8 @@ def contrib_conv2d_winograd_without_weight_transform(data,
17581762
result : tvm.relay.Expr
17591763
The computed result.
17601764
"""
1765+
# convert 2-way padding to 4-way padding
1766+
padding = get_pad_tuple2d(padding)
17611767
return _make.contrib_conv2d_winograd_without_weight_transform(
17621768
data, weight, tile_size, strides, padding, dilation,
17631769
groups, channels, kernel_size, data_layout,
@@ -1824,6 +1830,8 @@ def contrib_conv2d_winograd_nnpack_without_weight_transform(data,
18241830
result : tvm.relay.Expr
18251831
The computed result.
18261832
"""
1833+
# convert 2-way padding to 4-way padding
1834+
padding = get_pad_tuple2d(padding)
18271835
return _make.contrib_conv2d_winograd_nnpack_without_weight_transform(
18281836
data, weight, strides, padding, dilation,
18291837
groups, channels, kernel_size, data_layout,
@@ -1891,6 +1899,8 @@ def contrib_conv2d_nchwc(data,
18911899
result : tvm.relay.Expr
18921900
The computed result.
18931901
"""
1902+
# convert 2-way padding to 4-way padding
1903+
padding = get_pad_tuple2d(padding)
18941904
return _make.contrib_conv2d_NCHWc(data, kernel, strides, padding, dilation,
18951905
groups, channels, kernel_size, data_layout,
18961906
kernel_layout, out_layout, out_dtype)
@@ -1956,6 +1966,8 @@ def contrib_depthwise_conv2d_nchwc(data,
19561966
result : tvm.relay.Expr
19571967
The computed result.
19581968
"""
1969+
# convert 2-way padding to 4-way padding
1970+
padding = get_pad_tuple2d(padding)
19591971
return _make.contrib_depthwise_conv2d_NCHWc(data, kernel, strides, padding, dilation,
19601972
groups, channels, kernel_size, data_layout,
19611973
kernel_layout, out_layout, out_dtype)
@@ -2021,6 +2033,8 @@ def contrib_conv2d_nchwc_int8(data,
20212033
result : tvm.relay.Expr
20222034
The computed result.
20232035
"""
2036+
# convert 2-way padding to 4-way padding
2037+
padding = get_pad_tuple2d(padding)
20242038
return _make.contrib_conv2d_NCHWc_int8(data, kernel, strides, padding, dilation,
20252039
groups, channels, kernel_size, data_layout,
20262040
kernel_layout, out_layout, out_dtype)
@@ -2142,6 +2156,8 @@ def deformable_conv2d(data,
21422156
The computed result.
21432157
21442158
"""
2159+
# convert 2-way padding to 4-way padding
2160+
padding = get_pad_tuple2d(padding)
21452161
return _make.deformable_conv2d(data, offset, weight, strides, padding, dilation,
21462162
deformable_groups, groups, channels, kernel_size, data_layout,
21472163
kernel_layout, out_layout, out_dtype)
@@ -2251,7 +2267,8 @@ def bitserial_conv2d(data,
22512267
result : tvm.relay.Expr
22522268
The computed result.
22532269
"""
2254-
2270+
# convert 2-way padding to 4-way padding
2271+
padding = get_pad_tuple2d(padding)
22552272
return _make.bitserial_conv2d(data, weight, strides, padding, channels,
22562273
kernel_size, activation_bits, weight_bits,
22572274
data_layout, kernel_layout, pack_dtype,

python/tvm/relay/op/nn/util.py

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
# Licensed to the Apache Software Foundation (ASF) under one
2+
# or more contributor license agreements. See the NOTICE file
3+
# distributed with this work for additional information
4+
# regarding copyright ownership. The ASF licenses this file
5+
# to you under the Apache License, Version 2.0 (the
6+
# "License"); you may not use this file except in compliance
7+
# with the License. You may obtain a copy of the License at
8+
#
9+
# http://www.apache.org/licenses/LICENSE-2.0
10+
#
11+
# Unless required by applicable law or agreed to in writing,
12+
# software distributed under the License is distributed on an
13+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14+
# KIND, either express or implied. See the License for the
15+
# specific language governing permissions and limitations
16+
# under the License.
17+
# pylint: disable=invalid-name, unused-variable
18+
"""NN operator common utilities"""
19+
from __future__ import absolute_import
20+
from .... import container
21+
22+
def get_pad_tuple2d(padding):
    """Common code to get the pad option

    Normalizes a padding spec (single int, 2-tuple, or 4-tuple) into an
    explicit 4-way (top, left, down, right) padding tuple.

    Parameters
    ----------
    padding : Union[int, Tuple[int, ...]]
        Padding size

    Returns
    -------
    pad_top : int
        Padding size on top
    pad_left : int
        Padding size on left
    pad_down : int
        Padding size on down.
    pad_right : int
        Padding size on right.
    """
    # Materialize TVM runtime arrays so the length/index logic below applies.
    if isinstance(padding, container.Array):
        padding = list(padding)
    # Work out the total padding along each spatial axis.
    if isinstance(padding, int):
        total_h = total_w = padding * 2
    elif isinstance(padding, (tuple, list)):
        if len(padding) == 4:
            # Already explicit 4-way padding: pass through unchanged.
            return padding[0], padding[1], padding[2], padding[3]
        if len(padding) == 2:
            total_h, total_w = padding[0] * 2, padding[1] * 2
        else:
            raise ValueError("Size of padding can only be 2 or 4")
    else:
        raise ValueError("Unknown padding option %s" % padding)
    # Split each total symmetrically, biasing the extra unit (if odd)
    # toward the top/left side.
    top = (total_h + 1) // 2
    left = (total_w + 1) // 2
    return top, left, total_h - top, total_w - left

tests/python/relay/test_pass_alter_op_layout.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -617,7 +617,7 @@ def expected():
617617
w = relay.var("w", shape=(32, 1, 3, 3))
618618
x = relay.layout_transform(x, "NCHW", "NCHW8c")
619619
w = relay.layout_transform(w, "OIHW", "OIHW1i8o")
620-
y = relay.nn.contrib_depthwise_conv2d_nchwc(x, w, padding=(1, 1), channels=32, kernel_size=(3, 3),
620+
y = relay.nn.contrib_depthwise_conv2d_nchwc(x, w, padding=(1, 1, 1, 1), channels=32, kernel_size=(3, 3),
621621
groups=32, data_layout="NCHW8c", kernel_layout="OIHW1i8o",
622622
out_layout="NCHW8c")
623623
y = relay.layout_transform(y, "NCHW8c", "NCHW")

tests/python/unittest/test_graph_tuner_core.py

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -50,9 +50,9 @@ def _create_data(target, dshape, dtype, layout):
5050
params=params,
5151
ops=(relay.op.nn.conv2d,))
5252
wkl_list = [
53-
create_workload((1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1), (1, 1), layout, layout, dtype, dtype),
54-
create_workload((1, 16, 8, 8), (32, 16, 1, 1), (1, 1), (0, 0), (1, 1), layout, layout, dtype, dtype),
55-
create_workload((1, 32, 8, 8), (32, 32, 3, 3), (1, 1), (1, 1), (1, 1), layout, layout, dtype, dtype),
53+
create_workload((1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype),
54+
create_workload((1, 16, 8, 8), (32, 16, 1, 1), (1, 1), (0, 0, 0, 0), (1, 1), layout, layout, dtype, dtype),
55+
create_workload((1, 32, 8, 8), (32, 32, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype),
5656
]
5757
costs = [0.04, 0.012, 0.03]
5858
config_list = []
@@ -279,9 +279,9 @@ def test_many_sub_graphs():
279279
params=params,
280280
ops=(relay.op.nn.conv2d,))
281281
wkl_list = [
282-
create_workload((1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1), (1, 1), layout, layout, dtype, dtype),
283-
create_workload((1, 16, 8, 8), (32, 16, 1, 1), (1, 1), (0, 0), (1, 1), layout, layout, dtype, dtype),
284-
create_workload((1, 32, 8, 8), (32, 32, 3, 3), (1, 1), (1, 1), (1, 1), layout, layout, dtype, dtype),
282+
create_workload((1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype),
283+
create_workload((1, 16, 8, 8), (32, 16, 1, 1), (1, 1), (0, 0, 0, 0), (1, 1), layout, layout, dtype, dtype),
284+
create_workload((1, 32, 8, 8), (32, 32, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype),
285285
]
286286
costs = [0.04, 0.012, 0.03, 0.02, 0.02, 0.045]
287287
config_list = []
@@ -392,8 +392,8 @@ def test_tuple():
392392
params=params,
393393
ops=(relay.op.nn.conv2d,))
394394
wkl_list = [
395-
create_workload((1, 5, 32, 32), (2, 5, 3, 3), (1, 1), (1, 1), (1, 1), layout, layout, dtype, dtype),
396-
create_workload((1, 5, 32, 32), (3, 5, 3, 3), (1, 1), (1, 1), (1, 1), layout, layout, dtype, dtype),
395+
create_workload((1, 5, 32, 32), (2, 5, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype),
396+
create_workload((1, 5, 32, 32), (3, 5, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype),
397397
]
398398
costs = [0.01, 0.012, 0.03, 0.04]
399399
config_list = []
@@ -490,9 +490,9 @@ def test_triangle_block():
490490
params=params,
491491
ops=(relay.op.nn.conv2d,))
492492
wkl_list = [
493-
create_workload((1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1), (1, 1), layout, layout, dtype, dtype),
494-
create_workload((1, 16, 8, 8), (32, 16, 1, 1), (1, 1), (0, 0), (1, 1), layout, layout, dtype, dtype),
495-
create_workload((1, 3, 8, 8), (32, 3, 3, 3), (1, 1), (1, 1), (1, 1), layout, layout, dtype, dtype),
493+
create_workload((1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype),
494+
create_workload((1, 16, 8, 8), (32, 16, 1, 1), (1, 1), (0, 0, 0, 0), (1, 1), layout, layout, dtype, dtype),
495+
create_workload((1, 3, 8, 8), (32, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype),
496496
]
497497
costs = [0.04, 0.012, 0.03, 0.02, 0.02, 0.045]
498498
config_list = []

topi/python/topi/cuda/conv2d.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,8 @@ def conv2d_cuda(cfg, data, kernel, strides, padding, dilation, layout='NCHW', ou
8585
stride_h, stride_w = (strides, strides) if isinstance(strides, int) else strides
8686
dilation_h, dilation_w = (dilation, dilation) if isinstance(dilation, int) else dilation
8787

88-
if isinstance(padding, (list, tuple)) and len(padding) > 2:
88+
if isinstance(padding, (list, tuple)) and len(padding) == 4 and \
89+
(padding[0] != padding[2] or padding[1] != padding[3]):
8990
raise ValueError("Cudnn doesn't support asymmetric padding.")
9091
pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW))
9192
OH = (H + pt + pb - KH) // stride_h + 1

0 commit comments

Comments (0)