@@ -1171,35 +1171,62 @@ def test_flatten_infer_type():
 
 @tvm.testing.uses_gpu
 def test_pad_infer_type():
-    # entirely concrete case
+    # entirely concrete cases
     n, c, h, w = 1, 2, 3, 4
     t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
     y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
-    "pad_width=" in y.astext()
     yy = run_infer_type(y)
     assert yy.checked_type == relay.TensorType((3, 6, 9, 12), "float32")
 
+    n, c, h, w = 4, 6, 3, 5
+    t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
+    y = relay.nn.pad(t, ((-1, -1), (2, -2), (0, -3), (4, 4)), pad_mode="reflect")
+    yy = run_infer_type(y)
+    assert yy.checked_type == relay.TensorType((2, 6, 0, 13), "float32")
+
     # some symbolic values
     n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
     t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
     y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
     yy = run_infer_type(y)
     assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")
 
+    n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
+    t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
+    y = relay.nn.pad(t, ((-1, -1), (-2, -2), (1, -3), (4, 4)))
+    yy = run_infer_type(y)
+    assert yy.checked_type == relay.TensorType((n + (-2), c + (-4), h + (-2), w + 8), "float32")
+
 
 @tvm.testing.uses_gpu
 def test_pad_run():
     def _test_run(dtype):
-        dshape = (4, 10, 7, 7)
-        x = relay.var("x", shape=dshape)
-        y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4)))
-        func = relay.Function([x], y)
-        data = np.random.uniform(size=dshape).astype(dtype)
-        ref_res = np.pad(data, ((1, 1), (2, 2), (3, 3), (4, 4)), "constant")
-        for target, ctx in tvm.testing.enabled_targets():
-            intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
-            op_res1 = intrp1.evaluate(func)(data)
-            tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
+        dshape_list = [(4, 10, 7, 7), (4, 6, 3, 5)]
+        pad_list = [((1, 1), (2, 2), (3, 3), (4, 4)), ((-1, -1), (2, -2), (0, -2), (4, 4))]
+
+        for dshape, pad in zip(dshape_list, pad_list):
+            x = relay.var("x", shape=dshape)
+            y = relay.nn.pad(x, pad)
+            func = relay.Function([x], y)
+            data = np.random.uniform(size=dshape).astype(dtype)
+            mod_pad = []
+            mod_data = data
+            for axis, (pad_x, pad_y) in enumerate(pad):
+                indices = range(dshape[axis])
+                if pad_x < 0:
+                    indices = indices[abs(pad_x) :]
+                    pad_x = 0
+                if pad_y < 0:
+                    indices = indices[:pad_y]
+                    pad_y = 0
+                mod_data = np.take(mod_data, indices, axis)
+                mod_pad.append((pad_x, pad_y))
+
+            ref_res = np.pad(mod_data, tuple(mod_pad), "constant")
+            for target, ctx in tvm.testing.enabled_targets():
+                intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
+                op_res1 = intrp1.evaluate(func)(data)
+                tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
 
     _test_run("float32")
     _test_run("int32")
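The reference computation in _test_run works around np.pad rejecting negative widths: each negative (before, after) entry is first applied as a crop via np.take, then zeroed out in the width tuple handed to np.pad. The same trick as a standalone numpy sketch (crop_then_pad is an illustrative name, not part of the test suite):

    import numpy as np

    def crop_then_pad(data, pad):
        # Split each pad spec into a crop (negative part) and a pad (non-negative part).
        widths = []
        for axis, (before, after) in enumerate(pad):
            idx = range(data.shape[axis])
            if before < 0:
                idx = idx[-before:]  # drop |before| leading elements
                before = 0
            if after < 0:
                idx = idx[:after]  # drop |after| trailing elements
                after = 0
            data = np.take(data, idx, axis)
            widths.append((before, after))
        return np.pad(data, tuple(widths), "constant")

    x = np.arange(6).reshape(2, 3)
    # Crop one row off the front, then add one zero column on each side.
    print(crop_then_pad(x, ((-1, 0), (1, 1))))  # [[0 3 4 5 0]]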