@@ -20,7 +20,7 @@ def my_clip(x, a_min, a_max):
     x = tvm.compute(x.shape, lambda *i: tvm.max(x(*i), const_min), name="clipB")
     return x
 
-def conv2d(N, CI, H, W, CO, KH, KW, strides, padding, in_dtype, out_dtype):
+def conv2d(N, CI, H, W, CO, KH, KW, strides, padding, dilation, in_dtype, out_dtype):
     data_shape = (N // env.BATCH, CI // env.BLOCK_IN, H, W, env.BATCH, env.BLOCK_IN)
     kernel_shape = (CO // env.BLOCK_OUT, CI // env.BLOCK_IN, KH, KW, env.BLOCK_OUT, env.BLOCK_IN)
     bias_shape = (N // env.BATCH, CO // env.BLOCK_OUT, 1, 1, env.BATCH, env.BLOCK_OUT)
@@ -33,7 +33,7 @@ def conv2d(N, CI, H, W, CO, KH, KW, strides, padding, in_dtype, out_dtype):
     kernel = tvm.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
 
     with tvm.target.vta():
-        res = topi.nn.conv2d(data, kernel, padding=padding, strides=strides,
+        res = topi.nn.conv2d(data, kernel, padding=padding, strides=strides, dilation=dilation,
                              layout='NCHW%dn%dc' % (env.BATCH, env.BLOCK_IN), out_dtype='int32')
         res = topi.add(res, bias)
         res = topi.right_shift(res, 8)
@@ -46,13 +46,13 @@ def conv2d(N, CI, H, W, CO, KH, KW, strides, padding, in_dtype, out_dtype):
         s = tvm.create_schedule([res.op])
 
 
-    return s, [data, kernel, bias, res]
+    return s, [data, kernel, bias, res]
 
 if __name__ == '__main__':
-    N, CI, H, W, CO, KH, KW, strides, padding, in_dtype, out_dtype = \
-        1, 64, 56, 56, 64, 3, 3, (1, 1), (1, 1), 'int8', 'int32'
+    N, CI, H, W, CO, KH, KW, strides, padding, dilation, in_dtype, out_dtype = \
+        1, 64, 56, 56, 64, 3, 3, (1, 1), (1, 1), (1, 1), 'int8', 'int32'
 
-    task = autotvm.task.create(conv2d, args=(N, CI, H, W, CO, KH, KW, strides, padding, in_dtype, out_dtype),
+    task = autotvm.task.create(conv2d, args=(N, CI, H, W, CO, KH, KW, strides, padding, dilation, in_dtype, out_dtype),
                                target=tvm.target.vta(env.MODEL), target_host=env.target_host, template_key='direct')
     print(task.config_space)
 
@@ -62,7 +62,7 @@ def conv2d(N, CI, H, W, CO, KH, KW, strides, padding, in_dtype, out_dtype):
 
     measure_option = autotvm.measure_option(
         builder=autotvm.LocalBuilder(build_func=vta.vta_autotvm_build_func),
-        runner=autotvm.RPCRunner(env.TARGET, 'fleet', 9190, number=4, repeat=3, timeout=30,
+        runner=autotvm.RPCRunner(env.TARGET, '10.77.1.109', 9190, number=4, repeat=3, timeout=30,
                                  check_correctness=True))
 
     tuner = autotvm.tuner.RandomTuner(task)
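
For reference, a minimal sketch of how the RandomTuner created above is typically driven in AutoTVM; the trial count and log file name are illustrative assumptions and are not part of this commit:

    # Illustrative continuation (assumed, not from this commit): run the tuner
    # and log the measured configurations for later reuse.
    n_trial = min(1000, len(task.config_space))
    tuner.tune(n_trial=n_trial,
               measure_option=measure_option,
               callbacks=[autotvm.callback.progress_bar(n_trial),
                          autotvm.callback.log_to_file('conv2d_vta.tuning.log')])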