
Commit 1238115

Authored by JZZ-NOTE, xiaoxiaohehe001, and jiweibo
fix conv2d convert test (#35627)
* support nnadapter and ascend310
* modify code
* add anchor_generator convert test
* add gelu convert test
* add conv2d convert test
* modify anchor_operator convert test
* modify conv2d test
* modify con2d convert test
* modify conv2d convert test
* modify conv2d convert test
* modify conv2d test
* fix WITH_PYTHON compile error
* modify test file
* modify test file
* modify test file
* modify test file
* modify test file
* modify test file
* modify test file
* modify test file

Co-authored-by: xiaoxiaohehe001 <hiteezsf@163.com>
Co-authored-by: jiweibo <jiweibo@baidu.com>
1 parent be4d002 commit 1238115

4 files changed: +342 / -108 lines

paddle/fluid/inference/tensorrt/convert/conv2d_op.cc

Lines changed: 21 additions & 2 deletions
@@ -86,7 +86,20 @@ void ConvertConv2d(TensorRTEngine* engine, const framework::proto::OpDesc& op,
   nvinfer1::DimsHW nv_ksize(filter_h, filter_w);
   nvinfer1::DimsHW nv_dilations(dilations[0], dilations[1]);
   nvinfer1::DimsHW nv_strides(strides[0], strides[1]);
-  nvinfer1::DimsHW nv_paddings(paddings[0], paddings[1]);
+  nvinfer1::DimsHW nv_paddings;
+  nvinfer1::Dims nv_pre_paddings;
+  nvinfer1::Dims nv_post_paddings;
+  if (paddings.size() == 2) {
+    nv_paddings.d[0] = paddings[0];
+    nv_paddings.d[1] = paddings[1];
+  } else {
+    nv_pre_paddings.nbDims = 2;
+    nv_post_paddings.nbDims = 2;
+    nv_pre_paddings.d[0] = paddings[0];
+    nv_pre_paddings.d[1] = paddings[2];
+    nv_post_paddings.d[0] = paddings[1];
+    nv_post_paddings.d[1] = paddings[3];
+  }
 
   TensorRTEngine::Weight weight{nvinfer1::DataType::kFLOAT,
                                 static_cast<void*>(weight_data),
@@ -116,7 +129,13 @@ void ConvertConv2d(TensorRTEngine* engine, const framework::proto::OpDesc& op,
       layer, platform::errors::Fatal("TensorRT create conv2d/conv2d_transpose"
                                      " layer failed."));
   layer->setStride(nv_strides);
-  layer->setPadding(nv_paddings);
+  if (paddings.size() == 2) {
+    layer->setPadding(nv_paddings);
+  } else {
+    layer->setPrePadding(nv_pre_paddings);
+    layer->setPostPadding(nv_post_paddings);
+  }
+
   layer->setNbGroups(groups);
   if (padding_algorithm == "SAME") {
     layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
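
The substance of this change: a 2-element paddings attribute keeps the old symmetric setPadding path, while a 4-element attribute is split into separate pre- and post-padding so asymmetric padding no longer has to be rejected. A minimal Python sketch of that mapping (illustration only, assuming Paddle's 4-element order [top, bottom, left, right]; the helper name is made up and not part of the commit):

def split_paddings(paddings):
    # Symmetric case: one (pad_h, pad_w) pair applied on both sides, as before.
    if len(paddings) == 2:
        return tuple(paddings), tuple(paddings)
    # Asymmetric case: mirrors the converter's indexing above.
    pre = (paddings[0], paddings[2])   # (top, left)     -> setPrePadding
    post = (paddings[1], paddings[3])  # (bottom, right) -> setPostPadding
    return pre, post

print(split_paddings([1, 2, 3, 4]))    # ((1, 3), (2, 4))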

paddle/fluid/inference/tensorrt/op_teller.cc

Lines changed: 8 additions & 10 deletions
@@ -149,13 +149,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
     return false;
 
   for (auto& teller : tellers_) {
-    if (op_type == "depthwise_conv2d") {
-      std::vector<int> paddings =
-          BOOST_GET_CONST(std::vector<int>, desc.GetAttr("paddings"));
-
-      if (paddings.size() > 2) return false;
-    }
-
     if (op_type == "relu" || op_type == "relu6" || op_type == "tanh" ||
         op_type == "sigmoid") {
       auto* block = desc.Block();
@@ -208,9 +201,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
       std::vector<int> paddings =
           BOOST_GET_CONST(std::vector<int>, desc.GetAttr("paddings"));
 
-      // conv2d and conv2d_transpose need padding check
-      if (paddings.size() > 2 && op_type != "conv2d_fusion") return false;
-
       if (desc.Input("Input").size() != 1) {
         VLOG(3) << "TRT Conv2d expect 1 input, but got "
                 << desc.Input("Input").size() << " input.";
@@ -223,6 +213,14 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
         return false;
       }
 
+      if (desc.HasAttr("padding_algorithm")) {
+        auto padding_algorithm =
+            BOOST_GET_CONST(std::string, desc.GetAttr("padding_algorithm"));
+        if (padding_algorithm == "SAME" || padding_algorithm == "VALID") {
+          return false;
+        }
+      }
+
       if (desc.HasAttr("enable_int8")) {
         if (op_type == "conv2d" || op_type == "conv2d_fusion") {
           if (!desc.HasAttr("Input_scale")) {
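
With the blanket size-4 paddings rejection removed, the teller instead declines conv2d/conv2d_transpose whenever padding_algorithm is SAME or VALID, since those modes derive the padding from the input and stride and ignore the explicit paddings attribute. A minimal sketch of the usual SAME/VALID output-size rules (an illustration of that semantics under standard assumptions, not Paddle's or TensorRT's code):

import math

def conv_out_size(in_size, kernel, stride, dilation, algorithm):
    effective_kernel = dilation * (kernel - 1) + 1
    if algorithm == 'SAME':
        # Padding is chosen so that out = ceil(in / stride).
        return math.ceil(in_size / stride)
    if algorithm == 'VALID':
        # No padding is applied at all.
        return (in_size - effective_kernel) // stride + 1
    raise ValueError(algorithm)

print(conv_out_size(64, 3, 2, 1, 'SAME'))   # 32
print(conv_out_size(64, 3, 2, 1, 'VALID'))  # 31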

python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d.py

Lines changed: 104 additions & 96 deletions
@@ -22,121 +22,143 @@
 
 class TrtConvertConv2dTest(TrtLayerAutoScanTest):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
-        # TODO: This is just the example to remove the wrong attrs.
         inputs = program_config.inputs
         weights = program_config.weights
         attrs = [
             program_config.ops[i].attrs
             for i in range(len(program_config.ops))
         ]
 
-        # groups restriction.
         if inputs['input_data'].shape[1] != weights['conv2d_weight'].shape[
                 1] * attrs[0]['groups']:
             return False
 
-        # others restriction, todo.
-
         return True
 
     def sample_program_configs(self):
-        def generate_input1(attrs: List[Dict[str, Any]]):
-            # TODO: This is just the example to illustrate the releation between axis and input.
-            # for each attr, can generate different datas
+        self.trt_param.workspace_size = 1073741824
+
+        def generate_input1(batch, attrs: List[Dict[str, Any]]):
             if attrs[0]['groups'] == 1:
-                return np.ones([2, 3, 64, 64]).astype(np.float32)
+                return np.ones([batch, 3, 64, 64]).astype(np.float32)
+            elif attrs[0]['groups'] == 2:
+                return np.ones([batch, 6, 64, 64]).astype(np.float32)
             else:
-                return np.ones([1, 3, 64, 64]).astype(np.float32)
+                return np.ones([batch, 9, 64, 64]).astype(np.float32)
 
         def generate_weight1(attrs: List[Dict[str, Any]]):
             return np.random.random([24, 3, 3, 3]).astype(np.float32)
 
-        # for strides in [[1, 1], [2, 2], [1, 2], [2, 3]]:
-        #     for paddings in [[0, 3], [3, 1], [1, 1, 1, 1]]:
-        #         for groups in [1, 2]:
-        #             for padding_algotithm in ['EXPLICIT', 'SAME', 'VALID']:
-        #                 for dilations in [[1, 1], [1, 2]]:
-        #                     for data_format in ['NCHW']:
-        for strides in [[1, 1], [2, 2]]:
-            for paddings in [[0, 3], [3, 1]]:
-                for groups in [1]:
-                    for padding_algotithm in ['EXPLICIT']:
-                        for dilations in [[1, 1]]:
-                            for data_format in ['NCHW']:
-
-                                dics = [{
-                                    "data_fromat": data_format,
-                                    "dilations": dilations,
-                                    "padding_algorithm": padding_algotithm,
-                                    "groups": groups,
-                                    "paddings": paddings,
-                                    "strides": strides,
-                                    "data_format": data_format
-                                }, {}]
-
-                                ops_config = [{
-                                    "op_type": "conv2d",
-                                    "op_inputs": {
-                                        "Input": ["input_data"],
-                                        "Filter": ["conv2d_weight"]
-                                    },
-                                    "op_outputs": {
-                                        "Output": ["conv_output_data"]
-                                    },
-                                    "op_attrs": dics[0]
-                                }, {
-                                    "op_type": "relu",
-                                    "op_inputs": {
-                                        "X": ["conv_output_data"]
-                                    },
-                                    "op_outputs": {
-                                        "Out": ["relu_output_data"]
-                                    },
-                                    "op_attrs": dics[1]
-                                }]
-                                ops = self.generate_op_config(ops_config)
-
-                                program_config = ProgramConfig(
-                                    ops=ops,
-                                    weights={
-                                        "conv2d_weight": TensorConfig(
-                                            data_gen=partial(generate_weight1,
-                                                             dics))
-                                    },
-                                    inputs={
-                                        "input_data": TensorConfig(
-                                            data_gen=partial(generate_input1,
-                                                             dics))
-                                    },
-                                    outputs=["relu_output_data"])
-
-                                yield program_config
+        for batch in [1, 2, 4]:
+            for strides in [[1, 1], [2, 2], [1, 2]]:
+                for paddings in [[0, 3], [1, 2, 3, 4]]:
+                    for groups in [1, 2, 3]:
+                        for padding_algorithm in ['EXPLICIT', 'SAME', 'VALID']:
+                            for dilations in [[1, 1], [2, 2], [1, 2]]:
+                                for data_format in ['NCHW']:
+
+                                    dics = [{
+                                        "data_fromat": data_format,
+                                        "dilations": dilations,
+                                        "padding_algorithm": padding_algorithm,
+                                        "groups": groups,
+                                        "paddings": paddings,
+                                        "strides": strides,
+                                        "data_format": data_format
+                                    }, {}]
+
+                                    if padding_algorithm == 'EXPLICIT':
+                                        ops_config = [{
+                                            "op_type": "conv2d",
+                                            "op_inputs": {
+                                                "Input": ["input_data"],
+                                                "Filter": ["conv2d_weight"]
+                                            },
+                                            "op_outputs": {
+                                                "Output": ["conv_output_data"]
+                                            },
+                                            "op_attrs": dics[0]
+                                        }, {
+                                            "op_type": "relu",
+                                            "op_inputs": {
+                                                "X": ["conv_output_data"]
+                                            },
+                                            "op_outputs": {
+                                                "Out": ["output_data"]
+                                            },
+                                            "op_attrs": dics[1]
+                                        }]
+                                    else:
+                                        ops_config = [{
+                                            "op_type": "conv2d",
+                                            "op_inputs": {
+                                                "Input": ["input_data"],
+                                                "Filter": ["conv2d_weight"]
+                                            },
+                                            "op_outputs": {
+                                                "Output": ["output_data"]
+                                            },
+                                            "op_attrs": dics[0]
+                                        }]
+                                    ops = self.generate_op_config(ops_config)
+
+                                    program_config = ProgramConfig(
+                                        ops=ops,
+                                        weights={
+                                            "conv2d_weight":
+                                            TensorConfig(data_gen=partial(
+                                                generate_weight1, dics))
+                                        },
+                                        inputs={
+                                            "input_data":
+                                            TensorConfig(data_gen=partial(
+                                                generate_input1, batch, dics))
+                                        },
+                                        outputs=["output_data"])
+
+                                    yield program_config
 
     def sample_predictor_configs(
             self, program_config) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
-            if len(attrs[0]['paddings']) == 4:
+            if attrs[0]['groups'] == 1:
                 self.dynamic_shape.min_input_shape = {
                     "input_data": [1, 3, 32, 32],
-                    '': []
+                    "output_data": [1, 24, 32, 32]
                 }
                 self.dynamic_shape.max_input_shape = {
                     "input_data": [4, 3, 64, 64],
-                    '': []
+                    "output_data": [4, 24, 64, 64]
                 }
                 self.dynamic_shape.opt_input_shape = {
                     "input_data": [1, 3, 64, 64],
-                    '': []
+                    "output_data": [1, 24, 64, 64]
+                }
+            elif attrs[0]['groups'] == 2:
+                self.dynamic_shape.min_input_shape = {
+                    "input_data": [1, 6, 32, 32],
+                    "output_data": [1, 24, 32, 32]
+                }
+                self.dynamic_shape.max_input_shape = {
+                    "input_data": [4, 6, 64, 64],
+                    "output_data": [4, 24, 64, 64]
+                }
+                self.dynamic_shape.opt_input_shape = {
+                    "input_data": [1, 6, 64, 64],
+                    "output_data": [1, 24, 64, 64]
                 }
             else:
                 self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 3, 32, 32]
+                    "input_data": [1, 9, 32, 32],
+                    "output_data": [1, 24, 32, 32]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 3, 64, 64]
+                    "input_data": [4, 9, 64, 64],
+                    "output_data": [4, 24, 64, 64]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 3, 64, 64]
+                    "input_data": [1, 9, 64, 64],
+                    "output_data": [1, 24, 64, 64]
                 }
 
         def clear_dynamic_shape():
@@ -145,11 +167,7 @@ def clear_dynamic_shape():
             self.dynamic_shape.opt_input_shape = {}
 
         def generate_trt_nodes_num(attrs, dynamic_shape):
-            # TODO: This is just the example, need to be fixed.
-            if len(attrs[0]['paddings']) == 4:
-                return 1, 2
-            else:
-                return 1, 2
+            return 1, 2
 
         attrs = [
             program_config.ops[i].attrs
@@ -169,6 +187,7 @@ def generate_trt_nodes_num(attrs, dynamic_shape):
             attrs, False), (1e-5, 1e-5)
 
         # for dynamic_shape
+
         generate_dynamic_shape(attrs)
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), generate_trt_nodes_num(attrs,
@@ -181,29 +200,18 @@ def generate_trt_nodes_num(attrs, dynamic_shape):
             attrs, True), (1e-5, 1e-5)
 
     def add_skip_trt_case(self):
-        # TODO(wilber): This is just the example to illustrate the skip usage.
        def teller1(program_config, predictor_config):
-            if len(program_config.ops[0].attrs['paddings']) == 4:
+            if program_config.ops[0].attrs[
+                    'padding_algorithm'] == "SAME" or program_config.ops[
+                        0].attrs['padding_algorithm'] == "VALID":
                 return True
             return False
 
         self.add_skip_case(
             teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
-            "NOT Implemented: we need to add support in the future ....TODO, just for the example"
+            "When padding_algorithm is 'SAME' or 'VALID', Trt dose not support. In this case, trt build error is caused by scale op."
         )
 
-        def teller2(program_config, predictor_config):
-            if (
-                program_config.ops[0].attrs['dilations'][0] == 1 and
-                program_config.ops[0].attrs['dilations'][0] == 2
-            ) or program_config.ops[0].attrs['padding_algorithm'] != 'EXPLICIT':
-                return True
-            return False
-
-        self.add_skip_case(teller2, SkipReasons.TRT_NOT_SUPPORT,
-                           "TODO, just for the example")
-        pass
-
     def test(self):
         self.add_skip_trt_case()
         self.run_test()
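
For a sense of the added coverage, the rewritten generator sweeps batch size, stride, padding, group count, padding algorithm, and dilation instead of two hand-picked cases. A small sketch (not part of the test) that counts the enumerated combinations:

from itertools import product

combos = list(
    product([1, 2, 4],                      # batch
            [[1, 1], [2, 2], [1, 2]],       # strides
            [[0, 3], [1, 2, 3, 4]],         # paddings
            [1, 2, 3],                      # groups
            ['EXPLICIT', 'SAME', 'VALID'],  # padding_algorithm
            [[1, 1], [2, 2], [1, 2]],       # dilations
            ['NCHW']))                      # data_format
print(len(combos))  # 486 program configs

Sweeping the grid this way is what actually exercises the 4-element paddings and SAME/VALID paths that the converter and teller changes above now handle, with the SAME/VALID cases skipped via add_skip_case.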
