
Commit 7325c9f

add unittest (PaddlePaddle#36371)
1 parent 06bd348 commit 7325c9f

19 files changed with 208 additions and 35 deletions

paddle/fluid/inference/tensorrt/op_teller.cc

Lines changed: 97 additions & 12 deletions
@@ -174,6 +174,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
                 << " op does not support input's dim is 1 in tensorrt.";
         return false;
       }
+      // TODO(inference): fix
+      if (x_shape.size() == 2 && !with_dynamic_shape) {
+        VLOG(3) << "activation op does not support input's dim is 2 in "
+                   "tensorrt static shape, the output shape has diff.";
+        return false;
+      }
     }
 
     if (op_type == "pool2d") {
@@ -346,6 +352,24 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
         }
       }
     }
+    if (op_type == "softmax") {
+      auto* block = desc.Block();
+      if (block == nullptr) {
+        VLOG(3) << "The block desc is nullptr, we can't continue to analyze. "
+                   "Developers need to check whether block_desc is passed in "
+                   "the pass.";
+        return false;
+      }
+      auto x_var_name = desc.Input("X")[0];
+      auto* x_var_desc = block->FindVar(x_var_name);
+      const auto x_shape = x_var_desc->GetShape();
+      // TODO(inference): fix
+      if (x_shape.size() == 2 && !with_dynamic_shape) {
+        VLOG(3) << "softmax op does not support input's dim is 2 in tensorrt "
+                   "static shape, the output shape has diff.";
+        return false;
+      }
+    }
     if (op_type == "group_norm") {
       if (!with_dynamic_shape) return false;
       bool has_attrs = (desc.HasAttr("epsilon") && desc.HasAttr("groups"));
@@ -357,20 +381,35 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
     if (op_type == "concat") {
       if (!desc.HasAttr("axis")) {
         return false;
+      }
+      int axis = BOOST_GET_CONST(int, desc.GetAttr("axis"));
+      if (with_dynamic_shape) {
+        if (axis < 0) return false;
       } else {
-        int axis = BOOST_GET_CONST(int, desc.GetAttr("axis"));
-        if (with_dynamic_shape) {
-          if (axis < 0) return false;
-        } else {
-          if (axis <= 0) return false;
-        }
-        auto concat_inputs = desc.Inputs();
-        if (concat_inputs.find("AxisTensor") != concat_inputs.end()) {
-          if (desc.Input("AxisTensor").size() >= 1) {
-            return false;
-          }
+        if (axis <= 0) return false;
+      }
+      auto concat_inputs = desc.Inputs();
+      if (concat_inputs.find("AxisTensor") != concat_inputs.end()) {
+        if (desc.Input("AxisTensor").size() >= 1) {
+          return false;
         }
       }
+      auto* block = desc.Block();
+      if (block == nullptr) {
+        VLOG(3) << "The block desc is nullptr, we can't continue to analyze. "
+                   "Developers need to check whether block_desc is passed in "
+                   "the pass.";
+        return false;
+      }
+      auto x_var_name = desc.Input("X")[0];
+      auto* x_var_desc = block->FindVar(x_var_name);
+      const auto x_shape = x_var_desc->GetShape();
+      // TODO(inference): fix
+      if (x_shape.size() == 2 && !with_dynamic_shape) {
+        VLOG(3) << "concat op does not support input's dim is 2 in tensorrt "
+                   "static shape, the output shape has diff.";
+        return false;
+      }
     }
     if (op_type == "transpose2" || op_type == "transpose") {
       if (!desc.HasAttr("axis")) {
@@ -687,6 +726,22 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
                 << desc.Output("Y").size() << ".";
         return false;
       }
+      auto* block = desc.Block();
+      if (block == nullptr) {
+        VLOG(3) << "The block desc is nullptr, we can't continue to analyze. "
+                   "Developers need to check whether block_desc is passed in "
+                   "the pass.";
+        return false;
+      }
+      auto x_var_name = desc.Input("X")[0];
+      auto* x_var_desc = block->FindVar(x_var_name);
+      const auto x_shape = x_var_desc->GetShape();
+      // TODO(inference): fix
+      if (x_shape.size() == 2 && !with_dynamic_shape) {
+        VLOG(3) << "batch_norm op does not support input's dim is 2 in "
+                   "tensorrt static shape, the output shape has diff.";
+        return false;
+      }
     }
 
     if (op_type == "split") {
@@ -774,6 +829,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
         VLOG(3) << "The output_length should be equal to the output size.";
         return false;
       }
+      // TODO(inference): fix
+      if (x_shape.size() == 2 && !with_dynamic_shape) {
+        VLOG(3) << "split op does not support input's dim is 2 in tensorrt "
+                   "static shape. The output shape has diff.";
+        return false;
+      }
     }
     if (op_type == "scale") {
       auto scale_inputs = desc.Inputs();
@@ -926,6 +987,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
         VLOG(3) << "gelu op does not support input's dim is 1 in tensorrt.";
         return false;
       }
+      // TODO(inference): fix
+      if (x_shape.size() == 2 && !with_dynamic_shape) {
+        VLOG(3) << "gelu op does not support input's dim is 2 in tensorrt "
+                   "static shape, the output shape has diff.";
+        return false;
+      }
     }
 
     if (op_type == "layer_norm") {
@@ -1041,7 +1108,13 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
       auto* x_var_desc = block->FindVar(x_var_name);
       const auto x_shape = x_var_desc->GetShape();
       if (x_shape.size() == 1) {
-        VLOG(3) << "dropout op does not support input's dim is 1 in tensorrt.";
+        VLOG(3) << "scale op does not support input's dim is 1 in tensorrt.";
+        return false;
+      }
+      // TODO(inference): fix
+      if (x_shape.size() == 2 && !with_dynamic_shape) {
+        VLOG(3) << "scale op does not support input's dim is 2 in tensorrt "
+                   "static shape, the output shape has diff.";
         return false;
       }
     }
@@ -1061,6 +1134,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
         VLOG(3) << "swish op does not support input's dim is 1 in tensorrt.";
         return false;
       }
+      // TODO(inference): fix
+      if (x_shape.size() == 2 && !with_dynamic_shape) {
+        VLOG(3) << "swish op does not support input's dim is 2 in tensorrt "
+                   "static shape, the output shape has diff.";
+        return false;
+      }
     }
 
     if (op_type == "prelu") {
@@ -1314,6 +1393,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
         VLOG(3) << "clip op does not support input's dim is 1 in tensorrt.";
         return false;
       }
+      // TODO(inference): fix
+      if (x_shape.size() == 2 && !with_dynamic_shape) {
+        VLOG(3) << "clip op does not support input's dim is 2 in tensorrt "
+                   "static shape, the output shape has diff.";
+        return false;
+      }
     }
 
     if (op_type == "reduce_sum" || op_type == "reduce_mean") {

paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.h

Lines changed: 1 addition & 1 deletion
@@ -161,7 +161,7 @@ class HardSwishPluginDynamicCreator : public nvinfer1::IPluginCreator {
  public:
   HardSwishPluginDynamicCreator() {}
   const char* getPluginName() const TRT_NOEXCEPT override {
-    return "hardswish_plugin_dynamic";
+    return "hard_swish_plugin_dynamic";
   }
 
   const char* getPluginVersion() const TRT_NOEXCEPT override { return "1"; }

python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_anchor_generator.py

Lines changed: 5 additions & 1 deletion
@@ -14,6 +14,7 @@
 
 from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
 from program_config import TensorConfig, ProgramConfig
+import unittest
 import numpy as np
 import paddle.inference as paddle_infer
 from functools import partial
@@ -83,7 +84,10 @@ def clear_dynamic_shape():
             self.dynamic_shape.opt_input_shape = {}
 
         def generate_trt_nodes_num(attrs, dynamic_shape):
-            return 1, 3
+            if dynamic_shape:
+                return 1, 3
+            else:
+                return 0, 4
 
         attrs = [
             program_config.ops[i].attrs

python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py

Lines changed: 13 additions & 0 deletions
@@ -14,6 +14,7 @@
 
 from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
 from program_config import TensorConfig, ProgramConfig
+import unittest
 import numpy as np
 import paddle.inference as paddle_infer
 from functools import partial
@@ -211,6 +212,18 @@ def teller1(program_config, predictor_config):
         self.add_skip_case(teller1, SkipReasons.TRT_NOT_SUPPORT,
                            "INPUT MomentumTensor NOT SUPPORT")
 
+        def teller2(program_config, predictor_config):
+            if len(
+                    program_config.inputs['batch_norm_input'].shape
+            ) == 2 and not predictor_config.tensorrt_dynamic_shape_enabled():
+                return True
+            return False
+
+        self.add_skip_case(
+            teller2, SkipReasons.TRT_NOT_IMPLEMENTED,
+            "The output shape has diff, but we can add shuffle layer to resolve it."
+        )
+
     def test(self):
         self.add_skip_trt_case()
         self.run_test()

python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py

Lines changed: 16 additions & 2 deletions
@@ -18,6 +18,7 @@
 import paddle.inference as paddle_infer
 from functools import partial
 from typing import Optional, List, Callable, Dict, Any, Set
+import unittest
 
 
 class TrtConvertClipTest(TrtLayerAutoScanTest):
@@ -84,8 +85,7 @@ def generate_weight2(attrs: List[Dict[str, Any]]):
 
         yield program_config
 
-    def sample_predictor_configs(
-            self, program_config) -> (paddle_infer.Config, List[int], float):
+    def sample_predictor_configs(self, program_config):
         def generate_dynamic_shape(attrs):
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [1]}
@@ -146,7 +146,21 @@ def generate_trt_nodes_num(attrs, dynamic_shape):
         yield self.create_inference_config(), generate_trt_nodes_num(attrs,
                                                                      True), 1e-5
 
+    def add_skip_trt_case(self):
+        def teller1(program_config, predictor_config):
+            if len(
+                    program_config.inputs['input_data'].shape
+            ) == 2 and not predictor_config.tensorrt_dynamic_shape_enabled():
+                return True
+            return False
+
+        self.add_skip_case(
+            teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
+            "The output shape has diff, but we can add shuffle layer to resolve it."
+        )
+
     def test(self):
+        self.add_skip_trt_case()
         self.run_test()

python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py

Lines changed: 13 additions & 0 deletions
@@ -14,6 +14,7 @@
 
 from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
 from program_config import TensorConfig, ProgramConfig
+import unittest
 import numpy as np
 import paddle.inference as paddle_infer
 from functools import partial
@@ -317,6 +318,24 @@ def teller1(program_config, predictor_config):
         self.add_skip_case(teller1, SkipReasons.TRT_NOT_SUPPORT,
                            "INPUT AxisTensor NOT SUPPORT")
 
+        def teller2(program_config, predictor_config):
+            if len(
+                    program_config.inputs['concat_input1'].shape
+            ) == 2 and not predictor_config.tensorrt_dynamic_shape_enabled():
+                return True
+            return False
+
+        self.add_skip_case(
+            teller2, SkipReasons.TRT_NOT_IMPLEMENTED,
+            "The output shape has diff, but we can add shuffle layer to resolve it."
+        )
+
     def test(self):
         self.add_skip_trt_case()
         self.run_test()

python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py

Lines changed: 7 additions & 2 deletions
@@ -14,6 +14,7 @@
 
 from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
 from program_config import TensorConfig, ProgramConfig
+import unittest
 import numpy as np
 import paddle.inference as paddle_infer
 from functools import partial
@@ -141,15 +142,19 @@ def generate_trt_nodes_num(attrs, dynamic_shape):
 
     def add_skip_trt_case(self):
        def teller1(program_config, predictor_config):
-            if self.dims == 2:
+            if len(
+                    program_config.inputs['input_data'].shape
+            ) == 2 and not predictor_config.tensorrt_dynamic_shape_enabled():
                 return True
             return False
 
        self.add_skip_case(
             teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
-            "When input dims is 2, pulgin will product a 4 dims output.")
+            "The output shape has diff, but we can add shuffle layer to resolve it."
+        )
 
     def test(self):
+        self.add_skip_trt_case()
         self.run_test()

python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py

Lines changed: 1 addition & 0 deletions
@@ -18,6 +18,7 @@
 import paddle.inference as paddle_infer
 from functools import partial
 from typing import Optional, List, Callable, Dict, Any, Set
+import unittest
 
 
 class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest):

python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multihead_matmul.py

Lines changed: 4 additions & 3 deletions
@@ -14,6 +14,7 @@
 
 from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
 from program_config import TensorConfig, ProgramConfig
+import unittest
 import numpy as np
 import paddle.inference as paddle_infer
 from functools import partial
@@ -26,16 +27,16 @@ def is_program_valid(self, program_config: ProgramConfig) -> bool:
 
     def sample_program_configs(self):
         def generate_input1(batch, dim1):
-            return np.random.randn(batch, dim1, 768).astype(np.float32)
+            return np.random.random((batch, dim1, 768)).astype(np.float32)
 
         def generate_input2(shape):
             return np.random.random(shape).astype(np.float32)
 
         def generate_weight1():
-            return np.random.randn(768, 768).astype(np.float32)
+            return np.random.random((768, 768)).astype(np.float32)
 
         def generate_weight2():
-            return np.random.randn(768).astype(np.float32)
+            return np.random.random(768).astype(np.float32)
 
         for batch in [1, 2, 4]:
             self.batch = batch
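
The multihead_matmul test swaps np.random.randn for np.random.random. The two NumPy calls differ in both call signature and distribution, which is worth keeping in mind when reading the hunk above; a short standalone illustration follows (plain NumPy, nothing test-specific assumed):

import numpy as np

# np.random.randn takes each dimension as a separate argument and samples
# from a standard normal distribution, so values may be negative or large.
a = np.random.randn(1, 8, 768).astype(np.float32)

# np.random.random takes a single shape tuple and samples uniformly from
# [0.0, 1.0), which keeps the generated inputs and weights bounded.
b = np.random.random((1, 8, 768)).astype(np.float32)

assert a.shape == b.shape == (1, 8, 768)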
