Commit 22bbaa8

enkileeco63oc authored and committed
【Error Message No. 17】 paddle/cinn/frontend/decomposer/* (PaddlePaddle#64310)
* fix * fix
1 parent 67dd6df commit 22bbaa8
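
Every hunk below follows the same conversion: a glog-style CHECK_EQ / CHECK_GT assertion becomes a PADDLE_ENFORCE_EQ / PADDLE_ENFORCE_GT call carrying a phi::errors::InvalidArgument message, and "paddle/common/enforce.h" is included where it was missing. (In computation.cc the deleted CHECK_EQ duplicates a size check already enforced just above it, so it is dropped rather than converted.) A minimal before/after sketch of the pattern, using the CINN frontend Instruction type as in the hunks; the wrapper function itself is hypothetical and only for illustration:

#include "paddle/common/enforce.h"  // brings in PADDLE_ENFORCE_* and phi::errors

// Hypothetical wrapper, for illustration of the conversion only.
void CheckReluArity(const Instruction& instr) {
  // Before: CHECK_EQ(instr->inputs.size(), 1UL) << "1 input tensor for " << instr->op_type;
  // After: a structured Paddle error instead of a glog abort.
  PADDLE_ENFORCE_EQ(
      instr->inputs.size(),
      1UL,
      phi::errors::InvalidArgument("1 input tensor for ", instr->op_type));
}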

File tree

7 files changed: +164 -74 lines changed


paddle/cinn/frontend/computation.cc

Lines changed: 0 additions & 2 deletions
@@ -209,7 +209,6 @@ void CinnComputation::SetTensorData(hlir::framework::Tensor &t,
       true,
       phi::errors::InvalidArgument("The size of the input data is not equal to "
                                    "the size of the tensor."));
-  CHECK_EQ(size, t->shape().numel() * t->type().bytes());
   context_->target.arch.Visit(adt::match{
       [&](common::UnknownArch) { CINN_NOT_IMPLEMENTED; },
       [&](common::X86Arch) { memcpy(tdata, data, size); },
@@ -233,7 +232,6 @@ void CinnComputation::GetTensorData(hlir::framework::Tensor &t,
       true,
       phi::errors::InvalidArgument("The size of the input data is not equal to "
                                    "the size of the tensor."));
-  CHECK_EQ(size, t->shape().numel() * t->type().bytes());
   context_->target.arch.Visit(adt::match{
       [&](common::UnknownArch) { CINN_NOT_IMPLEMENTED; },
       [&](common::X86Arch) { memcpy(data, tdata, size); },

paddle/cinn/frontend/decomposer/activation.cc

Lines changed: 33 additions & 16 deletions
@@ -14,16 +14,21 @@
 
 #include "paddle/cinn/frontend/decomposer_registry.h"
 #include "paddle/cinn/frontend/syntax.h"
+#include "paddle/common/enforce.h"
 
 namespace cinn {
 namespace frontend {
 namespace decomposer {
 
 void relu(const Instruction& instr, const DecomposerContext& context) {
-  CHECK_EQ(instr->inputs.size(), 1UL)
-      << " 1 input tensor for " << instr->op_type;
-  CHECK_EQ(instr->outputs.size(), 1UL)
-      << "1 output tensor for " << instr->op_type;
+  PADDLE_ENFORCE_EQ(
+      instr->inputs.size(),
+      1UL,
+      phi::errors::InvalidArgument("1 input tensor for ", instr->op_type));
+  PADDLE_ENFORCE_EQ(
+      instr->outputs.size(),
+      1UL,
+      phi::errors::InvalidArgument("1 output tensor for ", instr->op_type));
   auto x = instr->inputs[0];
   auto output = instr->outputs[0];
   auto* builder = context.builder();
@@ -39,10 +44,14 @@ void relu(const Instruction& instr, const DecomposerContext& context) {
 }
 
 void relu_grad(const Instruction& instr, const DecomposerContext& context) {
-  CHECK_EQ(instr->inputs.size(), 2UL)
-      << " 2 input tensors for " << instr->op_type;
-  CHECK_EQ(instr->outputs.size(), 1UL)
-      << "1 output tensor for " << instr->op_type;
+  PADDLE_ENFORCE_EQ(
+      instr->inputs.size(),
+      2UL,
+      phi::errors::InvalidArgument(" 2 input tensors for ", instr->op_type));
+  PADDLE_ENFORCE_EQ(
+      instr->outputs.size(),
+      1UL,
+      phi::errors::InvalidArgument("1 output tensor for ", instr->op_type));
   auto dout = instr->inputs[0];
   auto out = instr->inputs[1];
   auto dx = instr->outputs[0];
@@ -60,10 +69,14 @@ void relu_grad(const Instruction& instr, const DecomposerContext& context) {
 }
 
 void gelu(const Instruction& instr, const DecomposerContext& context) {
-  CHECK_EQ(instr->inputs.size(), 1UL)
-      << " 1 input tensor for " << instr->op_type;
-  CHECK_EQ(instr->outputs.size(), 1UL)
-      << "1 output tensor for " << instr->op_type;
+  PADDLE_ENFORCE_EQ(
+      instr->inputs.size(),
+      1UL,
+      phi::errors::InvalidArgument(" 1 input tensor for ", instr->op_type));
+  PADDLE_ENFORCE_EQ(
+      instr->outputs.size(),
+      1UL,
+      phi::errors::InvalidArgument("1 output tensor for ", instr->op_type));
   auto x = instr->inputs[0];
   auto output = instr->outputs[0];
   auto* builder = context.builder();
@@ -86,10 +99,14 @@ void gelu(const Instruction& instr, const DecomposerContext& context) {
 }
 
 void softmax(const Instruction& instr, const DecomposerContext& context) {
-  CHECK_EQ(instr->inputs.size(), 1UL)
-      << " 1 input tensor for " << instr->op_type;
-  CHECK_EQ(instr->outputs.size(), 1UL)
-      << "1 output tensor for " << instr->op_type;
+  PADDLE_ENFORCE_EQ(
+      instr->inputs.size(),
+      1UL,
+      phi::errors::InvalidArgument(" 1 input tensor for ", instr->op_type));
+  PADDLE_ENFORCE_EQ(
+      instr->outputs.size(),
+      1UL,
+      phi::errors::InvalidArgument("1 output tensor for ", instr->op_type));
   auto x = instr->inputs[0];
   auto output = instr->outputs[0];
   auto* builder = context.builder();

paddle/cinn/frontend/decomposer/batch_norm.cc

Lines changed: 74 additions & 31 deletions
@@ -14,7 +14,7 @@
 
 #include "paddle/cinn/frontend/decomposer_registry.h"
 #include "paddle/cinn/frontend/syntax.h"
-
+#include "paddle/common/enforce.h"
 namespace cinn {
 namespace frontend {
 namespace decomposer {
@@ -25,9 +25,12 @@ struct BatchNormHelper {
                   const std::vector<int>& arg_param_shape,
                   std::string data_layout,
                   std::string bn_op_type) {
-    CHECK_EQ(arg_x_shape.size(), 4UL)
-        << "Only 4-D input tensor is supported, but get " << arg_x_shape.size()
-        << "-D input tensor.";
+    PADDLE_ENFORCE_EQ(arg_x_shape.size(),
+                      4UL,
+                      phi::errors::InvalidArgument(
+                          "Only 4-D input tensor is supported, but get %d",
+                          arg_x_shape.size(),
+                          "-D input tensor."));
 
     builder = net_builder;
     x_shape = arg_x_shape;
@@ -162,21 +165,34 @@ struct BatchNormHelper {
 
 void batch_norm_train(const Instruction& instr,
                       const DecomposerContext& context) {
-  CHECK_EQ(instr->inputs.size(), 5UL)
-      << "The number of the given inputs is not equal to the required for op "
-      << instr->op_type;
-  CHECK_EQ(instr->outputs.size(), 5UL)
-      << "The number of the given outputs is not equal to the required for op "
-      << instr->op_type;
+  PADDLE_ENFORCE_EQ(
+      instr->inputs.size(),
+      5UL,
+      phi::errors::InvalidArgument(
+          "The number of the given inputs is not equal to the required"));
+  PADDLE_ENFORCE_EQ(
+      instr->outputs.size(),
+      5UL,
+      phi::errors::InvalidArgument(
+          "The number of the given outputs is not equal to the required"));
 
   auto& x = instr->inputs[0];
   auto& scale = instr->inputs[1];
   auto& bias = instr->inputs[2];
   auto& moving_mean = instr->inputs[3];
   auto& moving_variance = instr->inputs[4];
-  CHECK_EQ(scale->type, bias->type);
-  CHECK_EQ(scale->type, moving_mean->type);
-  CHECK_EQ(scale->type, moving_variance->type);
+  PADDLE_ENFORCE_EQ(
+      scale->type == bias->type,
+      true,
+      phi::errors::InvalidArgument("The type of scale and bias is not equal"));
+  PADDLE_ENFORCE_EQ(scale->type == moving_mean->type,
+                    true,
+                    phi::errors::InvalidArgument(
+                        "The type of scale and moving_mean is not equal"));
+  PADDLE_ENFORCE_EQ(scale->type == moving_variance->type,
+                    true,
+                    phi::errors::InvalidArgument(
+                        "The type of scale and moving_variance is not equal"));
 
   float epsilon = instr.GetAttrs<float>("epsilon");
   float momentum = instr.GetAttrs<float>("momentum");
@@ -219,21 +235,34 @@ void batch_norm_train(const Instruction& instr,
 
 void batch_norm_grad(const Instruction& instr,
                      const DecomposerContext& context) {
-  CHECK_EQ(instr->inputs.size(), 5UL)
-      << " The number of the given inputs is not equal to the required "
-      << instr->op_type;
-  CHECK_EQ(instr->outputs.size(), 3UL)
-      << " The number of the given outputs is not equal to the required"
-      << instr->op_type;
+  PADDLE_ENFORCE_EQ(
+      instr->inputs.size(),
+      5UL,
+      phi::errors::InvalidArgument(
+          "The number of the given inputs is not equal to the required"));
+  PADDLE_ENFORCE_EQ(
+      instr->outputs.size(),
+      3UL,
+      phi::errors::InvalidArgument(
+          "The number of the given outputs is not equal to the required"));
 
   auto& y_grad = instr->inputs[0];
   auto& x = instr->inputs[1];
   auto& scale = instr->inputs[2];
   auto& save_mean = instr->inputs[3];
   auto& save_variance = instr->inputs[4];
-  CHECK_EQ(y_grad->type, x->type);
-  CHECK_EQ(scale->type, save_mean->type);
-  CHECK_EQ(scale->type, save_variance->type);
+  PADDLE_ENFORCE_EQ(
+      y_grad->type == x->type,
+      true,
+      phi::errors::InvalidArgument("The type of y_grad and x is not equal"));
+  PADDLE_ENFORCE_EQ(scale->type == save_mean->type,
+                    true,
+                    phi::errors::InvalidArgument(
+                        "The type of scale and save_mean is not equal"));
+  PADDLE_ENFORCE_EQ(scale->type == save_variance->type,
+                    true,
+                    phi::errors::InvalidArgument(
+                        "The type of scale and save_variance is not equal"));
 
   auto epsilon = instr.GetAttrs<float>("epsilon");
   auto layout = instr.GetAttrs<std::string>("data_layout");
@@ -304,21 +333,35 @@ void batch_norm_grad(const Instruction& instr,
 }
 
 void batch_norm(const Instruction& instr, const DecomposerContext& context) {
-  CHECK_EQ(instr->inputs.size(), 5UL)
-      << "The number of the given inputs is not equal to the required for op "
-      << instr->op_type;
-  CHECK_EQ(instr->outputs.size(), 1UL)
-      << "The number of the given outputs is not equal to the required for op "
-      << instr->op_type;
+  PADDLE_ENFORCE_EQ(
+      instr->inputs.size(),
+      5UL,
+      phi::errors::InvalidArgument(
+          "The number of the given inputs is not equal to the required"));
+
+  PADDLE_ENFORCE_EQ(
+      instr->outputs.size(),
+      1UL,
+      phi::errors::InvalidArgument(
+          "The number of the given outputs is not equal to the required"));
 
   auto& x = instr->inputs[0];
   auto& scale = instr->inputs[1];
   auto& bias = instr->inputs[2];
   auto& moving_mean = instr->inputs[3];
   auto& moving_variance = instr->inputs[4];
-  CHECK_EQ(scale->type, bias->type);
-  CHECK_EQ(scale->type, moving_mean->type);
-  CHECK_EQ(scale->type, moving_variance->type);
+  PADDLE_ENFORCE_EQ(
+      scale->type == bias->type,
+      true,
+      phi::errors::InvalidArgument("The type of scale and bias is not equal"));
+  PADDLE_ENFORCE_EQ(scale->type == moving_mean->type,
+                    true,
+                    phi::errors::InvalidArgument(
+                        "The type of scale and moving_mean is not equal"));
+  PADDLE_ENFORCE_EQ(scale->type == moving_variance->type,
+                    true,
+                    phi::errors::InvalidArgument(
+                        "The type of scale and moving_variance is not equal"));
 
   float epsilon = instr.GetAttrs<float>("epsilon");
   float momentum = instr.GetAttrs<float>("momentum");
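
For checks whose operands are not plain counts, such as the dtype comparisons above, the commit compares the values itself and enforces the boolean result against true. A sketch of that variant, assuming the CINN frontend Variable type used in the hunks above; the standalone helper is hypothetical:

// Hypothetical helper showing the boolean-comparison form used for type checks.
void CheckSameType(const Variable& scale, const Variable& bias) {
  PADDLE_ENFORCE_EQ(
      scale->type == bias->type,
      true,
      phi::errors::InvalidArgument("The type of scale and bias is not equal"));
}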

paddle/cinn/frontend/decomposer/elementwise.cc

Lines changed: 8 additions & 5 deletions
@@ -14,16 +14,19 @@
 
 #include "paddle/cinn/frontend/decomposer_registry.h"
 #include "paddle/cinn/frontend/syntax.h"
-
+#include "paddle/common/enforce.h"
 namespace cinn {
 namespace frontend {
 namespace decomposer {
 
 void sum(const Instruction& instr, const DecomposerContext& context) {
-  CHECK_GT(instr->inputs.size(), 0UL)
-      << "At least 1 input tensor for " << instr->op_type;
-  CHECK_EQ(instr->outputs.size(), 1UL)
-      << "1 output tensor for " << instr->op_type;
+  PADDLE_ENFORCE_GT(instr->inputs.size(),
+                    0UL,
+                    phi::errors::InvalidArgument("At least 1 input tensor."));
+  PADDLE_ENFORCE_EQ(
+      instr->outputs.size(),
+      1UL,
+      phi::errors::InvalidArgument("Output size should be 1 tensor."));
   auto inputs = instr->inputs;
   auto output = instr->outputs[0];
   auto* builder = context.builder();

paddle/cinn/frontend/decomposer/test_helper.cc

Lines changed: 16 additions & 8 deletions
@@ -13,7 +13,7 @@
 // limitations under the License.
 
 #include "paddle/cinn/frontend/decomposer/test_helper.h"
-
+#include "paddle/common/enforce.h"
 namespace cinn::frontend {
 
 void RunDecomposer(Program* prog,
@@ -55,14 +55,20 @@ void CopyFromVector<bool>(const std::vector<bool>& vec,
   auto* data = tensor->mutable_data<bool>(target);
 
   size_t numel = tensor->shape().numel();
-  CHECK_EQ(vec.size(), numel);
+  PADDLE_ENFORCE_EQ(vec.size(),
+                    numel,
+                    phi::errors::InvalidArgument(
+                        "The size of the input vector should be equal to the "
+                        "number of elements in the tensor."));
 
 #ifdef CINN_WITH_CUDA
   // why not use vector<bool> ? Because to optimizes space, each value is stored
   // in a single bit. So that the vector<bool> doesn't has data() function.
-  CHECK_EQ(sizeof(bool), sizeof(char))
-      << "The test need ensure the byte size of bool equal to the byte size of "
-         "char.";
+  PADDLE_ENFORCE_EQ(sizeof(bool) == sizeof(char),
+                    true,
+                    phi::errors::InvalidArgument(
+                        "The test need ensure the byte size of bool equal to "
+                        "the byte size of char."));
 
   std::vector<char> vec_char(numel);
   for (int i = 0; i < numel; ++i) vec_char[i] = static_cast<char>(vec[i]);
@@ -84,9 +90,11 @@ void CopyToVector<bool>(const hlir::framework::Tensor tensor,
 #ifdef CINN_WITH_CUDA
   // why not use vector<bool> ? Because to optimizes space, each value is stored
  // in a single bit. So that the vector<bool> doesn't has data() function.
-  CHECK_EQ(sizeof(bool), sizeof(char))
-      << "The test need ensure the byte size of bool equal to the byte size of "
-         "char.";
+  PADDLE_ENFORCE_EQ(
+      sizeof(bool) == sizeof(char),
+      true,
+      phi::errors::InvalidArgument("The test need ensure the byte size of bool "
+                                   "equal to the byte size of char."));
 
   std::vector<char> vec_char(numel);
   cudaMemcpy(
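
The CUDA branches touched above stage std::vector<bool> through std::vector<char> because vector<bool> is bit-packed and has no data() pointer, which is why the bool/char byte-size enforce exists. A host-only sketch of that staging step (hypothetical helper; the real code copies with cudaMemcpy):

#include <cstddef>
#include <cstring>
#include <vector>

// Expand a bit-packed vector<bool> into contiguous bytes before a raw copy.
void StageBoolVector(const std::vector<bool>& vec, char* dst) {
  std::vector<char> vec_char(vec.size());
  for (std::size_t i = 0; i < vec.size(); ++i) {
    vec_char[i] = static_cast<char>(vec[i]);
  }
  std::memcpy(dst, vec_char.data(), vec_char.size());  // cudaMemcpy in the real code
}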

paddle/cinn/frontend/decomposer/test_helper.h

Lines changed: 20 additions & 6 deletions
@@ -32,7 +32,7 @@
 #include "paddle/cinn/hlir/framework/tensor.h"
 #include "paddle/cinn/hlir/op/use_ops.h"
 #include "paddle/cinn/hlir/pass/use_pass.h"
-
+#include "paddle/common/enforce.h"
 namespace cinn::frontend {
 
 using CPUKernelFunc = std::function<void(const std::vector<size_t>& lengths,
@@ -83,7 +83,11 @@ void CopyFromVector(const std::vector<T>& vec,
   auto* data = tensor->mutable_data<T>(target);
 
   size_t numel = tensor->shape().numel();
-  CHECK_EQ(vec.size(), numel);
+  PADDLE_ENFORCE_EQ(vec.size(),
+                    numel,
+                    phi::errors::InvalidArgument(
+                        "The size of the input vector should be equal to the "
+                        "number of elements in the tensor."));
 
   if (target == cinn::common::DefaultNVGPUTarget()) {
 #ifdef CINN_WITH_CUDA
@@ -127,7 +131,11 @@ void CheckOutput(const std::vector<T>& actual,
                  const std::vector<T>& expect,
                  float atol = 1e-8,
                  float rtol = 1e-5) {
-  CHECK_EQ(actual.size(), expect.size());
+  PADDLE_ENFORCE_EQ(actual.size(),
+                    expect.size(),
+                    phi::errors::InvalidArgument(
+                        "The size of the actual result should be equal to the "
+                        "size of the expected result."));
 
   auto allclose = [](T a, T e, float atol, float rtol) {
     return abs(a - e) <= (atol + rtol * abs(e));
@@ -159,7 +167,11 @@ void CheckOutput(const std::vector<T>& actual,
         << " (expect), maximum_relative_diff=" << max_diff
         << " (absolute_diff=" << abs((actual[offset] - expect[offset]))
         << ")";
-  CHECK_EQ(num_diffs, 0);
+  PADDLE_ENFORCE_EQ(
+      num_diffs,
+      0,
+      phi::errors::InvalidArgument("The actual result is different from the "
+                                   "expected result, please check the log."));
 }
 
 template <typename T>
@@ -229,8 +241,10 @@ void RunAndCheckShape(NetBuilder* builder,
 
   for (size_t i = 0; i < output_names.size(); ++i) {
     auto tensor = scope->GetTensor(output_names[i]);
-    CHECK_EQ(tensor->shape().data() == output_shapes[i], true)
-        << "The " << i << "-th shape is expected to be " << output_shapes[i];
+    PADDLE_ENFORCE_EQ(tensor->shape().data() == output_shapes[i],
+                      true,
+                      phi::errors::InvalidArgument(
+                          "The shape of the output tensor is not correct"));
    if (output_vecs) {
       std::vector<T> vec;
       CopyToVector<T>(tensor, &vec);
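
CheckOutput keeps its tolerance rule from the context lines above, abs(a - e) <= atol + rtol * abs(e), and the new enforce only fires after every mismatch has been counted and logged. A minimal standalone version of that counting loop, assuming float data (hypothetical helper, same formula):

#include <cmath>
#include <cstddef>
#include <vector>

// Count elementwise mismatches under the atol/rtol rule used by CheckOutput.
std::size_t CountDiffs(const std::vector<float>& actual,
                       const std::vector<float>& expect,
                       float atol = 1e-8f,
                       float rtol = 1e-5f) {
  std::size_t num_diffs = 0;
  for (std::size_t i = 0; i < actual.size(); ++i) {
    if (std::abs(actual[i] - expect[i]) > atol + rtol * std::abs(expect[i])) {
      ++num_diffs;
    }
  }
  return num_diffs;
}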
