Commit 4e8ccc0

Merge branch 'develop' into cinn/margin
2 parents: d6b9b1d + 0519800

File tree

3,051 files changed: +59,986 additions, -38,061 deletions


.github/CODEOWNERS

Lines changed: 14 additions & 14 deletions
@@ -1,7 +1,7 @@
 # This file is migrated from CI script, it's an effort of modenizing our dev infra.
 # Code owners are expected to take responsibility for review patches to respective file.

-/CMakeLists.txt @wanghuancoder @Aurelius84 @XiaoguangHu01 @qili93
+/CMakeLists.txt @wanghuancoder @Aurelius84 @XiaoguangHu01
 paddle/fluid/distributed/collective @sneaxiy @ForFishes
 paddle/fluid/eager/autograd_meta.cc @JiabinYang @phlrain
 paddle/fluid/eager/autograd_meta.h @JiabinYang @phlrain
@@ -12,20 +12,20 @@ paddle/fluid/eager/grad_node_info.h @JiabinYang @phlrain
 paddle/fluid/eager/grad_tensor_holder.cc @JiabinYang @phlrain
 paddle/fluid/eager/grad_tensor_holder.h @JiabinYang @phlrain
 paddle/fluid/eager/tensor_wrapper.h @JiabinYang @phlrain
-paddle/fluid/framework/block_desc.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/details/op_registry.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/framework.proto @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/grad_op_desc_maker.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/ir/graph.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/ir/node.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/lod_tensor.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/op_desc.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/operator.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/scope.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/selected_rows.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/tensor.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
+paddle/fluid/framework/block_desc.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/details/op_registry.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/framework.proto @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/grad_op_desc_maker.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/ir/graph.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/ir/node.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/lod_tensor.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/op_desc.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/operator.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/scope.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/selected_rows.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/tensor.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
 paddle/fluid/framework/unused_var_check.cc @zhiqiu @phlrain
-paddle/fluid/framework/var_desc.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
+paddle/fluid/framework/var_desc.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
 paddle/fluid/operators/distributed/send_recv.proto.in @gongweibao @seiriosPlus
 paddle/fluid/prim/api/api.yaml @cxxly @xiaoguoguo626807 @Charles-hit @cyber-pioneer @JiabinYang
 paddle/fluid/prim/api/composite_backward/composite_backward_api.h @cxxly @xiaoguoguo626807 @Charles-hit @cyber-pioneer @JiabinYang
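The net effect of this file's hunks is a single ownership change: @qili93 is dropped as a required reviewer from every listed path. For reference, each CODEOWNERS line pairs a path pattern with the GitHub handles that are automatically requested for review when files matching that pattern change; after this commit, a change to paddle/fluid/framework/tensor.h requests @XiaoguangHu01, @zhiqiu, @Xreki, and @Aurelius84.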

.github/PULL_REQUEST_TEMPLATE.md

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
 <!-- Demo: https://github.com/PaddlePaddle/Paddle/pull/24810 -->

 ### PR Category
-<!-- One of [ User Experience | Execute Infrastructure | Operator Mechanism | CINN | Custom Device | Performance Optimization | Distributed Strategy | Parameter Server | Communication Library | Auto Parallel | Inference | Environment Adaptation | Others ] -->
+<!-- One of [ User Experience | Execute Infrastructure | Operator Mechanism | CINN | Custom Device | Performance Optimization | Distributed Strategy | Parameter Server | Communication Library | Auto Parallel | Inference | Environment Adaptation ] -->


 ### PR Types

cmake/external/xpu.cmake

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ if(NOT DEFINED XPU_XRE_BASE_VERSION)
   set(XPU_XRE_BASE_VERSION "4.32.0.1")
 endif()
 if(NOT DEFINED XPU_XHPC_BASE_DATE)
-  set(XPU_XHPC_BASE_DATE "20240712")
+  set(XPU_XHPC_BASE_DATE "20240804")
 endif()
 set(XPU_XCCL_BASE_VERSION "1.2.5")
 if(NOT DEFINED XPU_XFT_BASE_VERSION)
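Because the assignment sits behind an if(NOT DEFINED ...) guard, the date is only a default: a build can still pin a different XHPC snapshot by defining XPU_XHPC_BASE_DATE at configure time (for example, passing -DXPU_XHPC_BASE_DATE=20240712 to cmake to stay on the previous drop). This hunk just moves the default forward from 20240712 to 20240804.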

cmake/generic.cmake

Lines changed: 1 addition & 1 deletion
@@ -1312,7 +1312,7 @@ function(math_library TARGET)
   set(cc_srcs)
   set(cu_srcs)
   set(hip_srcs)
-  set(math_common_deps device_context framework_proto enforce)
+  set(math_common_deps device_context framework_proto phi common)
   if(WITH_GPU)
     if(${CMAKE_CUDA_COMPILER_VERSION} LESS 11.0)
       list(APPEND math_common_deps cub)
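Here math_library's baseline link set swaps the old enforce target for the phi and common libraries. This lines up with the C++ hunks below: the error-reporting utilities these targets now rely on (PADDLE_ENFORCE_*, ::common::errors) presumably live in those libraries after the migration.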

paddle/cinn/adt/adt.h

Lines changed: 1 addition & 1 deletion
@@ -283,7 +283,7 @@ struct Ok final {
   bool operator!=(const Ok&) const { return false; }
 };

-#define ADT_TODO() PADDLE_THROW(phi::errors::Fatal("TODO"))
+#define ADT_TODO() PADDLE_THROW(::common::errors::Fatal("TODO"))

 inline std::size_t hash_combine(std::size_t lhs, std::size_t rhs) {
   return lhs ^= rhs + 0x9e3779b9 + (lhs << 6) + (lhs >> 2);
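ADT_TODO() is the ADT module's placeholder for branches that are not implemented yet; the hunk only changes which namespace supplies the Fatal error it throws. A minimal sketch of a call site, with a hypothetical Visit/EvalConstant pair that is not part of this commit:

// Hypothetical visitor (illustration only): any alternative the code does
// not handle falls through to ADT_TODO(), which after this commit expands
// to PADDLE_THROW(::common::errors::Fatal("TODO")).
int Visit(const Expr& expr) {
  if (expr.Has<Constant>()) {
    return EvalConstant(expr.Get<Constant>());
  }
  ADT_TODO();  // unimplemented alternatives abort with a Fatal "TODO" error
}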

paddle/cinn/adt/anchor_sd_equation_context.cc

Lines changed: 4 additions & 4 deletions
@@ -31,7 +31,7 @@ void GenerateScheduleMeshEquationsImpl(const List<ScheduleDim>& sched_dims,
   PADDLE_ENFORCE_EQ(
       input_iterators->size() == output_iterators->size(),
       true,
-      phi::errors::InvalidArgument(
+      ::common::errors::InvalidArgument(
           "The size of input iterators and output iterators should be equal, "
           "but got input iterators size = %d, output iterators size = %d.",
           input_iterators->size(),
@@ -53,7 +53,7 @@ void GenerateScheduleMeshEquationsImpl(
   PADDLE_ENFORCE_EQ(
       shape.value()->size() == output_iterators->size(),
       true,
-      phi::errors::InvalidArgument(
+      ::common::errors::InvalidArgument(
           "The size of shape and output iterators should be equal, but got "
           "shape size = %d, output iterators size = %d.",
           shape.value()->size(),
@@ -75,7 +75,7 @@ void GenerateScheduleMeshEquationsImpl(
   const auto& [sched_mesh, perm] = sched_transpose.tuple();
   PADDLE_ENFORCE_EQ(GetOutputRank(sched_mesh) == output_iterators->size(),
                     true,
-                    phi::errors::InvalidArgument(
+                    ::common::errors::InvalidArgument(
                         "The size of output iterators should be equal to the "
                         "rank of the schedule mesh, but got output iterators "
                         "size = %d, rank of the schedule mesh = %d.",
@@ -99,7 +99,7 @@ void GenerateScheduleMeshEquationsImpl(
   const auto& [sched_mesh, _] = sched_padding.tuple();
   PADDLE_ENFORCE_EQ(GetOutputRank(sched_mesh) == output_iterators->size(),
                     true,
-                    phi::errors::InvalidArgument(
+                    ::common::errors::InvalidArgument(
                         "The size of output iterators should be equal to the "
                         "rank of the schedule mesh, but got output iterators "
                         "size = %d, rank of the schedule mesh = %d.",

paddle/cinn/adt/equation_solver.cc

Lines changed: 4 additions & 4 deletions
@@ -186,7 +186,7 @@ std::unordered_map<Variable, Value> InferValuesImpl(
   PADDLE_ENFORCE_EQ(
       out_msg_in_indexes.value()->size() == in_msg_in_indexes.value()->size(),
       true,
-      phi::errors::InvalidArgument(
+      ::common::errors::InvalidArgument(
           "The size of out_msg_in_indexes should be equal to the size of "
           "in_msg_in_indexes, but got out_msg_in_indexes size = %d, "
           "in_msg_in_indexes size = %d.",
@@ -195,7 +195,7 @@ std::unordered_map<Variable, Value> InferValuesImpl(
   PADDLE_ENFORCE_EQ(
       out_msg_out_indexes.value()->size() == in_msg_out_indexes.value()->size(),
       true,
-      phi::errors::InvalidArgument(
+      ::common::errors::InvalidArgument(
           "The size of out_msg_out_indexes should be equal to the size of "
           "in_msg_out_indexes, but got out_msg_out_indexes size = %d, "
           "in_msg_out_indexes size = %d.",
@@ -288,8 +288,8 @@ void CheckEquationsSolvable(
       [&](const auto& opt_old_value, const auto& simplified_value) {
         LOG(ERROR) << "old_value: " << ToTxtString(opt_old_value);
         LOG(ERROR) << "simplified_value: " << ToTxtString(simplified_value);
-        PADDLE_THROW(
-            phi::errors::InvalidArgument("CheckEquationsSolvable Failed"));
+        PADDLE_THROW(::common::errors::InvalidArgument(
+            "CheckEquationsSolvable Failed"));
         return tValueInferSuccess<bool>{false};
       });
 };

paddle/cinn/adt/generate_map_expr.cc

Lines changed: 32 additions & 13 deletions
@@ -105,7 +105,7 @@ bool HasDynamicShape(const ::pir::Value& tensor) {
       PADDLE_ENFORCE_EQ(
           dim,
           -1UL,
-          phi::errors::InvalidArgument(
+          ::common::errors::InvalidArgument(
               "The dynamic shape dim should be -1, but got %d.", dim));
       return true;
     }
@@ -170,7 +170,11 @@ hlir::framework::OpPatternKind GetOpPatternKind(const ::pir::Operation* node) {
 bool CollectRewrittenReductionOpStmts(const OpStmt& op_stmt,
                                       List<OpStmt>* ret) {
   const auto& [op, inputs, outputs] = op_stmt.tuple();
-  CHECK(op.Has<const ::pir::Operation*>());
+  PADDLE_ENFORCE_EQ(
+      op.Has<const ::pir::Operation*>(),
+      true,
+      phi::errors::InvalidArgument(
+          "The op should have a value of type ::pir::Operation*"));
   if (GetOpPatternKind(op.Get<const ::pir::Operation*>()) ==
       hlir::framework::OpPatternKind::kReduction) {
     tReduceInit<const ::pir::Operation*> init_op{
@@ -234,7 +238,10 @@
   std::vector<std::shared_ptr<IGroup>> ret{};

   List<OpStmt> op_stmts = MakeOpStmts(group);
-  CHECK(!op_stmts->empty());
+  PADDLE_ENFORCE_EQ(
+      !op_stmts->empty(),
+      true,
+      phi::errors::InvalidArgument("The op_stmts should not be empty"));

   PartitionIGroupOpStmts(op_stmts, [&](const auto& igroup_spec) {
     ret.push_back(MakeIGroup(igroup_spec));
@@ -249,7 +256,7 @@
   PADDLE_ENFORCE_EQ(
       igroups.size(),
       1UL,
-      phi::errors::InvalidArgument(
+      ::common::errors::InvalidArgument(
           "The size of igroups should be 1, but got %d.", igroups.size()));
   return std::make_shared<KGroup>(group, igroups);
 }
@@ -271,9 +278,12 @@ std::unordered_map<Variable, const Value> MakeSdIterator2Iterator(
   std::unordered_map<Variable, const Value> ret{};

   for (std::size_t i = 0; i < igroup.loop_iterators()->size(); ++i) {
-    CHECK(ret.emplace(igroup.loop_iterators()->at(i),
-                      igroup.loop_iterators()->at(i))
-              .second);
+    PADDLE_ENFORCE_EQ(
+        ret.emplace(igroup.loop_iterators()->at(i),
+                    igroup.loop_iterators()->at(i))
+            .second,
+        true,
+        phi::errors::InvalidArgument("The loop iterator should be unique"));
   }

   return ret;
@@ -326,15 +336,18 @@ LoopDescriptor4IterVarT MakeGetterLoopDescriptor4IterVar(
   PADDLE_ENFORCE_EQ(
       loop_iters->size(),
       sd->size(),
-      phi::errors::InvalidArgument(
+      ::common::errors::InvalidArgument(
          "The size of loop iterators and loop descriptors should be equal, "
          "but got loop iterators size = %d, loop descriptors size = %d.",
          loop_iters->size(),
          sd->size()));
   using Cache = std::unordered_map<Iterator, LoopDescriptor>;
   const auto& sd_iter2sd = std::make_shared<Cache>();
   for (std::size_t i = 0; i < loop_iters->size(); ++i) {
-    CHECK(sd_iter2sd->emplace(loop_iters->at(i), sd->at(i)).second);
+    PADDLE_ENFORCE_EQ(
+        sd_iter2sd->emplace(loop_iters->at(i), sd->at(i)).second,
+        true,
+        phi::errors::InvalidArgument("The loop iterator should be unique"));
   }
   return [sd_iter2sd](const auto& sd_iter) { return sd_iter2sd->at(sd_iter); };
 }
@@ -343,7 +356,10 @@ TreeMerger<Stmt> MakeTreeMerger(const MapIr& map_ir) {
   using Cache = std::unordered_map<OpStmt, LoopIterators>;
   auto cache = std::make_shared<Cache>();
   for (const auto& op_stmt : *(map_ir.op_stmts())) {
-    CHECK(cache->emplace(op_stmt, map_ir.loop_iterators()).second);
+    PADDLE_ENFORCE_EQ(
+        cache->emplace(op_stmt, map_ir.loop_iterators()).second,
+        true,
+        phi::errors::InvalidArgument("The op_stmt should be unique"));
   }

   TreeMerger<Stmt> tree_merger{};
@@ -363,9 +379,12 @@ MapStmt<Stmt> MakeMapStmt(const MapIrList& map_irs) {
   PADDLE_ENFORCE_EQ(
       stmts->size(),
       1UL,
-      phi::errors::InvalidArgument("The size of stmts should be 1, but got %d.",
-                                   stmts->size()));
-  CHECK(stmts->at(0).Has<MapStmt<Stmt>>());
+      ::common::errors::InvalidArgument(
+          "The size of stmts should be 1, but got %d.", stmts->size()));
+  PADDLE_ENFORCE_EQ(stmts->at(0).Has<MapStmt<Stmt>>(),
+                    true,
+                    phi::errors::InvalidArgument(
+                        "The stmts should have a value of type MapStmt<Stmt>"));
   return stmts->at(0).Get<MapStmt<Stmt>>();
 }
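The other recurring rewrite in this file replaces bare glog-style CHECK(cond) assertions with PADDLE_ENFORCE_EQ(cond, true, ...), which raises a typed Paddle error carrying a readable message instead of aborting the process with only the stringified condition. A minimal sketch of the pattern with a hypothetical cache (illustration only, not code from this commit):

// Before: failure aborts the process and prints little beyond the condition.
//   CHECK(cache->emplace(key, value).second);

// After: failure throws InvalidArgument with an explanatory message that
// surfaces in Paddle's normal error-reporting path.
PADDLE_ENFORCE_EQ(
    cache->emplace(key, value).second,
    true,
    phi::errors::InvalidArgument("The key should be unique in the cache"));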

paddle/cinn/adt/get_sub_reshape_dim_ranges.cc

Lines changed: 17 additions & 5 deletions
@@ -37,8 +37,13 @@ GetSubReshapeDimRanges(const List<DimExpr>& lhs_dims,
   if (GetNumel(lhs_dims) != GetNumel(rhs_dims)) {
     return std::nullopt;
   }
-  CHECK(!lhs_dims->empty());
-  CHECK(!rhs_dims->empty());
+  PADDLE_ENFORCE_EQ(
+      !lhs_dims->empty(),
+      true,
+      phi::errors::InvalidArgument("Sorry,but lhs_dims is empty"));
+  PADDLE_ENFORCE_EQ(!rhs_dims->empty(),
+                    true,
+                    phi::errors::InvalidArgument("Sory,but rhs_dims is empty"));
   std::vector<std::pair<int, int>> lhs_ranges{};
   std::vector<std::pair<int, int>> rhs_ranges{};
   int lhs_start = 0;
@@ -51,7 +56,10 @@
     end = (end > dims->size() ? dims->size() : end);
     std::int64_t ret = 1;
     for (std::size_t i = 0; i < end; ++i) {
-      CHECK(dims->at(i).Has<std::int64_t>());
+      PADDLE_ENFORCE_EQ(
+          dims->at(i).Has<std::int64_t>(),
+          true,
+          phi::errors::InvalidArgument("dims->at(i) is not int64_t"));
       ret *= dims->at(i).Get<std::int64_t>();
     }
     return ret;
@@ -82,10 +90,14 @@
     } else if (LhsAcc() > RhsAcc()) {
       rhs_end++;
     } else {
-      PADDLE_THROW(phi::errors::Fatal("Dead code"));
+      PADDLE_THROW(::common::errors::Fatal("Dead code"));
     }
   }
-  CHECK(lhs_end == lhs_dims->size() && rhs_end == rhs_dims->size());
+  PADDLE_ENFORCE_EQ(lhs_end == lhs_dims->size() && rhs_end == rhs_dims->size(),
+                    true,
+                    phi::errors::InvalidArgument(
+                        "lhs_end is not equal to lhs_dims->size() and rhs_end "
+                        "is not equal to rhs_dims->size()"));
   if (lhs_start < lhs_end && rhs_start < rhs_end) {
     lhs_ranges.emplace_back(std::make_pair(lhs_start, lhs_end));
     rhs_ranges.emplace_back(std::make_pair(rhs_start, rhs_end));

paddle/cinn/adt/igroup.cc

Lines changed: 10 additions & 5 deletions
@@ -27,9 +27,14 @@ std::shared_ptr<IndexExprInferContext> MakeIndexExprInferContext(
   const auto& anchor_iterators = igroup.GetAnchorIterators();

   for (std::size_t i = 0; i < anchor_iterators->size(); ++i) {
-    CHECK(anchor_iterator2value
-              .emplace(anchor_iterators->at(i), anchor_iterators->at(i))
-              .second);
+    PADDLE_ENFORCE_EQ(
+        anchor_iterator2value
+            .emplace(anchor_iterators->at(i), anchor_iterators->at(i))
+            .second,
+        true,
+        phi::errors::InvalidArgument(
+            "The element in anchor iterators failed to insert in anchor "
+            "iterator2value! Please check."));
   }

   return std::make_shared<IndexExprInferContext>(anchor_iterator2value);
@@ -102,10 +107,10 @@ List<Iterator> IGroup::GetIndexIterators(const Index& index) const {
     } else if (arg_pos.Has<Undefined>()) {
       // do nothing
     } else {
-      PADDLE_THROW(phi::errors::Fatal("Dead code"));
+      PADDLE_THROW(::common::errors::Fatal("Dead code"));
     }
   }
-  PADDLE_THROW(phi::errors::Fatal("Can not find anchor iterators"));
+  PADDLE_THROW(::common::errors::Fatal("Can not find anchor iterators"));
 }

 }  // namespace cinn::adt
