
Commit d24a402

[cherry-pick] Adjust the Phi C++ API and yaml (PaddlePaddle#41576, PaddlePaddle#41778, PaddlePaddle#41909) (PaddlePaddle#41928)
* [PHI] Support some c++ api in paddle namespace (PaddlePaddle#41778)
  * support some c++ api in paddle namespace
  * change c++ api namespace in custom op
* [Phi] Support setting size of vector<Tensor> for out in yaml (PaddlePaddle#41576)
  * support setting vector out size in yaml
  * support setting size of vector<tensor> for out in yaml
* add data transform config for shape and size (PaddlePaddle#41909)
* fix api_gen bug
1 parent ec1d2a1 commit d24a402

4 files changed: +121 −7 lines changed

paddle/phi/api/ext/tensor_compat.h

Lines changed: 108 additions & 0 deletions
@@ -24,6 +24,114 @@ limitations under the License. */
 namespace paddle {
 using Tensor = experimental::Tensor;
 // using several Tensor initialize functions in paddle namespace
+using experimental::abs;
+using experimental::acos;
+using experimental::acosh;
+using experimental::add;
+using experimental::allclose;
+using experimental::argsort;
+using experimental::asin;
+using experimental::asinh;
+using experimental::atan;
+using experimental::atan2;
+using experimental::atanh;
+using experimental::bernoulli;
+using experimental::ceil;
+using experimental::cholesky;
+using experimental::cholesky_solve;
+using experimental::clip;
+using experimental::concat;
+using experimental::conj;
+using experimental::cos;
+using experimental::cosh;
+using experimental::cross;
+using experimental::det;
+using experimental::diag;
+using experimental::diagonal;
+using experimental::digamma;
+using experimental::dist;
+using experimental::divide;
+using experimental::dot;
+using experimental::elu;
 using experimental::empty;
+using experimental::empty_like;
+using experimental::equal_all;
+using experimental::erf;
+using experimental::erfinv;
+using experimental::exp;
+using experimental::expand;
+using experimental::expm1;
+using experimental::flatten;
+using experimental::flip;
+using experimental::floor;
+using experimental::floor_divide;
 using experimental::full;
+using experimental::gather;
+using experimental::gather_nd;
+using experimental::gelu;
+using experimental::gumbel_softmax;
+using experimental::imag;
+using experimental::increment;
+using experimental::index_sample;
+using experimental::is_empty;
+using experimental::isclose;
+using experimental::isfinite;
+using experimental::isinf;
+using experimental::isnan;
+using experimental::kron;
+using experimental::kthvalue;
+using experimental::label_smooth;
+using experimental::lerp;
+using experimental::lgamma;
+using experimental::log;
+using experimental::log10;
+using experimental::log1p;
+using experimental::log2;
+using experimental::logit;
+using experimental::masked_select;
+using experimental::matmul;
+using experimental::matrix_power;
+using experimental::maximum;
+using experimental::maxout;
+using experimental::minimum;
+using experimental::mode;
+using experimental::multi_dot;
+using experimental::multinomial;
+using experimental::multiply;
+using experimental::mv;
+using experimental::nll_loss;
+using experimental::one_hot;
+using experimental::pixel_shuffle;
+using experimental::poisson;
+using experimental::qr;
+using experimental::real;
+using experimental::reciprocal;
+using experimental::relu;
+using experimental::reshape;
+using experimental::roll;
+using experimental::round;
+using experimental::rsqrt;
+using experimental::scatter;
+using experimental::scatter_nd_add;
+using experimental::selu;
+using experimental::sign;
+using experimental::silu;
+using experimental::sin;
+using experimental::sinh;
+using experimental::split;
+using experimental::sqrt;
+using experimental::square;
+using experimental::stack;
+using experimental::strided_slice;
+using experimental::subtract;
+using experimental::tanh;
+using experimental::thresholded_relu;
+using experimental::tile;
+using experimental::trace;
+using experimental::triangular_solve;
+using experimental::unbind;
+using experimental::unique;
+using experimental::unsqueeze;
+using experimental::where;
+
 } // namespace paddle
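With these using declarations, custom operator code can call the Phi C++ API directly through the paddle namespace. A minimal usage sketch, not part of this commit: the function and tensor names are illustrative, and paddle/extension.h is assumed to be the custom-op entry header as in Paddle's custom-op examples.

#include "paddle/extension.h"

std::vector<paddle::Tensor> MyLinearRelu(const paddle::Tensor& x,
                                         const paddle::Tensor& weight,
                                         const paddle::Tensor& bias) {
  // matmul, add and relu now resolve without the experimental:: qualifier.
  return {paddle::relu(paddle::add(paddle::matmul(x, weight), bias))};
}

The experimental:: spelling keeps working: a using declaration only adds the name to the paddle namespace, it does not remove the original.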

python/paddle/fluid/tests/custom_op/custom_linear_op.cc

Lines changed: 1 addition & 2 deletions
@@ -20,8 +20,7 @@ limitations under the License. */
 std::vector<paddle::Tensor> PhiLinearForward(const paddle::Tensor& x,
                                              const paddle::Tensor& weight,
                                              const paddle::Tensor& bias) {
-  return {
-      paddle::experimental::add(paddle::experimental::matmul(x, weight), bias)};
+  return {paddle::add(paddle::matmul(x, weight), bias)};
 }
 
 std::vector<std::vector<int64_t>> LinearInferShape(
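The shorter spelling compiles because tensor_compat.h (above) re-exports add and matmul into the paddle namespace. For context, a forward function like PhiLinearForward is typically registered with the PD_BUILD_OP builder used in Paddle's custom-op examples; a hedged sketch follows, where the op name and the input/output names are illustrative and not part of this diff.

PD_BUILD_OP(my_phi_linear)
    .Inputs({"X", "Weight", "Bias"})
    .Outputs({"Out"})
    .SetKernelFn(PD_KERNEL(PhiLinearForward))
    .SetInferShapeFn(PD_INFER_SHAPE(LinearInferShape));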

python/paddle/utils/code_gen/api.yaml

Lines changed: 5 additions & 1 deletion
@@ -1787,6 +1787,8 @@
     func : ShapeInferMeta
   kernel :
     func : shape, shape_sr
+  data_transform:
+    skip_transform : input
 
 # shard_index
 - api : shard_index
@@ -1863,6 +1865,8 @@
     func : SizeInferMeta
   kernel :
     func : size
+  data_transform:
+    skip_transform : x
 
 - api : slice
   args : (Tensor input, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis)
@@ -2146,7 +2150,7 @@
     data_type : x
 
 - api : unsqueeze
-  args : (Tensor x, IntArray axes)
+  args : (Tensor x, IntArray axis)
   output : Tensor(xshape), Tensor(out)
   infer_meta :
     func : UnsqueezeInferMeta
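The new data_transform / skip_transform entries feed the skip_transform list consumed by the code generator: inputs listed there are emitted with the {true} flag in the generated PrepareData call (see the api_base.py template below), so data transformation is skipped for them. That fits shape and size, which only read tensor metadata and never touch the input values. The unsqueeze change only renames the argument from axes to axis; the rest of its definition is unchanged.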

python/paddle/utils/code_gen/api_base.py

Lines changed: 7 additions & 4 deletions
@@ -555,9 +555,11 @@ def get_kernel_args(self, code_indent):
         kernel_param = input_names + attr_names
 
         input_tensor_code = ""
+        kernel_idx = -1
         for i, input_name in enumerate(input_names):
             # set input code
             if input_name in kernel_param:
+                kernel_idx = kernel_idx + 1
                 trans_flag = "{}"
                 if input_name in self.data_transform['skip_transform']:
                     trans_flag = "{true}"
@@ -566,20 +568,20 @@ def get_kernel_args(self, code_indent):
                 if input_name in self.optional_vars:
                     input_tensor_code = input_tensor_code + f"""
 {code_indent}  {input_trans_map[input_infos[input_name]]} {PREFIX_TENSOR_NAME}{input_name}(paddle::none);
-{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name}_ptr = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});
+{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name}_ptr = PrepareData({input_name}, kernel.InputAt({kernel_idx}), {trans_flag});
 {code_indent}  if ({PREFIX_TENSOR_NAME}{input_name}_ptr) {{
 {code_indent}    {PREFIX_TENSOR_NAME}{input_name} = paddle::make_optional<const phi::DenseTensor&>(*{PREFIX_TENSOR_NAME}{input_name}_ptr);
 {code_indent}  }}"""
 
                 else:
                     if self.inputs['input_info'][input_name] == "const Tensor&":
                         input_tensor_code = input_tensor_code + f"""
-{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});"""
+{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({kernel_idx}), {trans_flag});"""
 
                     elif self.inputs['input_info'][
                             input_name] == "const std::vector<Tensor>&":
                         input_tensor_code = input_tensor_code + f"""
-{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});
+{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({kernel_idx}), {trans_flag});
 {code_indent}  std::vector<const phi::DenseTensor*> {PREFIX_TENSOR_NAME}{input_name}({PREFIX_TENSOR_NAME}{input_name}_vec->size());
 {code_indent}  for (size_t i = 0; i < {PREFIX_TENSOR_NAME}{input_name}.size(); ++i) {{
 {code_indent}    {PREFIX_TENSOR_NAME}{input_name}[i] = &{PREFIX_TENSOR_NAME}{input_name}_vec->at(i);
@@ -588,7 +590,8 @@ def get_kernel_args(self, code_indent):
                     else:
                         # do nothing
                         pass
-            else:
+            elif self.infer_meta[
+                    'param'] is None or input_name in self.infer_meta['param']:
                 if input_name in self.optional_vars:
                     input_tensor_code = input_tensor_code + f"""
 {code_indent}  {input_trans_map[input_infos[input_name]]} {PREFIX_TENSOR_NAME}{input_name}(paddle::none);
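The kernel_idx counter fixes the index passed to kernel.InputAt(): the enumerate index i counts every declared input, while the kernel only sees the inputs listed in kernel_param, so as soon as one input is skipped the two indices diverge and the generated PrepareData call would look up the wrong kernel argument definition. kernel_idx is incremented only for inputs that are actual kernel parameters, keeping the InputAt() slot aligned with the kernel signature. The change from else: to elif narrows the fallback branch: an input that is neither a kernel parameter nor required by infer_meta (when infer_meta declares an explicit param list) no longer has tensor-preparation code generated for it.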
