Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

【Hackathon 5th No.1】 为 Paddle 新增 copysign API -part #57785

Merged
merged 73 commits into from
Jan 11, 2024
Merged
Show file tree
Hide file tree
Changes from 69 commits
Commits
Show all changes
73 commits
Select commit Hold shift + click to select a range
9e4cbf6
add copysign op
cocoshe Sep 26, 2023
8396602
fix codestyle
cocoshe Oct 19, 2023
f4cd7f1
fix conflict
cocoshe Nov 11, 2023
390b2d9
codestyle
cocoshe Nov 14, 2023
0413912
fix test
cocoshe Nov 16, 2023
9aa2850
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
cocoshe Nov 16, 2023
b923632
fix std bug
cocoshe Nov 16, 2023
ed681d1
Merge branch 'develop' of https://github.com/cocoshe/Paddle into develop
cocoshe Nov 16, 2023
03d75c6
Merge branch 'develop' into develop
cocoshe Dec 1, 2023
3ae286f
merge init
cocoshe Dec 1, 2023
a3dfdbe
merge init
cocoshe Dec 1, 2023
dbaefa3
merge init
cocoshe Dec 1, 2023
b02ca12
add static cast
cocoshe Dec 1, 2023
ccadeb3
add std
cocoshe Dec 1, 2023
6b785c2
static cast
cocoshe Dec 1, 2023
24fab40
static cast
cocoshe Dec 1, 2023
bdc577b
copysignf
cocoshe Dec 2, 2023
c244d4b
static cast to float input
cocoshe Dec 2, 2023
2e57ad2
float input
cocoshe Dec 2, 2023
6831d85
static cast to double input
cocoshe Dec 2, 2023
dd12864
fix
cocoshe Dec 2, 2023
1477a0f
add inplace test
cocoshe Dec 2, 2023
065df10
fix api
cocoshe Dec 2, 2023
41ef0e7
fix cast when grad
cocoshe Dec 2, 2023
87e77b3
modify paddle.cast_ to cast_
cocoshe Dec 2, 2023
5bcf730
remove cast in python api
cocoshe Dec 5, 2023
a16666e
support fp16 && bf16
cocoshe Dec 5, 2023
2599b12
set grad y to zero
cocoshe Dec 5, 2023
fd6886e
fix en doc
cocoshe Dec 5, 2023
df3897c
support number input
cocoshe Dec 9, 2023
4e1b29c
add hostdevice
cocoshe Dec 9, 2023
72e6b0a
refactor kernel
cocoshe Dec 11, 2023
a24a00d
fix nan when backward
cocoshe Dec 11, 2023
349ebac
add broadcast unit test
cocoshe Dec 11, 2023
6181a85
modify .cu
cocoshe Dec 11, 2023
d65e19a
Merge branch 'develop' into develop
cocoshe Dec 12, 2023
1cc02ca
Update __init__.py
cocoshe Dec 12, 2023
e567e18
Update __init__.py
cocoshe Dec 12, 2023
f692f51
Merge branch 'develop' into develop
cocoshe Dec 18, 2023
2d9e4f0
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
cocoshe Dec 19, 2023
73e1883
for ci test
cocoshe Dec 20, 2023
56cecf4
static float
cocoshe Dec 20, 2023
c718f5a
codestyle
cocoshe Dec 20, 2023
a81f9cb
static double
cocoshe Dec 20, 2023
63e3fb7
fix broadcast, try coverage
cocoshe Dec 20, 2023
3927283
Delete paddle/phi/kernels/funcs/broadcast_function.h
cocoshe Dec 21, 2023
8145f8c
remove unused
cocoshe Dec 21, 2023
0a587ae
Update math.py
cocoshe Dec 22, 2023
1b4199d
Update math.py
cocoshe Dec 22, 2023
2d69b81
fix en doc
cocoshe Dec 22, 2023
04f7ca5
add test for output dtype, integer unsupported for now
cocoshe Dec 25, 2023
b9e0251
update
cocoshe Dec 28, 2023
fda6647
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
cocoshe Dec 28, 2023
91aea3f
update
cocoshe Dec 28, 2023
5fe32ca
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
cocoshe Jan 3, 2024
d5bc812
fix
cocoshe Jan 3, 2024
d564078
fix
cocoshe Jan 3, 2024
147c310
add cast for input
cocoshe Jan 5, 2024
c32ee61
fix
cocoshe Jan 5, 2024
4e7e279
add pir test
cocoshe Jan 5, 2024
8c14454
fix doc
cocoshe Jan 5, 2024
395db45
fix doc
cocoshe Jan 6, 2024
6e6c04c
fix doc
cocoshe Jan 7, 2024
110b472
detail doc
cocoshe Jan 8, 2024
ca0ff53
adjust for MSVC
cocoshe Jan 8, 2024
e1f8f49
fix
cocoshe Jan 8, 2024
6327baa
Update python/paddle/tensor/math.py
cocoshe Jan 9, 2024
baed47b
Update python/paddle/tensor/math.py
cocoshe Jan 9, 2024
59df646
fix doc output dtype, fix Equation
cocoshe Jan 9, 2024
731a965
Merge branch 'develop' into develop
cocoshe Jan 10, 2024
984f25b
codestyle
cocoshe Jan 10, 2024
1e8c37e
codestyle
cocoshe Jan 10, 2024
c6ad009
Update math.py
cocoshe Jan 10, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions paddle/phi/api/yaml/backward.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -413,6 +413,17 @@
func : conv3d_transpose_grad
data_type : x

# Backward op for copysign: x_grad carries the sign relation between x and y;
# y_grad is always zero (out does not depend on y's magnitude — see the
# CopySignGradDY / CopySignGradYFunctor kernels). out_grad may be reused
# in place as x_grad.
- backward_op : copysign_grad
forward : copysign (Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : copysign_grad
inplace : (out_grad -> x_grad)

- backward_op : cos_double_grad
forward : cos_grad (Tensor x, Tensor grad_out) -> Tensor(grad_x)
args : (Tensor x, Tensor grad_out, Tensor grad_x_grad)
Expand Down
10 changes: 10 additions & 0 deletions paddle/phi/api/yaml/ops.yaml
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -558,6 +558,16 @@
data_type : x
backward : conv3d_transpose_grad

# Elementwise copysign: out = |x| with the sign taken from y, with
# broadcasting between x and y. Supports in-place execution on x.
- op : copysign
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : ElementwiseInferMeta
kernel :
func : copysign
inplace: (x -> out)
backward : copysign_grad

- op : cos
args : (Tensor x)
output : Tensor(out)
Expand Down
53 changes: 53 additions & 0 deletions paddle/phi/kernels/copysign_grad_kernel.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

// http://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/elementwise_base.h"
#include "paddle/phi/kernels/funcs/elementwise_functor.h"

namespace phi {

using float16 = phi::dtype::float16;
using bfloat16 = phi::dtype::bfloat16;

// copysign(a, b): the magnitude of `a` combined with the sign of `b`.
// NOTE(review): the WIN32 branch presumably works around MSVC overload
// resolution for integral arguments to std::copysign — confirm with the
// original "adjust for MSVC" change. The intermediate for integral types
// is `double` (not `float`) so 32-bit and 64-bit integers keep exact
// values up to 2^53; a `float` intermediate silently corrupts any
// integer with magnitude above 2^24. Values beyond 2^53 still round.
template <typename T>
inline HOSTDEVICE auto copysign_func(const T& a, const T& b) {
#ifdef WIN32
  using U = typename std::conditional_t<std::is_integral<T>::value, double, T>;
  return static_cast<T>(std::copysign(static_cast<U>(a), static_cast<U>(b)));
#else
  return static_cast<T>(std::copysign(a, b));
#endif
}

// copysign for float16: splice b's sign bit onto a's magnitude bits
// directly on the raw IEEE-754 binary16 representation
// (bit 15 = sign, bits 0-14 = exponent + mantissa).
inline HOSTDEVICE phi::dtype::float16 copysign_func(phi::dtype::float16 a,
                                                    phi::dtype::float16 b) {
  const int sign_bit = b.x & 0x8000;
  const int magnitude_bits = a.x & 0x7fff;
  return phi::dtype::raw_uint16_to_float16(sign_bit | magnitude_bits);
}

// copysign for bfloat16: same bit-splicing trick — the top bit of the raw
// 16-bit pattern is the sign bit for bfloat16 as well.
inline HOSTDEVICE phi::dtype::bfloat16 copysign_func(phi::dtype::bfloat16 a,
                                                     phi::dtype::bfloat16 b) {
  const int sign_bit = b.x & 0x8000;
  const int magnitude_bits = a.x & 0x7fff;
  return phi::dtype::raw_uint16_to_bfloat16(sign_bit | magnitude_bits);
}

// Computes the gradients of out = copysign(x, y).
// x_grad = out_grad * d/dx copysign(x, y): +out_grad where sign(x) == sign(y),
// -out_grad where they differ, 0 where x == 0 (see the CPU/GPU functors).
// y_grad is identically zero. Either output pointer may be null to skip it.
template <typename T, typename Context>
void CopySignGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& out_grad,
DenseTensor* x_grad,
DenseTensor* y_grad);
} // namespace phi
63 changes: 63 additions & 0 deletions paddle/phi/kernels/copysign_kernel.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

// http://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/broadcast_function.h"
#include "paddle/phi/kernels/funcs/elementwise_base.h"
namespace phi {

using float16 = phi::dtype::float16;
using bfloat16 = phi::dtype::bfloat16;

// copysign(a, b): the magnitude of `a` combined with the sign of `b`.
// NOTE(review): this definition is duplicated in copysign_grad_kernel.h;
// a TU that includes both headers will hit a redefinition error —
// consider having the grad header include this one instead.
// NOTE(review): the WIN32 branch presumably works around MSVC overload
// resolution for integral arguments to std::copysign — confirm. The
// integral intermediate is `double` (not `float`) so 32/64-bit integers
// keep exact values up to 2^53; a `float` intermediate silently corrupts
// any integer with magnitude above 2^24. Values beyond 2^53 still round.
template <typename T>
inline HOSTDEVICE auto copysign_func(const T& a, const T& b) {
#ifdef WIN32
  using U = typename std::conditional_t<std::is_integral<T>::value, double, T>;
  return static_cast<T>(std::copysign(static_cast<U>(a), static_cast<U>(b)));
#else
  return static_cast<T>(std::copysign(a, b));
#endif
}

// copysign for float16: combine a's magnitude bits with b's sign bit on the
// raw IEEE-754 binary16 pattern (bit 15 = sign, bits 0-14 = magnitude).
inline HOSTDEVICE phi::dtype::float16 copysign_func(phi::dtype::float16 a,
                                                    phi::dtype::float16 b) {
  const int sign_bit = b.x & 0x8000;
  const int magnitude_bits = a.x & 0x7fff;
  return phi::dtype::raw_uint16_to_float16(sign_bit | magnitude_bits);
}

// copysign for bfloat16: identical layout trick — the top bit of the raw
// 16-bit pattern is the sign bit for bfloat16 too.
inline HOSTDEVICE phi::dtype::bfloat16 copysign_func(phi::dtype::bfloat16 a,
                                                     phi::dtype::bfloat16 b) {
  const int sign_bit = b.x & 0x8000;
  const int magnitude_bits = a.x & 0x7fff;
  return phi::dtype::raw_uint16_to_bfloat16(sign_bit | magnitude_bits);
}
cocoshe marked this conversation as resolved.
Show resolved Hide resolved

// Elementwise functor: out = copysign(a, b). Used when x's rank is >= y's
// rank (see CopySignKernel).
template <typename T>
struct CopySignFunctor {
inline HOSTDEVICE T operator()(const T a, const T b) const {
return copysign_func(a, b);
}
};
// Argument-swapped variant: out = copysign(b, a). Used when y outranks x,
// so the broadcast machinery can keep the higher-rank operand first while
// the functor restores the original argument order.
template <typename T>
struct InverseCopySignFunctor {
inline HOSTDEVICE T operator()(const T a, const T b) const {
return copysign_func(b, a);
}
};

// Forward kernel: out[i] = magnitude of x[i] with the sign of y[i],
// elementwise with broadcasting between x and y.
template <typename T, typename Context>
void CopySignKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
DenseTensor* out);
} // namespace phi
82 changes: 82 additions & 0 deletions paddle/phi/kernels/cpu/copysign_grad_kernel.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/copysign_grad_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/elementwise_grad.h"

namespace phi {

// d/dx copysign(x, y) scaled by dout: copysign_func(x, y) / x is exactly +1
// when sign(x) == sign(y) and -1 otherwise; the derivative at x == 0 is
// taken to be 0 by convention. `out` is unused but required by the
// ElemwiseGradCompute functor signature.
template <typename T>
HOSTDEVICE T compute_copysign_grad_dx(T x, T y, T out, T dout) {
  const T zero = static_cast<T>(0);
  return (x == zero) ? x
                     : static_cast<T>(dout * (phi::copysign_func(x, y) / x));
}

// Functor adapter over compute_copysign_grad_dx for ElemwiseGradCompute.
template <typename T>
struct CopySignGradDX {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
return compute_copysign_grad_dx<T>(x, y, out, dout);
}
};

// Gradient w.r.t. y is identically zero: the output depends only on y's
// sign, which has zero derivative with respect to y's value a.e.
template <typename T>
struct CopySignGradDY {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
return static_cast<T>(0);
}
};

// CPU backward kernel for copysign: delegates to the generic elementwise
// gradient machinery with the DX/DY functors above.
template <typename T, typename Context>
void CopySignGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& out_grad,
DenseTensor* x_grad,
DenseTensor* y_grad) {
// Handles the inplace case where x_grad shares storage with out_grad.
funcs::ElementwiseGradPreProcess(out_grad, x_grad);
// axis = -1: default trailing-dimension broadcast alignment.
int axis = -1;
// NOTE: out_grad is passed twice — the second occurrence fills the `out`
// slot of the DX/DY functors, which both ignore it, so the forward output
// does not need to be retained for this backward pass.
phi::funcs::
ElemwiseGradCompute<Context, T, CopySignGradDX<T>, CopySignGradDY<T>>(
dev_ctx,
x,
y,
out_grad,
out_grad,
axis,
x_grad,
y_grad,
CopySignGradDX<T>(),
CopySignGradDY<T>());
}
} // namespace phi

// Register the CPU backward kernel for bool, all integer widths, float,
// double, and the two 16-bit float formats (matching the forward kernel).
PD_REGISTER_KERNEL(copysign_grad,
CPU,
ALL_LAYOUT,
phi::CopySignGradKernel,
bool,
uint8_t,
int8_t,
int16_t,
int,
int64_t,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
51 changes: 51 additions & 0 deletions paddle/phi/kernels/cpu/copysign_kernel.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/copysign_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
// CPU forward kernel: out[i] = |x[i]| with the sign of y[i], with
// broadcasting over the lower-rank operand.
template <typename T, typename Context>
void CopySignKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const DenseTensor& y,
                    DenseTensor* out) {
  dev_ctx.template Alloc<T>(out);
  // The higher-rank tensor stays in the leading argument slot —
  // presumably required by ElementwiseCompute's broadcast handling; the
  // inverse functor restores the original operand order when y outranks x.
  const bool x_outranks_y = x.dims().size() >= y.dims().size();
  if (x_outranks_y) {
    funcs::ElementwiseCompute<phi::CopySignFunctor<T>, T>(
        dev_ctx, x, y, phi::CopySignFunctor<T>(), out);
  } else {
    funcs::ElementwiseCompute<phi::InverseCopySignFunctor<T>, T>(
        dev_ctx, x, y, phi::InverseCopySignFunctor<T>(), out);
  }
}
} // namespace phi

// Register the CPU forward kernel for bool, all integer widths, float,
// double, float16, and bfloat16.
PD_REGISTER_KERNEL(copysign,
CPU,
ALL_LAYOUT,
phi::CopySignKernel,
bool,
uint8_t,
int8_t,
int16_t,
int,
int64_t,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
99 changes: 99 additions & 0 deletions paddle/phi/kernels/gpu/copysign_grad_kernel.cu
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/copysign_grad_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/elementwise_grad.h"

namespace phi {

// GPU functor for dx alone: dout * (copysign(x, y) / x), where the division
// yields exactly +/-1 depending on whether x and y agree in sign; the
// derivative at x == 0 is taken as 0 (returns x itself, which is 0).
template <typename T>
struct CopySignGradXFunctor {
inline HOSTDEVICE T operator()(const T x, const T y, const T dout) const {
if (x == static_cast<T>(0)) return x;
return dout * (phi::copysign_func(x, y) / x);
}
};

// GPU functor for dy alone: identically zero — the output depends only on
// y's sign, not its value.
template <typename T>
struct CopySignGradYFunctor {
inline HOSTDEVICE T operator()(const T x, const T y, const T dout) const {
return static_cast<T>(0);
}
};

// Fused GPU functor producing both gradients in one pass:
//   outs[0] = dx = dout * d/dx copysign(x, y)  (0 at x == 0)
//   outs[1] = dy = 0 (out does not depend on y's magnitude)
template <typename InT, typename OutT>
struct CopySignGradXYFunctor {
  inline HOSTDEVICE phi::Array<OutT, 2> operator()(const InT x,
                                                   const InT y,
                                                   const InT dout) {
    phi::Array<OutT, 2> outs;
    // dx: divide before multiplying so the intermediate copysign(x, y) / x
    // is exactly +/-1. This matches CopySignGradXFunctor and the CPU
    // compute_copysign_grad_dx; the previous form dout * copysign(x, y) / x
    // could overflow the intermediate product for large integer values.
    if (x == static_cast<InT>(0))
      outs[0] = static_cast<OutT>(0);
    else
      outs[0] = static_cast<OutT>(dout * (phi::copysign_func(x, y) / x));
    // dy = 0
    outs[1] = static_cast<OutT>(0);
    return outs;
  }
};

// GPU backward kernel for copysign. Dispatches on which gradients are
// requested: a fused functor when both dx and dy are needed, otherwise a
// single-output functor for whichever of the two is non-null.
template <typename T, typename Context>
void CopySignGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& out_grad,
DenseTensor* x_grad,
DenseTensor* y_grad) {
const auto place = dev_ctx.GetPlace();
// axis = -1: default trailing-dimension broadcast alignment.
int axis = -1;
if (x_grad != nullptr && y_grad != nullptr) {
// Both gradients: one fused pass over (x, y, out_grad).
std::vector<const DenseTensor*> ins = {&x, &y, &out_grad};
GetGradXAndYOut<T>(dev_ctx,
place,
axis,
ins,
out_grad,
x_grad,
y_grad,
CopySignGradXYFunctor<T, T>());
} else if (x_grad != nullptr && y_grad == nullptr) {
// Only dx requested.
std::vector<const DenseTensor*> ins = {&x, &y, &out_grad};
GetGradXOrYOut<T>(
dev_ctx, place, axis, ins, out_grad, x_grad, CopySignGradXFunctor<T>());
} else if (y_grad != nullptr && x_grad == nullptr) {
// Only dy requested (always zero, but still materialized).
std::vector<const DenseTensor*> ins = {&x, &y, &out_grad};
GetGradXOrYOut<T>(
dev_ctx, place, axis, ins, out_grad, y_grad, CopySignGradYFunctor<T>());
}
}
} // namespace phi

// Register the GPU backward kernel for the same dtype set as the CPU
// backward kernel: bool, all integer widths, float, double, fp16, bf16.
PD_REGISTER_KERNEL(copysign_grad,
GPU,
ALL_LAYOUT,
phi::CopySignGradKernel,
bool,
uint8_t,
int8_t,
int16_t,
int,
int64_t,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
Loading