Skip to content

Commit

Permalink
Revert "【Hackathon 6th Fundable Projects 3 No.242】 [fluid_ops] mp_allreduce_sum" (#67832)
Browse files Browse the repository at this point in the history

This reverts commit 29eb8d7.
  • Loading branch information
yuanlehome authored Aug 30, 2024
1 parent 3fd9bda commit a604ea2
Show file tree
Hide file tree
Showing 12 changed files with 109 additions and 187 deletions.
10 changes: 10 additions & 0 deletions paddle/fluid/operators/collective/mp_allreduce_sum_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -82,3 +82,13 @@ REGISTER_OPERATOR(mp_allreduce_sum,
ops::MpAllReduceSumOpGradMaker<paddle::imperative::OpBase>,
ops::MpAllReduceSumOpMaker,
ops::MpAllReduceSumInplaceInferer);

// Registers the structured CPU kernel for the mp_allreduce_sum op
// (model-parallel all-reduce with sum reduction) for all tensor layouts,
// covering float, double, int, int64_t and float16 inputs.
// NOTE(review): MpAllReduceSumCPUKernel is presumably produced by a
// DEFINE_C_ALLREDUCE_CPU_KERNEL-style macro earlier in this file (not
// visible in this hunk) — confirm against the full source.
PD_REGISTER_STRUCT_KERNEL(mp_allreduce_sum,
CPU,
ALL_LAYOUT,
ops::MpAllReduceSumCPUKernel,
float,
double,
int,
int64_t,
phi::dtype::float16) {}
39 changes: 39 additions & 0 deletions paddle/fluid/operators/collective/mp_allreduce_sum_op.cu.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/collective/c_allreduce_op.h"

namespace paddle {
namespace operators {
// Instantiates the CUDA kernel class (MpAllReduceSumCUDAKernel) for this op
// with a sum reduction (kRedSum); the macro comes from c_allreduce_op.h.
DEFINE_C_ALLREDUCE_CUDA_KERNEL(MpAllReduceSum, kRedSum)
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

// Registers the structured GPU kernel for mp_allreduce_sum for all layouts.
// bfloat16 is only compiled in when NCCL >= 2.10.0 (version code 21000) and
// CUDA >= 11.0 are available, or when building for HIP/ROCm — older NCCL
// releases have no bfloat16 reduction support.
PD_REGISTER_STRUCT_KERNEL(mp_allreduce_sum,
GPU,
ALL_LAYOUT,
ops::MpAllReduceSumCUDAKernel,
float,
double,
int,
int64_t,
#if (NCCL_VERSION_CODE >= 21000 && CUDA_VERSION >= 11000) || \
defined(PADDLE_WITH_HIP)
phi::dtype::bfloat16,
#endif
phi::dtype::float16) {
}
29 changes: 29 additions & 0 deletions paddle/fluid/operators/collective/mp_allreduce_sum_op.kps
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
// KP (Kunlun XPU) build of the mp_allreduce_sum kernel; the whole file is a
// no-op unless the XPU KP backend is enabled.
#ifdef PADDLE_WITH_XPU_KP

// Please do not modify the following code
// Undefine every CUDA-compiler identification macro before including the
// shared kernel header, so the header's CUDA-specific code paths are not
// selected when this translation unit is compiled for the KP backend.
#if defined(__CUDA_ARCH__)
#undef __CUDA_ARCH__
#endif

#if defined(__CUDACC__)
#undef __CUDACC__
#endif

#if defined(__CUDA__)
#undef __CUDA__
#endif

#if defined(__NVCC__)
#undef __NVCC__
#endif

#include "paddle/fluid/operators/collective/c_allreduce_op.h"

namespace ops = paddle::operators;

// Registers the KP kernel for mp_allreduce_sum on XPU devices, reusing the
// generic XPU all-reduce kernel with a sum reduction. Only float is
// registered here — this matches the legacy REGISTER_OP_KERNEL API, which
// takes one kernel type per call.
REGISTER_OP_KERNEL(mp_allreduce_sum,
KP,
phi::XPUPlace,
ops::CAllReduceOpXPUKernel<ops::kRedSum, float>);

#endif
31 changes: 31 additions & 0 deletions paddle/fluid/operators/collective/mp_allreduce_sum_op_xpu.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/collective/c_allreduce_op.h"

namespace paddle {
namespace operators {
// Instantiates the XPU kernel class (CAllReduceSumXPUKernel) with a sum
// reduction (kRedSum); the macro comes from c_allreduce_op.h.
// NOTE(review): the CUDA variant of this op names its kernel
// "MpAllReduceSum" while this file uses "CAllReduceSum" — the registration
// below references CAllReduceSumXPUKernel consistently, so it compiles, but
// the naming is inconsistent across backends; consider confirming whether
// CAllReduceSumXPUKernel is already defined by the sibling c_allreduce_sum
// op (which would make this a redefinition risk).
DEFINE_C_ALLREDUCE_XPU_KERNEL(CAllReduceSum, kRedSum)
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

// Registers the structured XPU kernel for mp_allreduce_sum for all layouts,
// covering float, int and float16 inputs (the dtype set XPU supports here).
PD_REGISTER_STRUCT_KERNEL(mp_allreduce_sum,
XPU,
ALL_LAYOUT,
ops::CAllReduceSumXPUKernel,
float,
int,
phi::dtype::float16) {}
2 changes: 0 additions & 2 deletions paddle/fluid/pir/dialect/op_generator/ops_api_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,8 +202,6 @@
'nce',
'lrn',
'max_pool2d_v2',
'mp_allreduce_sum',
'mp_allreduce_sum_',
'partial_sum',
'pull_gpups_sparse',
'pull_gpups_sparse_',
Expand Down
37 changes: 0 additions & 37 deletions paddle/phi/kernels/cpu/mp_allreduce_sum_kernel.cc

This file was deleted.

52 changes: 0 additions & 52 deletions paddle/phi/kernels/gpu/mp_allreduce_sum_kernel.cu

This file was deleted.

36 changes: 0 additions & 36 deletions paddle/phi/kernels/kps/mp_allreduce_sum_kernel.kps

This file was deleted.

36 changes: 0 additions & 36 deletions paddle/phi/kernels/xpu/mp_allreduce_sum_kernel.cc

This file was deleted.

6 changes: 0 additions & 6 deletions paddle/phi/ops/yaml/inconsistent/static_backward.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -313,12 +313,6 @@
func : minimum_grad
composite : minimum_grad(x, y, out_grad, x_grad, y_grad)

- backward_op : mp_allreduce_sum_grad
forward : mp_allreduce_sum(Tensor x, int ring_id = 0, bool use_calc_stream = false) -> Tensor(out)
args : (Tensor out_grad, int ring_id = 0, bool use_calc_stream = false)
output : Tensor(x_grad)
invoke : c_identity(out_grad, ring_id, false, false)

- backward_op : multiply_double_grad
forward : multiply_grad (Tensor x, Tensor y, Tensor grad_out, int axis = -1) -> Tensor(grad_x), Tensor(grad_y)
args : (Tensor x, Tensor y, Tensor grad_out, Tensor grad_x_grad, Tensor grad_y_grad, int axis = -1)
Expand Down
12 changes: 0 additions & 12 deletions paddle/phi/ops/yaml/inconsistent/static_ops.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -576,18 +576,6 @@
backward : minimum_grad
interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : mp_allreduce_sum
args : (Tensor x, int ring_id = 0, bool use_calc_stream = false)
output : Tensor(out)
infer_meta :
func : AllReduceInferMeta
param: [x]
kernel :
func : mp_allreduce_sum
param: [x, ring_id, use_calc_stream]
backward: mp_allreduce_sum_grad
inplace: (x -> out)

- op : multiply
args : (Tensor x, Tensor y)
output : Tensor
Expand Down
6 changes: 0 additions & 6 deletions paddle/phi/ops/yaml/op_compat.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4431,12 +4431,6 @@
outputs:
out: Out

- op: mp_allreduce_sum
inputs :
x : X
outputs :
out: Out

- op: nce
backward: nce_grad
inputs:
Expand Down

0 comments on commit a604ea2

Please sign in to comment.