86 changes: 22 additions & 64 deletions paddle/fluid/operators/inverse_op.cc
@@ -12,57 +12,23 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/inverse_op.h"

#include <string>
#include <unordered_map>

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/matrix_inverse.h"

namespace paddle {
namespace operators {

class InverseOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "Inverse");
OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", "Inverse");

auto input_dims = ctx->GetInputDim("Input");
int64_t input_rank = input_dims.size();
PADDLE_ENFORCE_GE(
input_rank,
2,
platform::errors::InvalidArgument(
"The dimension of Input(Input) is expected to be no less than 2. "
"But received: Input(Input)'s dimension = %d, shape = [%s].",
input_rank,
input_dims));
for (int64_t i = 0; i < input_rank; ++i) {
PADDLE_ENFORCE_EQ(
(input_dims[i] == -1) || (input_dims[i] > 0),
true,
platform::errors::InvalidArgument(
"Each dimension of input tensor is expected to be -1 or a "
"positive number, but received %d. Input's shape is [%s].",
input_dims[i],
input_dims));
}
if (input_dims[input_rank - 2] > 0 && input_dims[input_rank - 1] > 0) {
PADDLE_ENFORCE_EQ(input_dims[input_rank - 2],
input_dims[input_rank - 1],
platform::errors::InvalidArgument(
"The last two dimensions are expected to be equal. "
"But received: %d and %d; "
"Input(Input)'s shape = [%s].",
input_dims[input_rank - 2],
input_dims[input_rank - 1],
input_dims));
}

ctx->SetOutputDim("Output", input_dims);
ctx->ShareLoD("Input", /*->*/ "Output");
}
};

class InverseOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
@@ -78,19 +44,6 @@ class InverseOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
class InverseGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

void InferShape(framework::InferShapeContext* ctx) const override {
auto input_grad = framework::GradVarName("Input");
auto output_grad = framework::GradVarName("Output");

OP_INOUT_CHECK(ctx->HasInput("Output"), "Input", "Output", "InverseGrad");
OP_INOUT_CHECK(
ctx->HasInput(output_grad), "Input", output_grad, "InverseGrad");

if (ctx->HasOutput(input_grad)) {
ctx->SetOutputDim(input_grad, ctx->GetInputDim(output_grad));
}
}
};

class InverseOpMaker : public framework::OpProtoAndCheckerMaker {
@@ -128,18 +81,23 @@ class InverseGradOpMaker : public framework::SingleGradOpMaker<T> {
} // namespace paddle

namespace ops = paddle::operators;

DECLARE_INFER_SHAPE_FUNCTOR(inverse,
InverseInferShapeFunctor,
PD_INFER_META(phi::InverseInferMeta));

DECLARE_INFER_SHAPE_FUNCTOR(inverse_grad,
InverseGradInferShapeFunctor,
PD_INFER_META(phi::InverseGradInferMeta));

REGISTER_OPERATOR(inverse,
ops::InverseOp,
ops::InverseOpMaker,
ops::InverseOpInferVarType,
ops::InverseGradOpMaker<paddle::framework::OpDesc>,
ops::InverseGradOpMaker<paddle::imperative::OpBase>);

REGISTER_OPERATOR(inverse_grad, ops::InverseGradOp);
ops::InverseGradOpMaker<paddle::imperative::OpBase>,
InverseInferShapeFunctor);

REGISTER_OP_CPU_KERNEL(inverse,
ops::InverseKernel<phi::CPUContext, float>,
ops::InverseKernel<phi::CPUContext, double>);
REGISTER_OP_CPU_KERNEL(inverse_grad,
ops::InverseGradKernel<phi::CPUContext, float>,
ops::InverseGradKernel<phi::CPUContext, double>);
REGISTER_OPERATOR(inverse_grad,
ops::InverseGradOp,
InverseGradInferShapeFunctor);
26 changes: 0 additions & 26 deletions paddle/fluid/operators/inverse_op.cu.cc

This file was deleted.

73 changes: 0 additions & 73 deletions paddle/fluid/operators/inverse_op.h

This file was deleted.
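The deleted fluid header held the InverseKernel and InverseGradKernel templates; under phi they move into shared impl headers that the new CPU/GPU registration files below include. A minimal sketch of what paddle/phi/kernels/impl/inverse_kernel_impl.h could look like, assuming phi::funcs::MatrixInverse keeps the operator()(dev_ctx, input, output*) interface the old fluid kernel used (the header itself is not shown in this diff):

```cpp
// Sketch only: not taken from this PR. The MatrixInverse interface is
// assumed from paddle/phi/kernels/funcs/matrix_inverse.h, which the old
// fluid operator already included.
#pragma once

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/matrix_inverse.h"

namespace phi {

template <typename T, typename Context>
void InverseKernel(const Context& dev_ctx,
                   const DenseTensor& x,
                   DenseTensor* out) {
  // Output dims/LoD were already set by InverseInferMeta; just allocate.
  dev_ctx.template Alloc<T>(out);

  // Delegate the (batched) square-matrix inversion to the shared functor.
  phi::funcs::MatrixInverse<Context, T> mat_inv;
  mat_inv(dev_ctx, x, out);
}

}  // namespace phi
```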

9 changes: 9 additions & 0 deletions paddle/phi/api/yaml/legacy_api.yaml
@@ -988,6 +988,15 @@
intermediate : saved_mean, saved_variance
backward : instance_norm_grad

- api : inverse
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : InverseInferMeta
kernel :
func : inverse
backward : inverse_grad

# is_empty
- api : is_empty
args : (Tensor x)
9 changes: 9 additions & 0 deletions paddle/phi/api/yaml/legacy_backward.yaml
@@ -912,6 +912,15 @@
optional : scale
backward : instance_norm_double_grad

- backward_api : inverse_grad
forward : inverse(Tensor x) -> Tensor(out)
args : (Tensor out, Tensor out_grad)
output : Tensor(x_grad)
infer_meta:
func : InverseGradInferMeta
kernel :
func : inverse_grad

- backward_api : kldiv_loss_grad
forward : kldiv_loss(Tensor x, Tensor label, str reduction) -> Tensor(out)
args : (Tensor x, Tensor label, Tensor out_grad, str reduction)
8 changes: 8 additions & 0 deletions paddle/phi/infermeta/backward.cc
@@ -385,6 +385,14 @@ void InstanceNormDoubleGradInferMeta(const MetaTensor& x,
}
}

void InverseGradInferMeta(const MetaTensor& out,
const MetaTensor& dout,
MetaTensor* dx) {
if (dx) {
dx->set_dims(dout.dims());
}
}
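For reference (a standard identity, not something introduced in this diff): writing Y = X^{-1}, the matrix-inverse gradient is

```latex
\frac{\partial L}{\partial X} = -\,Y^{\top}\,\frac{\partial L}{\partial Y}\,Y^{\top}
```

so the input gradient always has the same shape as the output gradient, which is all this InferMeta needs to propagate.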

void KernelWithXShapeInferMeta(const MetaTensor& xshape, MetaTensor* dx) {
auto xshape_dims = xshape.dims();
auto x_dims = phi::slice_ddim(xshape_dims, 1, xshape_dims.size());
4 changes: 4 additions & 0 deletions paddle/phi/infermeta/backward.h
@@ -173,6 +173,10 @@ void InstanceNormDoubleGradInferMeta(const MetaTensor& x,
MetaTensor* dscale,
MetaTensor* ddy);

void InverseGradInferMeta(const MetaTensor& out,
const MetaTensor& dout,
MetaTensor* dx);

void KernelWithXShapeInferMeta(const MetaTensor& xshape, MetaTensor* dx);

void MaxPoolWithIndexGradInferMeta(const MetaTensor& x,
Expand Down
37 changes: 37 additions & 0 deletions paddle/phi/infermeta/unary.cc
@@ -891,6 +891,43 @@ void InferMetaFromVecValue(const MetaTensor& x,
}
}

void InverseInferMeta(const MetaTensor& x, MetaTensor* out) {
auto input_dims = x.dims();
int64_t input_rank = input_dims.size();
PADDLE_ENFORCE_GE(
input_rank,
2,
errors::InvalidArgument(
"The dimension of Input(Input) is expected to be no less than 2. "
"But received: Input(Input)'s dimension = %d, shape = [%s].",
input_rank,
input_dims));
for (int64_t i = 0; i < input_rank; ++i) {
PADDLE_ENFORCE_EQ(
(input_dims[i] == -1) || (input_dims[i] > 0),
true,
errors::InvalidArgument(
"Each dimension of input tensor is expected to be -1 or a "
"positive number, but received %d. Input's shape is [%s].",
input_dims[i],
input_dims));
}
if (input_dims[input_rank - 2] > 0 && input_dims[input_rank - 1] > 0) {
PADDLE_ENFORCE_EQ(input_dims[input_rank - 2],
input_dims[input_rank - 1],
errors::InvalidArgument(
"The last two dimensions are expected to be equal. "
"But received: %d and %d; "
"Input(Input)'s shape = [%s].",
input_dims[input_rank - 2],
input_dims[input_rank - 1],
input_dims));
}

out->set_dims(input_dims);
out->share_lod(x);
Reviewer comment (Contributor): set_dtype is also needed here; it will be added in the next PR.

}

void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out) {
out->set_dims(phi::make_ddim({1}));
out->set_dtype(DataType::BOOL);
2 changes: 2 additions & 0 deletions paddle/phi/infermeta/unary.h
@@ -133,6 +133,8 @@ void InferMetaFromVecValue(const MetaTensor& x,
const std::vector<int64_t>& shape,
MetaTensor* out);

void InverseInferMeta(const MetaTensor& x, MetaTensor* out);

void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out);

void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out);
Expand Down
20 changes: 20 additions & 0 deletions paddle/phi/kernels/cpu/inverse_grad_kernel.cc
@@ -0,0 +1,20 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/impl/inverse_grad_kernel_impl.h"

#include "paddle/phi/core/kernel_registry.h"

PD_REGISTER_KERNEL(
inverse_grad, CPU, ALL_LAYOUT, phi::InverseGradKernel, float, double) {}
20 changes: 20 additions & 0 deletions paddle/phi/kernels/cpu/inverse_kernel.cc
@@ -0,0 +1,20 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/impl/inverse_kernel_impl.h"

#include "paddle/phi/core/kernel_registry.h"

PD_REGISTER_KERNEL(
inverse, CPU, ALL_LAYOUT, phi::InverseKernel, float, double) {}
22 changes: 22 additions & 0 deletions paddle/phi/kernels/gpu/inverse_grad_kernel.cu
@@ -0,0 +1,22 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/inverse_grad_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/inverse_grad_kernel_impl.h"

PD_REGISTER_KERNEL(
inverse_grad, GPU, ALL_LAYOUT, phi::InverseGradKernel, float, double) {}
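Both PD_REGISTER_KERNEL calls above bind phi::InverseGradKernel, whose shared impl header is not part of this diff. A hedged sketch of what paddle/phi/kernels/impl/inverse_grad_kernel_impl.h might contain, applying the gradient identity dL/dX = -Yᵀ (dL/dY) Yᵀ and assuming the phi BLAS helpers (funcs::GetBlas, funcs::CreateMatrixDescriptor, Blas::MatMul) keep the interface the old fluid InverseGradKernel used:

```cpp
// Sketch only: not taken from this PR; helper names and signatures are
// assumed from paddle/phi/kernels/funcs/blas/blas.h.
#pragma once

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"

namespace phi {

template <typename T, typename Context>
void InverseGradKernel(const Context& dev_ctx,
                       const DenseTensor& out,       // Y = X^{-1}
                       const DenseTensor& out_grad,  // dL/dY
                       DenseTensor* x_grad) {        // dL/dX
  if (x_grad == nullptr) {
    return;
  }
  dev_ctx.template Alloc<T>(x_grad);

  auto blas = funcs::GetBlas<Context, T>(dev_ctx);

  // tmp = (dL/dY) * Y^T
  DenseTensor tmp;
  tmp.Resize(out.dims());
  dev_ctx.template Alloc<T>(&tmp);
  auto dy_desc = funcs::CreateMatrixDescriptor(out_grad.dims(), 0, false);
  auto y_t_desc = funcs::CreateMatrixDescriptor(out.dims(), 0, true);
  blas.MatMul(out_grad, dy_desc, out, y_t_desc, T(1), &tmp, T(0));

  // dL/dX = -Y^T * tmp = -Y^T * (dL/dY) * Y^T
  auto tmp_desc = funcs::CreateMatrixDescriptor(tmp.dims(), 0, false);
  blas.MatMul(out, y_t_desc, tmp, tmp_desc, T(-1), x_grad, T(0));
}

}  // namespace phi
```

The two batched MatMul calls mirror the computation that the deleted fluid kernel in inverse_op.h performed with the same BLAS helpers.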