Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion paddle/cinn/hlir/dialect/operator/ir/ops.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@
interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : pool2d
args : (Tensor x, int[] kernel_size, int[] stride_size, int[] padding_size, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
args : (Tensor x, int64_t[] kernel_size, int64_t[] stride_size, int64_t[] padding_size, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
output : Tensor(out)
infer_meta :
func : Pool2DInferMeta
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,7 @@ symbol::ShapeOrDataDimExprs Pool2dRawInferSymbolicShape(
op->attributes().at("strides").dyn_cast<pir::ArrayAttribute>();
for (size_t i = 0; i < stride_attr.size(); i++) {
res.emplace_back(
stride_attr.at(i).dyn_cast<pir::Int32Attribute>().data());
stride_attr.at(i).dyn_cast<pir::Int64Attribute>().data());
}
return res;
}();
Expand Down Expand Up @@ -156,7 +156,7 @@ symbol::ShapeOrDataDimExprs Pool2dRawInferSymbolicShape(
op->attributes().at("paddings").dyn_cast<pir::ArrayAttribute>();
for (size_t i = 0; i < padding_attr.size(); i++) {
paddings.emplace_back(
padding_attr.at(i).dyn_cast<pir::Int32Attribute>().data());
padding_attr.at(i).dyn_cast<pir::Int64Attribute>().data());
}
return GetRealPadding(paddings,
global_pooling,
Expand Down Expand Up @@ -2841,8 +2841,8 @@ bool Pool2dOpInferSymbolicShape(pir::Operation *op,

bool Pool3dOpInferSymbolicShape(pir::Operation *op,
pir::InferSymbolicShapeContext *infer_context) {
std::vector<int> kernel_size_ =
paddle::dialect::details::GetVectorAttr<int>(op, "kernel_size");
std::vector<int64_t> kernel_size_ =
paddle::dialect::details::GetVectorAttr<int64_t>(op, "kernel_size");
std::vector<symbol::DimExpr> kernel_size;
for (size_t i = 0; i < kernel_size_.size(); ++i) {
kernel_size.push_back(symbol::DimExpr(kernel_size_[i]));
Expand Down
8 changes: 4 additions & 4 deletions paddle/fluid/pir/drr/src/ir_operation_factory.cc
Original file line number Diff line number Diff line change
Expand Up @@ -401,14 +401,14 @@ void OperationFactory::RegisterManualOpCreator() {
common::errors::InvalidArgument(
"'strides' Attribute is expected for "
"Pool2dOp. "));
std::vector<int> strides;
std::vector<int64_t> strides;
for (size_t i = 0;
i < attrs.at("strides").dyn_cast<pir::ArrayAttribute>().size();
i++) {
strides.push_back(attrs.at("strides")
.dyn_cast<pir::ArrayAttribute>()
.at(i)
.dyn_cast<pir::Int32Attribute>()
.dyn_cast<pir::Int64Attribute>()
.data());
}

Expand All @@ -417,14 +417,14 @@ void OperationFactory::RegisterManualOpCreator() {
common::errors::InvalidArgument(
"'paddings' Attribute is expected for "
"Pool2dOp. "));
std::vector<int> paddings;
std::vector<int64_t> paddings;
for (size_t i = 0;
i < attrs.at("paddings").dyn_cast<pir::ArrayAttribute>().size();
i++) {
paddings.push_back(attrs.at("paddings")
.dyn_cast<pir::ArrayAttribute>()
.at(i)
.dyn_cast<pir::Int32Attribute>()
.dyn_cast<pir::Int64Attribute>()
.data());
}

Expand Down
52 changes: 52 additions & 0 deletions paddle/fluid/pir/serialize_deserialize/patch/0.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
# Version-0 op-compatibility patches for PIR program serialize/deserialize.
# Each entry rewrites stored attributes of an op so that programs saved
# under the old schema still load against the updated op definitions.
# Context: the surrounding change migrates pooling-op integer-array
# attributes (strides / paddings / kernel_size) from pir::Int32Attribute
# elements to pir::Int64Attribute elements, so old saved programs carry
# Int32 arrays that must be re-typed on load.
# NOTE(review): exact semantics of "modify_attr" (in-place re-type vs.
# replace-with-default) are defined by the patch applier — confirm there.
op_patches:
# lp_pool2d: re-type its two 2-element spatial arrays to int64.
- op_name : pd_op.lp_pool2d
actions:
# 'strides' — one value per spatial dim (H, W).
- action : modify_attr
object : strides
type : pir::ArrayAttribute
default :
- type: pir::Int64Attribute
- type: pir::Int64Attribute
# 'paddings' — one value per spatial dim (H, W).
- action : modify_attr
object : paddings
type : pir::ArrayAttribute
default :
- type: pir::Int64Attribute
- type: pir::Int64Attribute
# pool2d: same strides/paddings re-typing as lp_pool2d.
- op_name : pd_op.pool2d
actions:
- action : modify_attr
object : strides
type : pir::ArrayAttribute
default :
- type: pir::Int64Attribute
- type: pir::Int64Attribute
- action : modify_attr
object : paddings
type : pir::ArrayAttribute
default :
- type: pir::Int64Attribute
- type: pir::Int64Attribute
# pool3d: three spatial dims (D, H, W), so 3-element arrays; also
# covers 'kernel_size', which pool3d stores as an attribute
# (see Pool3dOpInferSymbolicShape reading it as int64_t).
- op_name : pd_op.pool3d
actions:
- action : modify_attr
object : kernel_size
type : pir::ArrayAttribute
default :
- type: pir::Int64Attribute
- type: pir::Int64Attribute
- type: pir::Int64Attribute
- action : modify_attr
object : strides
type : pir::ArrayAttribute
default :
- type: pir::Int64Attribute
- type: pir::Int64Attribute
- type: pir::Int64Attribute
- action : modify_attr
object : paddings
type : pir::ArrayAttribute
default :
- type: pir::Int64Attribute
- type: pir::Int64Attribute
- type: pir::Int64Attribute
8 changes: 4 additions & 4 deletions paddle/fluid/pir/transforms/tensorrt/trt_op_marker_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -370,9 +370,9 @@ class Pool2dOpPattern
}

auto padding_attr = op->attribute<pir::ArrayAttribute>("paddings");
std::vector<int32_t> paddings;
std::vector<int64_t> paddings;
for (const auto &attr : padding_attr.AsVector()) {
paddings.push_back(attr.dyn_cast<pir::Int32Attribute>().data());
paddings.push_back(attr.dyn_cast<pir::Int64Attribute>().data());
}
if (paddings.size() > 2) {
VLOG(3) << "The padding size should be less than 2";
Expand Down Expand Up @@ -435,10 +435,10 @@ class Pool2dOpPattern
int g_post_pad_w = 0;
int input_height = input_dims[input_dims.size() - 2];
int input_width = input_dims[input_dims.size() - 1];
std::vector<int32_t> strides;
std::vector<int64_t> strides;
auto strides_attr = op->attribute<pir::ArrayAttribute>("strides");
for (const auto &attr : strides_attr.AsVector()) {
strides.push_back(attr.dyn_cast<pir::Int32Attribute>().data());
strides.push_back(attr.dyn_cast<pir::Int64Attribute>().data());
}
if (input_height > 0 &&
input_height - kernel_size[0] + 2 * paddings[0] < 0) {
Expand Down
8 changes: 4 additions & 4 deletions paddle/phi/backends/onednn/onednn_reuse.h
Original file line number Diff line number Diff line change
Expand Up @@ -1546,8 +1546,8 @@ class PoolingOneDNNHandler
PoolingOneDNNHandler(const OneDNNContext& dev_ctx,
const std::string& pooling_type,
const IntArray& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int64_t>& strides,
const std::vector<int64_t>& paddings,
bool global_pooling,
const std::string& padding_algorithm,
bool ceil_mode,
Expand Down Expand Up @@ -1647,8 +1647,8 @@ class PoolingOneDNNHandler
PoolingOneDNNHandler(const OneDNNContext& dev_ctx,
const std::string& pooling_type,
const IntArray& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int64_t>& strides,
const std::vector<int64_t>& paddings,
bool global_pooling,
const std::string& padding_algorithm,
bool ceil_mode,
Expand Down
40 changes: 21 additions & 19 deletions paddle/phi/infermeta/unary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2655,10 +2655,13 @@ void MaxPoolV2InferMeta(const MetaTensor& x,
false,
common::errors::InvalidArgument(
"max_pool2d_v2 op does not support adaptive."));
std::vector<int64_t> kernel_size_(kernel_size.begin(), kernel_size.end());
std::vector<int64_t> strides_(strides.begin(), strides.end());
std::vector<int64_t> paddings_(paddings.begin(), paddings.end());
Pool2DInferMeta(x,
kernel_size,
strides,
paddings,
kernel_size_,
strides_,
paddings_,
false,
false,
data_format,
Expand Down Expand Up @@ -3383,8 +3386,8 @@ void PNormInferMeta(const MetaTensor& x,

void Pool2DInferMeta(const MetaTensor& x,
const IntArray& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int64_t>& strides,
const std::vector<int64_t>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
Expand All @@ -3410,8 +3413,8 @@ void Pool2DInferMeta(const MetaTensor& x,
out->share_lod(x);
out->set_dtype(x.dtype());
} else {
std::vector<int> kernel_size_val(kernel_size.GetData().begin(),
kernel_size.GetData().end());
std::vector<int64_t> kernel_size_val(kernel_size.GetData().begin(),
kernel_size.GetData().end());
PoolInferMeta(x,
kernel_size_val,
strides,
Expand Down Expand Up @@ -3468,9 +3471,9 @@ void SendV2InferMeta(const int peer, const int ring_id) {
}

void PoolInferMeta(const MetaTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int64_t>& kernel_size,
const std::vector<int64_t>& strides,
const std::vector<int64_t>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
Expand All @@ -3480,8 +3483,8 @@ void PoolInferMeta(const MetaTensor& x,
const std::string& padding_algorithm,
MetaTensor* out,
MetaConfig config) {
std::vector<int> paddings_ = paddings;
std::vector<int> kernel_size_ = kernel_size;
std::vector<int64_t> paddings_ = paddings;
std::vector<int64_t> kernel_size_ = kernel_size;

auto x_dims = x.dims();
PADDLE_ENFORCE_EQ(
Expand Down Expand Up @@ -3555,13 +3558,12 @@ void PoolInferMeta(const MetaTensor& x,
if ((!config.is_runtime) && (data_dims[i] < 0)) {
output_shape.push_back(data_dims[i]);
} else {
output_shape.push_back(
funcs::PoolOutputSize(static_cast<int>(data_dims[i]),
kernel_size_[i],
paddings_[2 * i],
paddings_[2 * i + 1],
strides[i],
ceil_mode));
output_shape.push_back(funcs::PoolOutputSize(data_dims[i],
kernel_size_[i],
paddings_[2 * i],
paddings_[2 * i + 1],
strides[i],
ceil_mode));
}
}
}
Expand Down
10 changes: 5 additions & 5 deletions paddle/phi/infermeta/unary.h
Original file line number Diff line number Diff line change
Expand Up @@ -529,9 +529,9 @@ void PNormInferMeta(const MetaTensor& x,
MetaTensor* out);

void PoolInferMeta(const MetaTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int64_t>& kernel_size,
const std::vector<int64_t>& strides,
const std::vector<int64_t>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
Expand All @@ -544,8 +544,8 @@ void PoolInferMeta(const MetaTensor& x,

void Pool2DInferMeta(const MetaTensor& x,
const IntArray& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int64_t>& strides,
const std::vector<int64_t>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
Expand Down
15 changes: 8 additions & 7 deletions paddle/phi/kernels/funcs/pooling.h
Original file line number Diff line number Diff line change
Expand Up @@ -498,20 +498,21 @@ class FractionalMaxPool3dGradFunctor {
DenseTensor* input_grad);
};

inline int PoolOutputSize(int input_size,
int filter_size,
int padding_1,
int padding_2,
int stride,
bool ceil_mode) {
template <typename T = int>
inline T PoolOutputSize(T input_size,
T filter_size,
T padding_1,
T padding_2,
T stride,
bool ceil_mode) {
PADDLE_ENFORCE_NE(
stride,
0,
common::errors::InvalidArgument(
"The stride of PoolOutputSize shall not be 0, but received %d.",
stride));

int output_size;
T output_size;
if (!ceil_mode) {
output_size =
(input_size - filter_size + padding_1 + padding_2) / stride + 1;
Expand Down
29 changes: 17 additions & 12 deletions paddle/phi/kernels/gpudnn/pool_grad_kernel.cu
Original file line number Diff line number Diff line change
Expand Up @@ -307,8 +307,8 @@ void Pool2dGradGPUDNNKernel(const Context& ctx,
const DenseTensor& out,
const DenseTensor& dout,
const IntArray& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int64_t>& strides,
const std::vector<int64_t>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
Expand All @@ -319,13 +319,15 @@ void Pool2dGradGPUDNNKernel(const Context& ctx,
DenseTensor* dx) {
std::vector<int> kernel_size_val(kernel_size.GetData().begin(),
kernel_size.GetData().end());
std::vector<int> strides_val(strides.begin(), strides.end());
std::vector<int> paddings_val(paddings.begin(), paddings.end());
PoolGradRawGPUDNNKernel<T, Context>(ctx,
x,
out,
dout,
kernel_size_val,
strides,
paddings,
strides_val,
paddings_val,
exclusive,
data_format,
pooling_type,
Expand All @@ -339,8 +341,8 @@ template <typename T, typename Context>
void Pool2dDoubleGradGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const IntArray& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int64_t>& strides,
const std::vector<int64_t>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
Expand Down Expand Up @@ -374,9 +376,9 @@ void Pool3dGradGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& out,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int64_t>& kernel_size,
const std::vector<int64_t>& strides,
const std::vector<int64_t>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
Expand All @@ -385,13 +387,16 @@ void Pool3dGradGPUDNNKernel(const Context& ctx,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* dx) {
std::vector<int> kernel_size_val(kernel_size.begin(), kernel_size.end());
std::vector<int> strides_val(strides.begin(), strides.end());
std::vector<int> paddings_val(paddings.begin(), paddings.end());
PoolGradRawGPUDNNKernel<T, Context>(ctx,
x,
out,
dout,
kernel_size,
strides,
paddings,
kernel_size_val,
strides_val,
paddings_val,
exclusive,
data_format,
pooling_type,
Expand Down
Loading
Loading