Skip to content

【PaddlePaddle Hackathon 5】add paddle pool3d op #20095

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
202 changes: 202 additions & 0 deletions src/frontends/paddle/src/op/pool3d.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,202 @@
//*****************************************************************************
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
//*****************************************************************************

#include "openvino/frontend/paddle/node_context.hpp"
#include "openvino/opsets/opset6.hpp"
#include "openvino/opsets/opset8.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
// Helper: resolve explicit paddings and the auto-pad mode for a 3D pooling op.
// On return, pad_begin/pad_end hold the (depth, height, width) paddings and
// auto_pad reflects the "padding_algorithm" attribute (EXPLICIT when the
// attribute is absent, which is the case for adaptive max pooling).
static void get_paddings(const NodeContext& node, ov::Shape& pad_begin, ov::Shape& pad_end, ov::op::PadType& auto_pad) {
    if (node.has_attribute("padding_algorithm")) {
        const auto pad_algo = node.get_attribute<std::string>("padding_algorithm");
        if (pad_algo == "SAME") {
            auto_pad = ov::op::PadType::SAME_UPPER;
        } else if (pad_algo == "VALID") {
            auto_pad = ov::op::PadType::VALID;
        } else if (pad_algo == "EXPLICIT") {
            auto_pad = ov::op::PadType::EXPLICIT;
        } else {
            throw std::runtime_error("Unsupported pooling padding_algorithm " + pad_algo);
        }
    } else {
        // adaptive_maxpool carries no padding_algorithm attribute.
        auto_pad = ov::op::PadType::EXPLICIT;
    }

    /*If pool padding size is a tuple or list, it could be in three forms:
    [pad_depth, pad_height, pad_width] or [pad_depth_front, pad_depth_back,
    pad_height_top, pad_height_bottom, pad_width_left, pad_width_right],
    and when data_format is "NCDHW", pool_padding can
    be in the form [[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top,
    pad_height_bottom], [pad_width_left, pad_width_right]]. when
    data_format is "NDHWC", pool_padding can be in the form
    [[0,0], [pad_depth_front, pad_depth_back], [pad_height_top,
    pad_height_bottom], [pad_width_left, pad_width_right], [0,0]].
    Otherwise, the pool padding size will be a square of an int.*/
    const auto paddings = node.get_attribute<std::vector<int32_t>>("paddings");

    switch (paddings.size()) {
    case 3:
        // Symmetric padding: one value per spatial dimension.
        pad_begin =
            Shape{static_cast<size_t>(paddings[0]), static_cast<size_t>(paddings[1]), static_cast<size_t>(paddings[2])};
        pad_end = pad_begin;
        break;
    case 6:
        // Asymmetric padding: (front, back, top, bottom, left, right).
        pad_begin =
            Shape{static_cast<size_t>(paddings[0]), static_cast<size_t>(paddings[2]), static_cast<size_t>(paddings[4])};
        pad_end = Shape{
            static_cast<size_t>(paddings[1]),
            static_cast<size_t>(paddings[3]),
            static_cast<size_t>(paddings[5]),
        };
        break;
    default:
        throw std::runtime_error("Unsupported pooling paddings " + std::to_string(paddings.size()));
    }
}

// Convert Paddle "pool3d" / "max_pool3d_with_index" to OpenVINO.
// Three variants are handled:
//  * global pooling (or adaptive pooling to output size 1x1x1) -> ReduceMax / ReduceMean
//  * adaptive pooling -> AdaptiveMaxPool / AdaptiveAvgPool (opset8)
//  * regular pooling  -> MaxPool / AvgPool (opset6)
// Only NCDHW layout is supported (NDHWC is disabled).
NamedOutputs pool3d(const NodeContext& node) {
    auto data = node.get_input("X");

    auto pooling_type = node.get_attribute<std::string>("pooling_type", {});
    const auto global_pooling = node.get_attribute<bool>("global_pooling");
    const auto adaptive = node.get_attribute<bool>("adaptive");
    const auto kernel_shape = node.get_attribute<std::vector<int32_t>>("ksize");

    const auto rounding_type =
        node.get_attribute<bool>("ceil_mode", false) ? ov::op::RoundingType::CEIL : ov::op::RoundingType::FLOOR;

    if (pooling_type.empty()) {
        // "max_pool3d_with_index" has no pooling_type attribute; it is a max pool.
        pooling_type = "max";
    }

    PADDLE_OP_CHECK(node, (pooling_type == "max") || (pooling_type == "avg"), "pool3d: not supported pooling type !");
    PADDLE_OP_CHECK(node, kernel_shape.size() == 1 || kernel_shape.size() == 3, "pool3d: ksize must be 1 or 3!");

    const PartialShape input_shape = data.get_partial_shape();

    const int32_t input_rank = static_cast<int32_t>(input_shape.rank().get_length());
    PADDLE_OP_CHECK(node, input_rank >= 2, "input tensor rank must be at least 2");

    auto auto_pad = ov::op::PadType::EXPLICIT;
    ov::Shape pad_begin, pad_end;
    get_paddings(node, pad_begin, pad_end, auto_pad);

    // Global pooling (or adaptive pooling where every target spatial dim is 1)
    // reduces the last three axes. Note: all_of, not any_of — an adaptive target
    // such as [1, 2, 2] must NOT be treated as a global reduction.
    if (global_pooling || (adaptive && std::all_of(kernel_shape.begin(), kernel_shape.end(), [](int32_t i) {
                               return i == 1;
                           }))) {
        const auto axes =
            ov::opset6::Constant::create(ov::element::i64, {3}, {input_rank - 3, input_rank - 2, input_rank - 1});
        if (pooling_type == "max") {
            return node.default_single_output_mapping({std::make_shared<ov::opset6::ReduceMax>(data, axes, true)},
                                                      {"Out"});
        }
        return node.default_single_output_mapping({std::make_shared<ov::opset6::ReduceMean>(data, axes, true)},
                                                  {"Out"});
    } else if (adaptive) {
        // Adaptive pooling needs the full target spatial shape (depth, height, width).
        auto pool_size = std::vector<int64_t>(3, 0);

        if (kernel_shape.size() == 1) {
            // Not tested: implemented according to spec, but can't generate real
            // model to test
            pool_size[0] = pool_size[1] = pool_size[2] = kernel_shape[0];
        } else {
            pool_size[0] = kernel_shape[0];
            pool_size[1] = kernel_shape[1];
            pool_size[2] = kernel_shape[2];
        }

        const Output<ov::Node> output_shape =
            ov::opset6::Constant::create(ov::element::i64, {pool_size.size()}, pool_size);

        if (pooling_type == "max") {
            // AdaptiveMaxPool yields both values and indices; Paddle expects them
            // as "Out" and "Mask".
            const auto pool_outputs =
                std::make_shared<ov::opset8::AdaptiveMaxPool>(data, output_shape, ov::element::i32)->outputs();
            NamedOutputs outputs;
            outputs["Out"] = {pool_outputs[0]};
            outputs["Mask"] = {pool_outputs[1]};
            return outputs;
        } else {
            return node.default_single_output_mapping(
                {std::make_shared<ov::opset8::AdaptiveAvgPool>(data, output_shape)},
                {"Out"});
        }
    } else {
        const auto strides = node.get_attribute<std::vector<int32_t>>("strides");

        size_t kernel_d, kernel_h, kernel_w;
        if (kernel_shape.size() == 1) {
            // Not tested: implemented according to spec, but can't generate real
            // model to test
            kernel_d = kernel_h = kernel_w = kernel_shape[0];
        } else {
            kernel_d = kernel_shape[0];
            kernel_h = kernel_shape[1];
            kernel_w = kernel_shape[2];
        }

        PADDLE_OP_CHECK(node,
                        kernel_d > 0 && kernel_h > 0 && kernel_w > 0,
                        "pool3d kernel shape must be greater than 0");

        // Clamp the kernel to the padded input extent. Only valid when the
        // spatial dims of input_shape are static. Each spatial dimension pairs
        // with its own padding entry: depth->0, height->1, width->2.
        if (input_shape[input_rank - 3].is_static() && input_shape[input_rank - 2].is_static() &&
            input_shape[input_rank - 1].is_static()) {
            const uint64_t input_d = input_shape[input_rank - 3].get_length();
            const uint64_t input_h = input_shape[input_rank - 2].get_length();
            const uint64_t input_w = input_shape[input_rank - 1].get_length();
            if ((input_d > 0) && (input_d + pad_begin[0] + pad_end[0] < kernel_d)) {
                kernel_d = input_d + pad_begin[0] + pad_end[0];
            }
            if ((input_h > 0) && (input_h + pad_begin[1] + pad_end[1] < kernel_h)) {
                kernel_h = input_h + pad_begin[1] + pad_end[1];
            }
            if ((input_w > 0) && (input_w + pad_begin[2] + pad_end[2] < kernel_w)) {
                kernel_w = input_w + pad_begin[2] + pad_end[2];
            }
        }

        if (pooling_type == "max") {
            return node.default_single_output_mapping(
                {std::make_shared<ov::opset6::MaxPool>(data,
                                                       ov::Strides(strides.begin(), strides.end()),
                                                       pad_begin,
                                                       pad_end,
                                                       ov::Shape{kernel_d, kernel_h, kernel_w},
                                                       rounding_type,
                                                       auto_pad)},
                {"Out"});
        } else {
            // "exclusive" controls whether padded elements are counted in the average.
            const bool exclude_pad = node.get_attribute<bool>("exclusive", false);
            return node.default_single_output_mapping(
                {std::make_shared<ov::opset6::AvgPool>(data,
                                                       ov::Strides(strides.begin(), strides.end()),
                                                       pad_begin,
                                                       pad_end,
                                                       ov::Shape{kernel_d, kernel_h, kernel_w},
                                                       exclude_pad,
                                                       rounding_type,
                                                       auto_pad)},
                {"Out"});
        }
    }
}

} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov
3 changes: 3 additions & 0 deletions src/frontends/paddle/src/op_table.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,7 @@ OP_CONVERTER(p_norm);
OP_CONVERTER(pad3d);
OP_CONVERTER(pow);
OP_CONVERTER(pool2d);
OP_CONVERTER(pool3d);
OP_CONVERTER(prior_box);
OP_CONVERTER(quantize_linear);
OP_CONVERTER(range);
Expand Down Expand Up @@ -196,6 +197,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"matmul", op::matmul},
{"matmul_v2", op::matmul_v2},
{"max_pool2d_with_index", op::pool2d},
{"max_pool3d_with_index", op::pool3d},
{"matrix_nms", op::matrix_nms},
{"meshgrid", op::meshgrid},
{"multiclass_nms3", op::multiclass_nms},
Expand All @@ -207,6 +209,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"pad3d", op::pad3d},
{"pow", op::pow},
{"pool2d", op::pool2d},
{"pool3d", op::pool3d},
{"prior_box", op::prior_box},
{"quantize_linear", op::quantize_linear},
{"range", op::range},
Expand Down
22 changes: 21 additions & 1 deletion src/frontends/paddle/tests/op_fuzzy.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@

#include <fstream>

#include "common_test_utils/test_control.hpp"
#include "ngraph/ngraph.hpp"
#include "paddle_utils.hpp"
#include "common_test_utils/test_control.hpp"

using namespace ngraph;
using namespace InferenceEngine;
Expand Down Expand Up @@ -38,6 +38,16 @@ static const std::vector<std::string> models{
std::string("avgPool_test7"),
std::string("avgPool_test8"),
std::string("avgPool_test9"),
std::string("avg3dPool_test1"),
std::string("avg3dPool_test10"),
std::string("avg3dPool_test2"),
std::string("avg3dPool_test3"),
std::string("avg3dPool_test4"),
std::string("avg3dPool_test5"),
// std::string("avg3dPool_test6"),  // NDHWC support is disabled now
std::string("avg3dPool_test7"),
std::string("avg3dPool_test8"),
std::string("avg3dPool_test9"),
std::string("batch_norm_nchw/batch_norm_nchw.pdmodel"),
std::string("batch_norm_nhwc/batch_norm_nhwc.pdmodel"),
std::string("bicubic_downsample_false_0/bicubic_downsample_false_0.pdmodel"),
Expand Down Expand Up @@ -334,6 +344,16 @@ static const std::vector<std::string> models{
std::string("maxPool_test7"),
std::string("maxPool_test8"),
std::string("maxPool_test9"),
std::string("max3dPool_test1"),
std::string("max3dPool_test10"),
std::string("max3dPool_test2"),
std::string("max3dPool_test3"),
std::string("max3dPool_test4"),
std::string("max3dPool_test5"),
// std::string("max3dPool_test6") // NDHWC support is disabled now
std::string("max3dPool_test7"),
std::string("max3dPool_test8"),
std::string("max3dPool_test9"),
std::string("meshgrid/meshgrid.pdmodel"),
std::string("multiclass_nms_by_background"),
std::string("multiclass_nms_by_class_id"),
Expand Down
Loading