python/tvm/relay/frontend/paddlepaddle.py (177 additions, 0 deletions)

@@ -1530,6 +1530,105 @@ def convert_pool2d(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_pool3d(g, op, block):
"""Operator converter for pool3d."""

adaptive = op.attr("adaptive")
ceil_mode = op.attr("ceil_mode")
global_pooling = op.attr("global_pooling")
ksize = op.attr("ksize")
paddings = op.attr("paddings")
padding_algorithm = op.attr("padding_algorithm")
pooling_type = op.attr("pooling_type")
data_format = op.attr("data_format")

if global_pooling:
adaptive = True
ksize = [1, 1, 1]

    input_x = g.get_node(op.input("X")[0])
    _, _, in_d, in_h, in_w = infer_shape(input_x)

op_map = {
"avg": "avg_pool3d",
"max": "max_pool3d",
}

strides = op.attr("strides")
    if isinstance(strides, int):
        strides = [strides, strides, strides]
if isinstance(ksize, int):
ksize = [ksize, ksize, ksize]
if isinstance(paddings, int):
paddings = [paddings] * 3

if padding_algorithm == "VALID":
paddings = [0, 0, 0]
elif padding_algorithm == "SAME":
input_x = autopad(input_x, strides, ksize)
paddings = [0, 0, 0]
elif padding_algorithm == "EXPLICIT":
if len(paddings) == 3:
paddings = [
paddings[0],
paddings[1],
paddings[2],
paddings[0],
paddings[1],
paddings[2],
]
elif len(paddings) == 6:
paddings = [
paddings[0],
paddings[3],
paddings[1],
paddings[4],
paddings[2],
paddings[5],
]
else:
        msg = 'Value {} in attribute "padding_algorithm" of operator Pool3d is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm))

    # Special case: if the padded input is smaller than the kernel along a
    # dimension, shrink the kernel to the input size.
    if (
        not isinstance(in_d, _op.Expr)
        and padding_algorithm == "EXPLICIT"
        and in_d + paddings[0] + paddings[3] < ksize[0]
    ):
        ksize[0] = in_d
    if (
        not isinstance(in_h, _op.Expr)
        and padding_algorithm == "EXPLICIT"
        and in_h + paddings[1] + paddings[4] < ksize[1]
    ):
        ksize[1] = in_h
    if (
        not isinstance(in_w, _op.Expr)
        and padding_algorithm == "EXPLICIT"
        and in_w + paddings[2] + paddings[5] < ksize[2]
    ):
        ksize[2] = in_w

if not adaptive:
if pooling_type == "avg":
exclusive = op.attr("exclusive")
out = _op.nn.avg_pool3d(
input_x,
pool_size=ksize,
strides=strides,
padding=paddings,
ceil_mode=ceil_mode,
count_include_pad=not exclusive,
layout=data_format,
)
        else:
            out = getattr(_op.nn, op_map[pooling_type])(
                input_x,
                pool_size=ksize,
                strides=strides,
                padding=paddings,
                ceil_mode=ceil_mode,
                layout=data_format,
            )
else:
out = getattr(_op.nn, "adaptive_" + op_map[pooling_type])(
input_x, output_size=ksize, layout=data_format
)
g.add_node(op.output("Out")[0], out)

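A quick way to sanity-check the non-adaptive average branch above is to build the corresponding Relay expression directly. A minimal sketch, assuming an NCDHW input; the shape and pooling parameters below are illustrative, not taken from the patch:

    import tvm
    from tvm import relay

    x = relay.var("x", shape=(1, 2, 4, 8, 8), dtype="float32")
    y = relay.nn.avg_pool3d(
        x,
        pool_size=(2, 2, 2),
        strides=(2, 2, 2),
        padding=(0, 0, 0),
        layout="NCDHW",
        count_include_pad=True,
    )
    mod = tvm.IRModule.from_expr(y)
    print(mod)  # shows the kind of nn.avg_pool3d call the converter would emit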

def convert_pow(g, op, block):
"""Operator converter for pow."""

@@ -2038,6 +2137,73 @@ def convert_selu(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_set_value(g, op, block):
"""Operator converter for set_value."""

x = g.get_node(op.input("Input")[0])
if op.input("StartsTensorList"):
starts = g.get_node(op.input("StartsTensorList")[0])
else:
starts = op.attr("starts")[0]

if op.input("EndsTensorList"):
ends = g.get_node(op.input("EndsTensorList")[0])
else:
ends = op.attr("ends")[0]

axes = op.attr("axes")
    assert len(axes) == 1, "Only one axis is supported now."
axes = axes[0]

input_shape = infer_shape(x)
ends = min(ends, input_shape[axes])

if op.input("StepsTensorList"):
steps = g.get_node(op.input("StepsTensorList")[0])
else:
steps = op.attr("steps")[0]

if op.input("ValueTensor"):
value = g.get_node(op.input("ValueTensor")[0])
else:
input_dtype = infer_type(x).checked_type.dtype
if input_dtype == "float64":
value = _expr.const(op.attr("fp64_values"), dtype="float64")
elif input_dtype == "float32":
value = _expr.const(op.attr("fp32_values"), dtype="float32")
elif input_dtype == "int32":
value = _expr.const(op.attr("int32_values"), dtype="int32")
elif input_dtype == "int64":
value = _expr.const(op.attr("int64_values"), dtype="int64")
else:
raise tvm.error.OpNotImplemented(
"dtype {} is not supported for set_value".format(input_dtype)
)

sliced_data = _op.strided_slice(x, begin=[starts], end=[ends], strides=[steps], axes=[axes])
sliced_shape = infer_shape(sliced_data)

if infer_shape(value) != sliced_shape:
expand_value = _op.broadcast_to(value, sliced_shape)
else:
expand_value = value

    # Normalize negative starts/ends before building the scatter indices
    # (strided_slice above already accepts negative values as-is).
    if starts < 0:
        starts = starts + input_shape[axes]
    if ends < 0:
        ends = ends + input_shape[axes]

    # Build the indices of the updated rows and scatter the broadcast value
    # into the input. With indices of shape (1, num_updates), scatter_nd
    # updates along the first axis, so this path assumes axes == 0.
    indices = _op.arange(
        start=_expr.const(starts, dtype="int32"),
        stop=_expr.const(ends, dtype="int32"),
        step=_expr.const(steps, dtype="int32"),
        dtype="int32",
    )
    indices = _op.expand_dims(indices, axis=0)
    out = _op.scatter_nd(x, indices, expand_value, "update")
g.add_node(op.output("Out")[0], out)

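The lowering above emulates Paddle's in-place slice assignment x[starts:ends:steps] = value by slicing out the target region (to learn its shape), broadcasting the value to that shape, and scattering it back with scatter_nd in "update" mode. A minimal NumPy sketch of the same semantics under the converter's single-axis assumption; the shapes and values are illustrative:

    import numpy as np

    x = np.zeros((5, 2), dtype="float32")
    starts, ends, steps = 1, 4, 1
    value = np.float32(3.0)

    indices = np.arange(starts, ends, steps)             # rows to update
    updates = np.broadcast_to(value, (len(indices), 2))  # match the sliced shape
    x[indices] = updates                                 # scatter_nd "update"
    print(x)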

def convert_shape(g, op, block):
"""Operator converter for shape."""

@@ -2368,6 +2534,14 @@ def convert_take_along_axis(g, op, block):
g.add_node(op.output("Result")[0], out)


def convert_tanhshrink(g, op, block):
"""Operator converter for tanhshrink."""

x = g.get_node(op.input("X")[0])
out = x - _op.tanh(x)
g.add_node(op.output("Out")[0], out)

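The converter relies on the identity tanhshrink(x) = x - tanh(x), so no dedicated Relay op is required. A quick NumPy check of that identity; the input values are illustrative:

    import numpy as np

    x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0], dtype="float32")
    ref = x - np.tanh(x)  # what the converter emits
    # paddle.nn.functional.tanhshrink(paddle.to_tensor(x)) is defined the
    # same way, so it should match ref elementwise.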

def convert_thresholded_relu(g, op, block):
"""Operator converter for thresholded_relu."""

@@ -2634,6 +2808,7 @@ def convert_where_index(g, op, block):
"pad3d": convert_padding,
"pixel_shuffle": convert_pixel_shuffle,
"pool2d": convert_pool2d,
"pool3d": convert_pool3d,
"pow": convert_pow,
"prelu": convert_prelu,
"range": convert_range,
@@ -2656,6 +2831,7 @@
"scatter": convert_scatter,
"scatter_nd_add": convert_scatter_nd_add,
"selu": convert_selu,
"set_value": convert_set_value,
"shape": convert_shape,
"sigmoid": convert_unary_op,
"sign": convert_unary_op,
@@ -2679,6 +2855,7 @@
"take_along_axis": convert_take_along_axis,
"tan": convert_unary_op,
"tanh": convert_unary_op,
"tanh_shrink": convert_tanhshrink,
"top_k": convert_topk,
"thresholded_relu": convert_thresholded_relu,
"tile": convert_tile,
tests/python/frontend/paddlepaddle/test_forward.py (91 additions, 0 deletions)

@@ -1931,6 +1931,16 @@ def topk8(inputs):
verify_model(topk8, input_data=input_data_fp32)


@tvm.testing.uses_gpu
def test_forward_tanhshrink():
@paddle.jit.to_static
def tanhshrink(inputs):
return paddle.nn.functional.tanhshrink(inputs)

input_data = paddle.randn(shape=[2, 3], dtype="float32")
verify_model(tanhshrink, input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_one_hot_v2():
@paddle.jit.to_static
@@ -2387,5 +2397,86 @@ def forward(self, input_data, label):
verify_model(SoftmaxWithCrossEntropy(soft_label=True, axis=0), input_data=[input_data, label])


@tvm.testing.uses_gpu
def test_forward_pool3d():
class Pool3D1(nn.Layer):
@paddle.jit.to_static
def forward(self, inputs):
return nn.functional.avg_pool3d(inputs, kernel_size=2, stride=2, padding=0)

class Pool3D2(nn.Layer):
@paddle.jit.to_static
def forward(self, inputs):
return nn.functional.adaptive_avg_pool3d(inputs, output_size=[3, 3, 3])

class Pool3D3(nn.Layer):
@paddle.jit.to_static
def forward(self, inputs):
return nn.functional.avg_pool3d(
inputs,
kernel_size=3,
stride=1,
padding=[1, 1, 1],
exclusive=False,
divisor_override=2.5,
)

class Pool3D4(nn.Layer):
@paddle.jit.to_static
def forward(self, inputs):
return nn.functional.avg_pool3d(
inputs,
kernel_size=2,
stride=1,
padding=[[0, 0], [0, 0], [1, 1], [1, 1], [1, 1]],
ceil_mode=True,
data_format="NCDHW",
)

class Pool3D5(nn.Layer):
@paddle.jit.to_static
def forward(self, inputs):
return nn.functional.avg_pool3d(
inputs,
kernel_size=2,
stride=1,
padding=[[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]],
ceil_mode=True,
data_format="NDHWC",
)

input_shapes = [[1, 2, 2, 8, 8], [1, 2, 3, 10, 10]] # [N, C, D, H, W]
for input_shape in input_shapes:
input_data = paddle.uniform(shape=input_shape, dtype="float32", min=-1, max=1)
verify_model(Pool3D1(), input_data=input_data)
verify_model(Pool3D2(), input_data=input_data)
verify_model(Pool3D3(), input_data=input_data)
verify_model(Pool3D4(), input_data=input_data)
verify_model(Pool3D5(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_set_value():
class SetValue(nn.Layer):
@paddle.jit.to_static
def forward(self, inputs, update_input):
x = inputs + 1
x[3:] = 3
x[1:] = 3.0
x[2:] = update_input
x[0] = 1
x[-3:-2] = 1
x[0][0] = 5
return x

input_shapes = [[5, 2], [10, 3], [10, 3, 3]]
for input_shape in input_shapes:
input_data = paddle.uniform(shape=input_shape, dtype="float32", min=-1, max=1)
update_shape = input_shape.copy()
update_shape[0] = input_shape[0] - 2
update_input = paddle.uniform(shape=update_shape, dtype="float32", min=-1, max=1)
verify_model(SetValue(), input_data=[input_data, update_input])


if __name__ == "__main__":
tvm.testing.main()