Merged

27 commits
79ba943  [Frontend][Paddle] [PaddlePaddle Hackathon 4] (TheFormerWalker, May 8, 2023)
9f84017  Update test_forward.py (TheFormerWalker, May 8, 2023)
6f6bd51  Merge pull request #1 from TheFormerWalker/patch-2 (TheFormerWalker, May 8, 2023)
9354f95  Merge pull request #2 from TheFormerWalker/patch-1 (TheFormerWalker, May 8, 2023)
cf7c143  Update test_forward.py (TheFormerWalker, May 12, 2023)
3689041  Merge branch 'main' into main (TheFormerWalker, May 15, 2023)
ec3c524  Update paddlepaddle.py (TheFormerWalker, May 24, 2023)
112ee5d  Update test_forward.py (TheFormerWalker, May 24, 2023)
aeec5b4  Update paddlepaddle.py (TheFormerWalker, May 26, 2023)
9f500e3  Update test_forward.py (TheFormerWalker, May 26, 2023)
a97cbfa  Update test_forward.py (TheFormerWalker, May 26, 2023)
5d8e94c  Update test_forward.py (TheFormerWalker, May 26, 2023)
9d8e0ef  Update test_forward.py (TheFormerWalker, May 26, 2023)
667efca  Update test_forward.py (TheFormerWalker, May 28, 2023)
4879f33  Update test_forward.py (TheFormerWalker, May 29, 2023)
e56b750  trigger check again (TheFormerWalker, May 30, 2023)
11ad694  fix gpu CI error (TheFormerWalker, May 31, 2023)
33fdaa4  fix gpu CI error (TheFormerWalker, May 31, 2023)
b4e272e  fix softplus AssertionError (TheFormerWalker, May 31, 2023)
22f397e  fix gpu CI error (TheFormerWalker, Jun 1, 2023)
30888e7  fix gpu CI error (TheFormerWalker, Jun 1, 2023)
30c2a01  Merge pull request #3 from TheFormerWalker/TheFormerWalker-patch-3-1 (TheFormerWalker, Jun 1, 2023)
7b9d7dd  Merge pull request #4 from TheFormerWalker/TheFormerWalker-patch-3 (TheFormerWalker, Jun 1, 2023)
109edb9  fix the "dtype" error of gaussian_random (TheFormerWalker, Jun 1, 2023)
3b78def  fix gpu CI error of input_data of test_forward_gaussian_random (TheFormerWalker, Jun 1, 2023)
ce44bfb  Update test_forward.py for gaussian_random CI (TheFormerWalker, Jun 1, 2023)
ec8a550  Update test_forward.py delete test_forward_gaussian_random (TheFormerWalker, Jun 2, 2023)
12 changes: 11 additions & 1 deletion python/tvm/relay/frontend/paddlepaddle.py
@@ -304,6 +304,7 @@ def convert_conv2d(g, op, block):

    kernel = g.get_node(op.input("Filter")[0])
    input_x = g.get_node(op.input("Input")[0])
+    data_layout = op.attr("data_format")
    out_channels, _, k_h, k_w = infer_shape(kernel)
    if padding_algorithm == "VALID":
        paddings = [0, 0]
@@ -332,6 +333,7 @@ def convert_conv2d(g, op, block):
        groups=groups,
        channels=out_channels,
        kernel_size=[k_h, k_w],
+        data_layout=data_layout,
    )
    g.add_node(op.output("Output")[0], out)

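The two conv2d hunks above read Paddle's data_format attribute and forward it to Relay, so the converter no longer silently assumes NCHW input. As a rough sketch (not part of the PR, shapes made up for illustration) of what the converter now emits, Relay's conv2d accepts the layout directly:

    # Illustrative sketch only: how the forwarded attribute reaches Relay.
    from tvm import relay

    data = relay.var("data", shape=(1, 3, 10, 10), dtype="float32")
    weight = relay.var("weight", shape=(6, 3, 3, 3), dtype="float32")
    out = relay.nn.conv2d(
        data,
        weight,
        strides=(1, 1),
        padding=(1, 1),
        channels=6,
        kernel_size=(3, 3),
        data_layout="NCHW",  # value taken from op.attr("data_format")
    )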
@@ -407,6 +409,7 @@ def convert_conv3d(g, op, block):

    kernel = g.get_node(op.input("Filter")[0])
    input_x = g.get_node(op.input("Input")[0])
+    data_layout = op.attr("data_format")
    out_channels, _, k_d, k_h, k_w = infer_shape(kernel)
    if padding_algorithm == "VALID":
        paddings = [0, 0, 0]
@@ -446,6 +449,7 @@ def convert_conv3d(g, op, block):
        groups=groups,
        channels=out_channels,
        kernel_size=[k_d, k_h, k_w],
+        data_layout=data_layout,
    )
    g.add_node(op.output("Output")[0], out)

@@ -821,7 +825,9 @@ def convert_gaussian_random(g, op, block):
    std = op.attr("std")
    shape = op.attr("shape")
    seed = op.attr("seed")
-    out = _op.random.normal(key=seed, shape=shape, mean=mean, scale=std)
+    dtype = op.attr("dtype")
+    dtype = _convert_dtype_value(dtype)
+    out = _op.random.normal(key=seed, shape=shape, dtype=dtype, mean=mean, scale=std)
    g.add_node(op.output("Out")[0], out)


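The gaussian_random fix converts Paddle's integer dtype attribute into a TVM dtype string before calling _op.random.normal, instead of always producing the default dtype. A minimal sketch of the idea behind _convert_dtype_value, assuming Paddle's VarType enum codes (the exact table lives in the frontend; the codes here are illustrative):

    # Hypothetical illustration; codes follow Paddle's VarType protobuf enum.
    PADDLE_DTYPE_MAP = {
        0: "bool",
        2: "int32",
        3: "int64",
        4: "float16",
        5: "float32",
        6: "float64",
    }

    def convert_dtype_value(code):
        # gaussian_random typically stores dtype=5, i.e. "float32"
        return PADDLE_DTYPE_MAP[code]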
@@ -2164,9 +2170,13 @@ def convert_softplus(g, op, block):
    beta = op.attr("beta")
    beta = _expr.const(beta, dtype=dtype)
    threshold = op.attr("threshold")
+
+    if threshold is None:
+        threshold = 20.0
+    threshold = _expr.const(threshold, dtype=dtype)
    out_softplus = _op.log(_op.exp(x * beta) + _expr.const(1.0, dtype=dtype)) / beta
    out = _op.where(_op.greater(x * beta, threshold), x, out_softplus)
+
    g.add_node(op.output("Out")[0], out)


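The softplus change implements Paddle's thresholded definition, softplus(x) = log(1 + exp(beta * x)) / beta, with the linear shortcut out = x wherever beta * x > threshold: past that point exp(beta * x) risks overflow and softplus(x) is numerically indistinguishable from x anyway. A quick NumPy reference check (not part of the PR) of the behavior the new test relies on:

    import numpy as np

    def softplus_ref(x, beta=1.0, threshold=20.0):
        # Where beta * x exceeds the threshold, return x unchanged;
        # elsewhere use the exact formula.
        return np.where(
            x * beta > threshold,
            x,
            np.log(1.0 + np.exp(x * beta)) / beta,
        )

    print(softplus_ref(np.array([-8.0, 1.0, 25.0], dtype="float32")))
    # approx. [3.354e-04, 1.3133e+00, 2.5000e+01]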
92 changes: 88 additions & 4 deletions tests/python/frontend/paddlepaddle/test_forward.py
@@ -509,6 +509,34 @@ def __init__(self, stride=1, padding=0, dilation=1, groups=1, padding_mode="zero
        def forward(self, inputs):
            return self.softmax(self.conv(inputs))

+    class Conv2D2(nn.Layer):
+        def __init__(
+            self,
+            stride=1,
+            padding=0,
+            dilation=1,
+            groups=1,
+            padding_mode="zeros",
+            data_format="NCHW",
+        ):
+            super(Conv2D2, self).__init__()
+            self.conv = nn.Conv2D(
+                3,
+                6,
+                3,
+                stride=stride,
+                padding=padding,
+                dilation=dilation,
+                groups=groups,
+                padding_mode=padding_mode,
+                data_format=data_format,
+            )
+            self.softmax = nn.Softmax()
+
+        @paddle.jit.to_static
+        def forward(self, inputs):
+            return self.softmax(self.conv(inputs))
+
    input_shapes = [[1, 3, 10, 10], [1, 3, 12, 12]]

    for input_shape in input_shapes:
@@ -521,6 +549,10 @@ def forward(self, inputs):
            input_data=input_data,
        )
        verify_model(Conv2D1(stride=2, padding="SAME", dilation=2, groups=3), input_data=input_data)
+        verify_model(
+            Conv2D2(stride=2, padding="SAME", dilation=2, groups=3, data_format="NCHW"),
+            input_data=input_data,
+        )


@tvm.testing.uses_gpu
@@ -575,6 +607,34 @@ def __init__(self, stride=1, padding=0, dilation=1, groups=1, padding_mode="zero
        def forward(self, inputs):
            return self.softmax(self.conv(inputs))

+    class Conv3D2(nn.Layer):
+        def __init__(
+            self,
+            stride=1,
+            padding=0,
+            dilation=1,
+            groups=1,
+            padding_mode="zeros",
+            data_format="NCDHW",
+        ):
+            super(Conv3D2, self).__init__()
+            self.conv = nn.Conv3D(
+                3,
+                6,
+                3,
+                stride=stride,
+                padding=padding,
+                dilation=dilation,
+                groups=groups,
+                padding_mode=padding_mode,
+                data_format=data_format,
+            )
+            self.softmax = nn.Softmax()
+
+        @paddle.jit.to_static
+        def forward(self, inputs):
+            return self.softmax(self.conv(inputs))
+
    input_shapes = [[1, 3, 10, 10, 10], [1, 3, 12, 12, 12]]

    for input_shape in input_shapes:
@@ -595,6 +655,10 @@ def forward(self, inputs):
            input_data=input_data,
        )
        verify_model(Conv3D(stride=2, padding="SAME", dilation=2, groups=3), input_data=input_data)
+        verify_model(
+            Conv3D2(stride=2, padding="SAME", dilation=2, groups=3, data_format="NCDHW"),
+            input_data=input_data,
+        )


@tvm.testing.uses_gpu
@@ -1720,11 +1784,31 @@ def test_forward_sin():
    pass


-@run_math_api
@tvm.testing.uses_gpu
def test_forward_softplus():
-    x = paddle.to_tensor([-0.4, 1], dtype="float32")
-    m = paddle.nn.Softplus(5, 1)
-    verify_model(m, [x])
+    @paddle.jit.to_static
+    def Softplus1(input):
+        return paddle.nn.functional.softplus(input, beta=1.0, threshold=20.0)
+
+    @paddle.jit.to_static
+    def Softplus2(input):
+        return paddle.nn.functional.softplus(input, beta=6.0, threshold=20.0)
+
+    @paddle.jit.to_static
+    def Softplus3(input):
+        return paddle.nn.functional.softplus(input, beta=1.0, threshold=10.0)
+
+    x = paddle.to_tensor([-8.0, -12.0, 1.0, 18.0, 25.0])
+    verify_model(Softplus1, x)
+    verify_model(Softplus2, x)
+    verify_model(Softplus3, x)
+
+    input_shapes = [[10], [2, 3], [5, 10, 11], [3, 4, 5, 6]]
+    for input_shape in input_shapes:
+        input_data = paddle.randn(shape=input_shape, dtype="float32")
+        verify_model(Softplus1, input_data=input_data)
+        verify_model(Softplus2, input_data=input_data)
+        verify_model(Softplus3, input_data=input_data)
+

@run_math_api