Skip to content

Commit 3c34b5a

Browse files
committed
Add more test cases covering additional input shapes
1 parent 2a5e30d commit 3c34b5a

File tree

2 files changed

+131
-77
lines changed

2 files changed

+131
-77
lines changed

python/tvm/relay/frontend/paddlepaddle.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@
4343

4444

4545
def _get_pad_size(in_size, dilated_kernel_size, stride_size):
46-
"""Calculate the paddings size."""
46+
"""Calculate the paddings size for Conv/Pool in SAME padding mode."""
4747

4848
if stride_size == 1 or in_size % stride_size == 0:
4949
pad = max(dilated_kernel_size - stride_size, 0)

tests/python/frontend/paddlepaddle/test_forward.py

Lines changed: 130 additions & 76 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
import tvm.topi.testing
2525
from tvm import relay
2626
from tvm.contrib import graph_executor
27+
import pytest
2728

2829
import paddle
2930
import paddle.nn as nn
@@ -99,6 +100,7 @@ def verify_model(func, input_data, rtol=1e-5, atol=1e-5):
99100

100101
assert_shapes_match(baseline_output, compiled_output)
101102
tvm.testing.assert_allclose(baseline_output, compiled_output, rtol=rtol, atol=atol)
103+
break
102104

103105

104106
@tvm.testing.uses_gpu
@@ -127,8 +129,6 @@ def add_subtract3(inputs1, inputs2):
127129

128130
@tvm.testing.uses_gpu
129131
def test_forward_arg_max_min():
130-
input_shape = [1, 3, 10, 10]
131-
132132
class ArgMax(nn.Layer):
133133
@paddle.jit.to_static
134134
def forward(self, inputs):
@@ -169,32 +169,50 @@ class ArgMin3(nn.Layer):
169169
def forward(self, inputs):
170170
return inputs.argmin(axis=2, keepdim=True)
171171

172-
input_data = paddle.rand(input_shape, dtype="float32")
173-
verify_model(ArgMax(), input_data=input_data)
174-
verify_model(ArgMax1(), input_data=input_data)
175-
verify_model(ArgMax2(), input_data=input_data)
176-
verify_model(ArgMax3(), input_data=input_data)
177-
verify_model(ArgMin(), input_data=input_data)
178-
verify_model(ArgMin1(), input_data=input_data)
179-
verify_model(ArgMin2(), input_data=input_data)
180-
verify_model(ArgMin3(), input_data=input_data)
172+
input_shapes = [[256], [10, 128], [100, 500, 200], [1, 3, 224, 224]]
173+
for input_shape in input_shapes:
174+
input_data = paddle.rand(input_shape, dtype="float32")
175+
verify_model(ArgMax(), input_data=input_data)
176+
verify_model(ArgMin(), input_data=input_data)
177+
for input_shape in input_shapes[1:]:
178+
input_data = paddle.rand(input_shape, dtype="float32")
179+
verify_model(ArgMax1(), input_data=input_data)
180+
verify_model(ArgMax2(), input_data=input_data)
181+
verify_model(ArgMin1(), input_data=input_data)
182+
verify_model(ArgMin2(), input_data=input_data)
183+
for input_shape in input_shapes[2:]:
184+
input_data = paddle.rand(input_shape, dtype="float32")
185+
verify_model(ArgMax3(), input_data=input_data)
186+
verify_model(ArgMin3(), input_data=input_data)
181187

182188

183189
@tvm.testing.uses_gpu
184190
def test_forward_argsort():
185-
@paddle.jit.to_static
186-
def argsort(inputs):
187-
return paddle.argsort(inputs)
191+
class ArgSort1(nn.Layer):
192+
@paddle.jit.to_static
193+
def forward(self, inputs):
194+
return paddle.argsort(inputs)
188195

189-
@paddle.jit.to_static
190-
def argsort2(inputs):
191-
return paddle.argsort(inputs, axis=0, descending=True)
196+
class ArgSort2(nn.Layer):
197+
@paddle.jit.to_static
198+
def forward(self, inputs):
199+
return paddle.argsort(inputs, axis=0, descending=True)
192200

193-
input_shape = [2, 3, 5]
194-
input_data = paddle.rand(input_shape, dtype="float32")
195-
verify_model(argsort, input_data)
196-
input_data2 = np.random.randint(100, size=input_shape)
197-
verify_model(argsort2, input_data2)
201+
class ArgSort3(nn.Layer):
202+
@paddle.jit.to_static
203+
def forward(self, inputs):
204+
return paddle.argsort(inputs, axis=-1, descending=True)
205+
206+
input_shapes = [[256], [10, 20], [10, 10, 3], [1, 3, 5, 5]]
207+
for input_shape in input_shapes:
208+
# Avoid duplicate elements in the array which will bring
209+
# different results with different sort algorithms
210+
np.random.seed(13)
211+
np_data = np.random.choice(range(-5000, 5000), np.prod(input_shape), replace=False)
212+
input_data = paddle.to_tensor(np_data.reshape(input_shape).astype("int64"))
213+
verify_model(ArgSort1(), [input_data])
214+
verify_model(ArgSort2(), [input_data])
215+
verify_model(ArgSort3(), [input_data])
198216

199217

200218
@tvm.testing.uses_gpu
@@ -291,23 +309,27 @@ def cast2(inputs, dtype="int64"):
291309

292310
@tvm.testing.uses_gpu
293311
def test_forward_check_tensor():
294-
@paddle.jit.to_static
295-
def isfinite(inputs):
296-
return paddle.cast(paddle.isfinite(inputs), "int32")
312+
class IsFinite(nn.Layer):
313+
@paddle.jit.to_static
314+
def forward(self, inputs):
315+
return paddle.cast(paddle.isfinite(inputs), "int32")
297316

298-
@paddle.jit.to_static
299-
def isnan(inputs):
300-
return paddle.cast(paddle.isnan(inputs), "int32")
317+
class IsNan(nn.Layer):
318+
@paddle.jit.to_static
319+
def forward(self, inputs):
320+
return paddle.cast(paddle.isnan(inputs), "int32")
301321

302-
@paddle.jit.to_static
303-
def isinf(inputs):
304-
return paddle.cast(paddle.isinf(inputs), "int32")
322+
class IsInf(nn.Layer):
323+
@paddle.jit.to_static
324+
def forward(self, inputs):
325+
return paddle.cast(paddle.isinf(inputs), "int32")
305326

306-
input_shape = [5, 5]
307-
input_data = paddle.rand(input_shape, dtype="float32")
308-
verify_model(isfinite, input_data=input_data)
309-
verify_model(isnan, input_data=input_data)
310-
verify_model(isinf, input_data=input_data)
327+
input_shapes = [[32], [8, 128], [2, 128, 256], [2, 3, 224, 224], [2, 2, 3, 229, 229]]
328+
for input_shape in input_shapes:
329+
input_data = paddle.rand(input_shape, dtype="float32")
330+
verify_model(IsFinite(), input_data=input_data)
331+
verify_model(IsNan(), input_data=input_data)
332+
verify_model(IsInf(), input_data=input_data)
311333

312334

313335
@tvm.testing.uses_gpu
@@ -391,15 +413,16 @@ def forward(self, inputs):
391413

392414
@tvm.testing.uses_gpu
393415
def test_forward_dot():
394-
@paddle.jit.to_static
395-
def dot(x, y):
396-
return paddle.dot(x, y)
416+
class Dot(nn.Layer):
417+
@paddle.jit.to_static
418+
def forward(self, x, y):
419+
return paddle.dot(x, y)
397420

398-
x_shape = [10, 3]
399-
y_shape = [10, 3]
400-
x_data = paddle.rand(x_shape, dtype="float32")
401-
y_data = paddle.rand(y_shape, dtype="float32")
402-
verify_model(dot, input_data=[x_data, y_data])
421+
input_shapes = [[128], [8, 128]]
422+
for input_shape in input_shapes:
423+
x_data = paddle.rand(input_shape, dtype="float32")
424+
y_data = paddle.rand(input_shape, dtype="float32")
425+
verify_model(Dot(), input_data=[x_data, y_data])
403426

404427

405428
@tvm.testing.uses_gpu
@@ -435,44 +458,70 @@ def forward(self, input1, input2):
435458
api_list = [
436459
"equal",
437460
]
438-
input_shape = [10, 10]
439-
input_shape_2 = [
440-
10,
441-
]
442-
x_data = paddle.randint(1, 10, input_shape, dtype="int32")
443-
y_data = paddle.randint(1, 10, input_shape_2, dtype="int32")
444-
for api_name in api_list:
445-
verify_model(ElemwiseAPI(api_name), [x_data, y_data])
461+
x_shapes = [[128], [8, 128], [8, 200, 300], [2, 3, 229, 229], [2, 3, 3, 224, 224]]
462+
y_shapes = [[1], [8, 128], [8, 1, 1], [2, 3, 229, 229], [2, 3, 3, 224, 1]]
463+
for x_shape, y_shape in zip(x_shapes, y_shapes):
464+
x_data = paddle.randint(1, 1000, x_shape, dtype="int32")
465+
y_data = paddle.randint(1, 1000, y_shape, dtype="int32")
466+
for api_name in api_list:
467+
verify_model(ElemwiseAPI(api_name), [x_data, y_data])
446468

447469

448470
@tvm.testing.uses_gpu
449471
def test_forward_expand():
450472
@paddle.jit.to_static
451473
def expand1(inputs):
452-
return paddle.expand(inputs, shape=[2, 3])
474+
return paddle.expand(inputs, shape=[2, 128])
453475

454476
@paddle.jit.to_static
455477
def expand2(inputs):
456-
shape = paddle.to_tensor(np.array([2, 3]).astype("int32"))
478+
return paddle.expand(inputs, shape=[3, 1, 8, 256])
479+
480+
@paddle.jit.to_static
481+
def expand3(inputs):
482+
return paddle.expand(inputs, shape=[5, 1, 3, 224, 224])
483+
484+
@paddle.jit.to_static
485+
def expand4(inputs):
486+
shape = paddle.to_tensor(np.array([2, 128]).astype("int32"))
457487
return paddle.expand(inputs, shape=shape)
458488

459-
x_shape = [3]
460-
x_data = paddle.rand(x_shape, dtype="float32")
461-
verify_model(expand1, input_data=[x_data])
462-
verify_model(expand2, input_data=[x_data])
489+
@paddle.jit.to_static
490+
def expand5(inputs):
491+
shape = paddle.to_tensor(np.array([3, 1, 8, 256]).astype("int32"))
492+
return paddle.expand(inputs, shape=shape)
493+
494+
@paddle.jit.to_static
495+
def expand6(inputs):
496+
shape = paddle.to_tensor(np.array([5, 1, 3, 224, 224]).astype("int32"))
497+
return paddle.expand(inputs, shape=shape)
498+
499+
data = paddle.rand([128], dtype="float32")
500+
verify_model(expand1, input_data=[data])
501+
verify_model(expand4, input_data=[data])
502+
data = paddle.rand([8, 256], dtype="float32")
503+
verify_model(expand2, input_data=[data])
504+
verify_model(expand5, input_data=[data])
505+
data = paddle.rand([1, 3, 224, 224], dtype="float32")
506+
verify_model(expand3, input_data=[data])
507+
verify_model(expand6, input_data=[data])
463508

464509

465510
@tvm.testing.uses_gpu
466511
def test_forward_expand_as():
467-
@paddle.jit.to_static
468-
def expand_as(x, y):
469-
z = paddle.expand_as(x, y)
470-
z += y
471-
return z
512+
class ExpandAs(nn.Layer):
513+
@paddle.jit.to_static
514+
def forward(self, x, y):
515+
z = paddle.expand_as(x, y)
516+
z += y
517+
return z
472518

473-
data_x = paddle.to_tensor([1, 2, 3], dtype="int32")
474-
data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype="float32")
475-
verify_model(expand_as, [data_x, data_y])
519+
x_shapes = [[1], [8, 128], [8, 1, 1], [2, 3, 229, 229], [2, 3, 3, 224, 1]]
520+
y_shapes = [[128], [8, 128], [8, 200, 300], [2, 3, 229, 229], [2, 3, 3, 224, 224]]
521+
for x_shape, y_shape in zip(x_shapes, y_shapes):
522+
x_data = paddle.rand(x_shape, dtype="float32")
523+
y_data = paddle.rand(y_shape, dtype="float32")
524+
verify_model(ExpandAs(), [x_data, y_data])
476525

477526

478527
@tvm.testing.uses_gpu
@@ -591,11 +640,14 @@ def forward(self, x, y):
591640
z = self.func(x, y, out=out)
592641
return paddle.cast(z, "int32")
593642

594-
x = paddle.to_tensor([True])
595-
y = paddle.to_tensor([True, False, True, False])
596-
verify_model(LogicalAPI("logical_and"), [x, y])
597-
verify_model(LogicalAPI("logical_or"), [x, y])
598-
verify_model(LogicalAPI("logical_xor"), [x, y])
643+
x_shapes = [[128], [8, 128], [8, 200, 300], [2, 3, 229, 229], [2, 3, 3, 224, 224]]
644+
y_shapes = [[1], [8, 128], [8, 1, 1], [2, 3, 229, 229], [2, 3, 3, 224, 1]]
645+
for x_shape, y_shape in zip(x_shapes, y_shapes):
646+
x_data = paddle.randint(0, 2, x_shape).astype("bool")
647+
y_data = paddle.randint(0, 2, y_shape).astype("bool")
648+
verify_model(LogicalAPI("logical_and"), [x_data, y_data])
649+
verify_model(LogicalAPI("logical_or"), [x_data, y_data])
650+
verify_model(LogicalAPI("logical_xor"), [x_data, y_data])
599651

600652

601653
@tvm.testing.uses_gpu
@@ -796,11 +848,13 @@ def forward(self, inputs):
796848
"relu",
797849
"tanh",
798850
]
799-
input_shape = [1, 3, 10, 10]
800-
input_data = paddle.rand(input_shape, dtype="float32")
801-
for api_name in api_list:
802-
verify_model(MathAPI(api_name), input_data=input_data)
851+
input_shapes = [[128], [2, 256], [1000, 128, 32], [7, 3, 256, 256]]
852+
for input_shape in input_shapes:
853+
input_data = paddle.rand(input_shape, dtype="float32")
854+
for api_name in api_list:
855+
verify_model(MathAPI(api_name), input_data=input_data)
803856

804857

805858
if __name__ == "__main__":
806-
pytest.main([__file__])
859+
# pytest.main([__file__])
860+
test_forward_math_api()

0 commit comments

Comments
 (0)