Commit 5936fa6

Add yaml for reduce_sum OP (PaddlePaddle#41295)
* Add yaml for reduce_sum OP
* Fix CI errors
* Fix CI errors
* Fix CI errors
* Fix CI errors
1 parent 50f8e97 commit 5936fa6

File tree

5 files changed: +55 -18 lines changed

* python/paddle/fluid/tests/unittests/CMakeLists.txt
* python/paddle/fluid/tests/unittests/test_reduce_op.py
* python/paddle/tensor/math.py
* python/paddle/utils/code_gen/api.yaml
* python/paddle/utils/code_gen/backward.yaml

python/paddle/fluid/tests/unittests/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -1077,7 +1077,7 @@ set_tests_properties(test_generator_dataloader PROPERTIES TIMEOUT 120)
 set_tests_properties(test_partial_concat_op PROPERTIES TIMEOUT 120)
 set_tests_properties(test_fuse_optimizer_pass PROPERTIES TIMEOUT 120)
 set_tests_properties(test_softmax_with_cross_entropy_op PROPERTIES TIMEOUT 120)
-set_tests_properties(test_reduce_op PROPERTIES TIMEOUT 120)
+set_tests_properties(test_reduce_op PROPERTIES TIMEOUT 500)
 set_tests_properties(test_adam_optimizer_fp32_fp64 PROPERTIES TIMEOUT 120)
 set_tests_properties(test_elementwise_nn_grad PROPERTIES TIMEOUT 120)
 set_tests_properties(test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass PROPERTIES TIMEOUT 120)

python/paddle/fluid/tests/unittests/test_reduce_op.py

Lines changed: 29 additions & 14 deletions
@@ -26,19 +26,22 @@

 class TestSumOp(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
+        self.attrs = {'dim': [0]}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSumOp_fp16(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
@@ -50,22 +53,24 @@ def setUp(self):
         self.gradient = self.calc_gradient()

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def calc_gradient(self):
         x = self.inputs["X"]
         grad = np.ones(x.shape, dtype=x.dtype)
         return grad,

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)


 @unittest.skipIf(not core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")
 class TestSumOp_bf16(OpTest):
     def setUp(self):
         np.random.seed(100)
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.dtype = np.uint16
         self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
@@ -79,12 +84,15 @@ def setUp(self):

     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_eager=True)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
         self.check_grad_with_place(
-            place, ['X'], 'Out', user_defined_grads=self.gradient)
+            place, ['X'],
+            'Out',
+            user_defined_grads=self.gradient,
+            check_eager=True)

     def calc_gradient(self):
         x = self.x
@@ -94,6 +102,7 @@ def calc_gradient(self):

 class TestSumOp_fp16_withInt(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format
@@ -107,49 +116,55 @@ def setUp(self):
         self.gradient = self.calc_gradient()

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def calc_gradient(self):
         x = self.inputs["X"]
         grad = np.ones(x.shape, dtype=x.dtype)
         return grad,

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)


 class TestSumOp5D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
         }
+        self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSumOp6D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
         }
+        self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSumOp8D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
@@ -158,10 +173,10 @@ def setUp(self):
         self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 @skip_check_grad_ci(
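
Each test now sets self.python_api so that OpTest, when invoked with check_eager=True, can run the same case through the eager-mode paddle.sum entry point and compare it against the legacy reduce_sum operator. Roughly, the eager-side check boils down to a sketch like this (standalone, outside the OpTest harness):

    import numpy as np
    import paddle

    # Mirrors TestSumOp: reduce a (5, 6, 10) float64 tensor along dim 0
    # in eager mode and compare against the NumPy reference.
    x_np = np.random.random((5, 6, 10)).astype("float64")
    out = paddle.sum(paddle.to_tensor(x_np), axis=0)
    np.testing.assert_allclose(out.numpy(), x_np.sum(axis=0))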

python/paddle/tensor/math.py

Lines changed: 12 additions & 1 deletion
@@ -904,7 +904,18 @@ def get_dtype(x, dtype):
         return (False, src_type)

     dtype_flag, dtype = get_dtype(x, dtype)
-    if paddle.in_dynamic_mode():
+
+    if in_dygraph_mode():
+        if reduce_all_flag:
+            axis = range(len(x.shape))
+        else:
+            axis = axis if axis != None and axis != [] else [0]
+
+        out_dtype = convert_np_dtype_to_dtype_(dtype)
+        out = _C_ops.final_state_sum(x, axis, out_dtype, keepdim)
+        return out
+
+    if _in_legacy_dygraph():
         axis = axis if axis != None and axis != [] else [0]
         if dtype_flag:
             return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
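
The new in_dygraph_mode() branch dispatches to the generated final-state kernel (_C_ops.final_state_sum), normalizing the axis first: a reduce-all becomes the full range of dims, otherwise an unset or empty axis falls back to [0]. A minimal sketch of how the branch surfaces to users in eager (dynamic) mode:

    import paddle

    x = paddle.ones([2, 3], dtype='float64')
    print(paddle.sum(x))          # 6.0, no axis means reduce over all dims
    print(paddle.sum(x, axis=0))  # [2., 2., 2.]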

python/paddle/utils/code_gen/api.yaml

Lines changed: 3 additions & 2 deletions
@@ -1596,13 +1596,14 @@
 # no_need_buffer : x, y

 - api : sum
-  args : (Tensor x, int64_t[] axis={}, DataType dtype=DataType::UNDEFINED, bool keep_dim=false)
-  output : Tensor
+  args : (Tensor x, int64_t[] dims={}, DataType out_dtype=paddle::experimental::DataType::UNDEFINED, bool keep_dim=false)
+  output : Tensor(out)
   infer_meta :
     func : SumInferMeta
   kernel :
     func : sum
     data_type : x
+  backward : sum_grad

 # take_along_axis
 - api : take_along_axis
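
Besides renaming axis/dtype to dims/out_dtype and naming the output tensor, the entry now declares backward : sum_grad so the code generator wires up the gradient. Judging from the math.py hunk above, out_dtype is what paddle.sum's user-facing dtype parameter maps to, e.g.:

    import numpy as np
    import paddle

    # dtype asks the kernel to produce the requested output type.
    x = paddle.to_tensor(np.ones((2, 3), dtype='int32'))
    out = paddle.sum(x, axis=0, dtype='int64')
    print(out.dtype)  # paddle.int64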

python/paddle/utils/code_gen/backward.yaml

Lines changed: 10 additions & 0 deletions
@@ -1152,6 +1152,16 @@
   kernel :
     func : subtract_grad

+- backward_api : sum_grad
+  forward : sum (Tensor x, int64_t[] dims={}, DataType out_dtype=paddle::experimental::DataType::UNDEFINED, bool keep_dim=false) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, int64_t[] dims, bool keep_dim, bool reduce_all=false)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : sum_grad
+
 - backward_api : take_along_axis_grad
   forward : take_along_axis (Tensor x, Tensor index, int axis) -> Tensor(out)
   args : (Tensor x, Tensor index, Tensor out_grad, int axis)
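
With this backward_api entry in place, autograd through the eager-mode sum resolves to the sum_grad kernel, which broadcasts the upstream gradient back over the reduced dims: the same all-ones gradient the tests above pass as user_defined_grads. A minimal end-to-end sketch:

    import paddle

    x = paddle.ones([2, 3], dtype='float64')
    x.stop_gradient = False
    loss = paddle.sum(x, axis=0).sum()
    loss.backward()
    print(x.grad)  # all ones, shaped like x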
