 
 class TestSumOp(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
+        self.attrs = {'dim': [0]}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestSumOp_fp16(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
@@ -50,22 +53,24 @@ def setUp(self):
         self.gradient = self.calc_gradient()
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def calc_gradient(self):
         x = self.inputs["X"]
         grad = np.ones(x.shape, dtype=x.dtype)
         return grad,
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)
 
 
 @unittest.skipIf(not core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")
 class TestSumOp_bf16(OpTest):
     def setUp(self):
         np.random.seed(100)
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.dtype = np.uint16
         self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
@@ -79,12 +84,15 @@ def setUp(self):
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_eager=True)
 
     def test_check_grad(self):
         place = core.CUDAPlace(0)
         self.check_grad_with_place(
-            place, ['X'], 'Out', user_defined_grads=self.gradient)
+            place, ['X'],
+            'Out',
+            user_defined_grads=self.gradient,
+            check_eager=True)
 
     def calc_gradient(self):
         x = self.x
@@ -94,6 +102,7 @@ def calc_gradient(self):
 
 class TestSumOp_fp16_withInt(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format
@@ -107,49 +116,55 @@ def setUp(self):
         self.gradient = self.calc_gradient()
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def calc_gradient(self):
         x = self.inputs["X"]
         grad = np.ones(x.shape, dtype=x.dtype)
         return grad,
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)
 
 
 class TestSumOp5D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
         }
+        self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestSumOp6D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
         }
+        self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestSumOp8D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
@@ -158,10 +173,10 @@ def setUp(self):
         self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 @skip_check_grad_ci(
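
For readers unfamiliar with Paddle's operator-test framework, the pattern these changes extend looks roughly like the sketch below. This is a minimal illustration, not code from this PR: `TestSumOpSketch` is a made-up class name, the shapes are arbitrary, the `from op_test import OpTest` path is the one typically used from inside Paddle's tests/unittests directory, and the behavior comments reflect my reading of the framework. The idea is that `self.python_api` tells OpTest which Python-level API corresponds to `self.op_type`, and `check_eager=True` asks `check_output`/`check_grad` to additionally run the operator through the eager (dygraph) path and compare it against that API.

    import unittest

    import numpy as np
    import paddle
    from op_test import OpTest  # import path assumed, as used in Paddle's tests/unittests

    paddle.enable_static()  # OpTest cases are typically defined under static graph mode


    class TestSumOpSketch(OpTest):  # hypothetical example class
        def setUp(self):
            self.python_api = paddle.sum  # eager-mode counterpart of the op under test
            self.op_type = "reduce_sum"
            self.inputs = {'X': np.random.random((3, 4)).astype("float64")}
            self.attrs = {'dim': [0]}  # reduce over axis 0
            # Reference output computed directly with NumPy.
            self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

        def test_check_output(self):
            # With check_eager=True the framework also runs the eager execution
            # path and checks it against self.python_api.
            self.check_output(check_eager=True)

        def test_check_grad(self):
            self.check_grad(['X'], 'Out', check_eager=True)


    if __name__ == '__main__':
        unittest.main()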