Skip to content

Commit f95d44a

Browse files
authored
Added BF16 Pool2d grad (#37081)
* Added BF16 Pool2d grad * upstream pulled * fix for CI * fixes after review
1 parent 62ec644 commit f95d44a

File tree

3 files changed

+97
-10
lines changed

3 files changed

+97
-10
lines changed

paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc

+2-1
Original file line numberDiff line numberDiff line change
@@ -387,4 +387,5 @@ REGISTER_OP_KERNEL(pool2d, MKLDNN, ::paddle::platform::CPUPlace,
387387
ops::PoolMKLDNNOpKernel<paddle::platform::bfloat16>);
388388

389389
// Register the oneDNN (MKL-DNN) CPU kernels for pool2d's backward pass.
// Mirrors the forward pool2d registration above: fp32 plus bfloat16.
REGISTER_OP_KERNEL(pool2d_grad, MKLDNN, ::paddle::platform::CPUPlace,
                   ops::PoolMKLDNNGradOpKernel<float>,
                   ops::PoolMKLDNNGradOpKernel<paddle::platform::bfloat16>);

python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py

+89-7
Original file line numberDiff line numberDiff line change
@@ -15,22 +15,63 @@
1515
from __future__ import print_function
1616

1717
import unittest
18-
import os
1918
import numpy as np
2019
import paddle.fluid.core as core
21-
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
22-
from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, avg_pool2D_forward_naive, max_pool2D_forward_naive
20+
from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16
21+
from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op_Mixin, max_pool2D_forward_naive
22+
from paddle.fluid.tests.unittests.npu.test_pool2d_op_npu import pool2d_backward_navie as pool2d_backward_naive
2323
from paddle import enable_static
2424

2525

26-
@unittest.skipIf(not core.supports_bfloat16(),
27-
"place does not support BF16 evaluation")
28-
class TestPoolBf16MklDNNOp(TestPool2D_Op):
26+
@OpTestTool.skip_if_not_cpu_bf16()
class TestPoolBf16MklDNNOpGrad(TestPool2D_Op_Mixin, OpTest):
    """Checks the bf16 oneDNN pool2d kernel (forward and backward) against
    fp32 naive reference implementations."""

    def init_kernel_type(self):
        # Route the operator through the oneDNN (MKL-DNN) kernel.
        self.use_mkldnn = True

    def init_data_type(self):
        # uint16 is the in-memory representation used for bfloat16 tensors.
        self.dtype = np.uint16

    def setUp(self):
        super(TestPoolBf16MklDNNOpGrad, self).setUp()
        self.attrs['mkldnn_data_type'] = "bfloat16"
        # Keep an fp32 copy of the input for the reference computations below.
        self.x_fp32 = np.random.random(self.shape).astype(np.float32)

        ref_out = self.pool2D_forward_naive(
            self.x_fp32, self.ksize, self.strides, self.paddings,
            self.global_pool, self.ceil_mode, self.exclusive, self.adaptive,
            "float32").astype(np.float32)

        self.inputs = {'X': convert_float_to_uint16(self.x_fp32)}
        self.outputs = {'Out': convert_float_to_uint16(ref_out)}

    def test_check_output(self):
        self.check_output_with_place(core.CPUPlace())

    def test_check_grad(self):
        # Reference gradient computed in fp32 by the naive backward helper.
        # NOTE(review): ceil_mode is pinned to False here regardless of
        # self.ceil_mode — confirm this matches the grad kernel under test.
        ref_grad = pool2d_backward_naive(
            self.x_fp32,
            ksize=self.ksize,
            strides=self.strides,
            paddings=self.paddings,
            global_pool=self.global_pool,
            ceil_mode=False,
            exclusive=self.exclusive,
            adaptive=self.adaptive,
            data_format=self.data_format,
            pool_type=self.pool_type,
            padding_algorithm=self.padding_algorithm)
        # Scale by the number of output elements — presumably OpTest reduces
        # Out to a mean-based scalar loss; verify against OpTest internals.
        ref_grad = ref_grad / np.prod(self.outputs['Out'].shape)
        self.check_grad_with_place(
            core.CPUPlace(), set(['X']), 'Out', user_defined_grads=[ref_grad])
66+
67+
68+
@OpTestTool.skip_if_not_cpu_bf16()
69+
class TestPoolBf16MklDNNOp(TestPool2D_Op_Mixin, OpTest):
2970
def init_kernel_type(self):
3071
self.use_mkldnn = True
3172

3273
def setUp(self):
33-
TestPool2D_Op.setUp(self)
74+
TestPool2D_Op_Mixin.setUp(self)
3475
self.dtype = np.uint16
3576

3677
input = np.random.random(self.shape).astype(np.float32)
@@ -95,6 +136,47 @@ def init_pool_type(self):
95136
self.pool2D_forward_naive = max_pool2D_forward_naive
96137

97138

139+
class TestCase1PadZeroExclusiveAvgGrad(TestPoolBf16MklDNNOpGrad):
    """Avg-pool grad case: 3x3 window, stride 1, zero padding, exclusive
    averaging, no global pooling."""

    def init_shape(self):
        self.shape = [2, 3, 7, 7]

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]

    def init_paddings(self):
        self.paddings = [0, 0]

    def init_global_pool(self):
        self.global_pool = False

    def init_exclusive(self):
        self.exclusive = True
155+
156+
157+
class TestCase2PadOneNonExclusiveAvgGrad(TestCase1PadZeroExclusiveAvgGrad):
    """Avg-pool grad case with padding 1 and non-exclusive averaging.

    Fix: the class name says "PadOne" but the original only flipped
    `exclusive`, silently inheriting `paddings = [0, 0]` from the Pad-Zero
    parent — so padded non-exclusive averaging was never exercised. Override
    the paddings to match the name and restore the intended coverage.
    """

    def init_paddings(self):
        self.paddings = [1, 1]

    def init_exclusive(self):
        self.exclusive = False
160+
161+
162+
class TestCase0InitialMaxGrad(TestPoolBf16MklDNNOpGrad):
    """Base grad test repeated with max pooling instead of the default avg."""

    def init_pool_type(self):
        self.pool_type = "max"
        # The naive reference must match the pool type under test.
        self.pool2D_forward_naive = max_pool2D_forward_naive
166+
167+
168+
class TestCase1PadZeroExclusiveMaxGrad(TestCase1PadZeroExclusiveAvgGrad):
    """Case 1 configuration (3x3, stride 1, pad 0) with max pooling."""

    def init_pool_type(self):
        self.pool_type = "max"
        # The naive reference must match the pool type under test.
        self.pool2D_forward_naive = max_pool2D_forward_naive
172+
173+
174+
class TestCase2PadOneNonExclusiveMaxGrad(TestCase2PadOneNonExclusiveAvgGrad):
    """Case 2 configuration (non-exclusive) with max pooling."""

    def init_pool_type(self):
        self.pool_type = "max"
        # The naive reference must match the pool type under test.
        self.pool2D_forward_naive = max_pool2D_forward_naive
178+
179+
98180
if __name__ == "__main__":
99181
enable_static()
100182
unittest.main()

python/paddle/fluid/tests/unittests/test_pool2d_op.py

+6-2
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
import numpy as np
2020

2121
import paddle.fluid.core as core
22-
from op_test import OpTest
22+
from paddle.fluid.tests.unittests.op_test import OpTest
2323
import paddle.fluid as fluid
2424
from paddle.fluid import Program, program_guard
2525

@@ -252,7 +252,7 @@ def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
252252
return out
253253

254254

255-
class TestPool2D_Op(OpTest):
255+
class TestPool2D_Op_Mixin(object):
256256
def setUp(self):
257257
self.op_type = "pool2d"
258258
self.use_cudnn = False
@@ -363,6 +363,10 @@ def init_adaptive(self):
363363
self.adaptive = False
364364

365365

366+
class TestPool2D_Op(TestPool2D_Op_Mixin, OpTest):
    """Backward-compatible concrete test class; all behavior lives in
    TestPool2D_Op_Mixin, kept separate so other files can mix it into a
    different OpTest base."""
368+
369+
366370
class TestCase1(TestPool2D_Op):
367371
def init_test_case(self):
368372
self.ksize = [3, 3]

0 commit comments

Comments
 (0)