|
15 | 15 | from __future__ import print_function
|
16 | 16 |
|
17 | 17 | import unittest
|
18 |
| -import os |
19 | 18 | import numpy as np
|
20 | 19 | import paddle.fluid.core as core
|
21 |
| -from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16 |
22 |
| -from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, avg_pool2D_forward_naive, max_pool2D_forward_naive |
| 20 | +from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 |
| 21 | +from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op_Mixin, max_pool2D_forward_naive |
| 22 | +from paddle.fluid.tests.unittests.npu.test_pool2d_op_npu import pool2d_backward_navie as pool2d_backward_naive |
23 | 23 | from paddle import enable_static
|
24 | 24 |
|
25 | 25 |
|
26 |
| -@unittest.skipIf(not core.supports_bfloat16(), |
27 |
| - "place does not support BF16 evaluation") |
28 |
| -class TestPoolBf16MklDNNOp(TestPool2D_Op): |
@OpTestTool.skip_if_not_cpu_bf16()
class TestPoolBf16MklDNNOpGrad(TestPool2D_Op_Mixin, OpTest):
    """bfloat16 oneDNN pool2d test that checks the gradient against a
    NumPy reference (``pool2d_backward_naive``) instead of numeric diff."""

    def init_kernel_type(self):
        # Route the op through the oneDNN (MKL-DNN) kernel.
        self.use_mkldnn = True

    def init_data_type(self):
        # uint16 is the storage dtype Paddle uses for bfloat16 tensors.
        self.dtype = np.uint16

    def setUp(self):
        super(TestPoolBf16MklDNNOpGrad, self).setUp()
        self.attrs['mkldnn_data_type'] = "bfloat16"
        # Keep the fp32 source tensor: test_check_grad recomputes the
        # reference gradient from it.
        self.x_fp32 = np.random.random(self.shape).astype(np.float32)

        pooled = self.pool2D_forward_naive(
            self.x_fp32, self.ksize, self.strides, self.paddings,
            self.global_pool, self.ceil_mode, self.exclusive, self.adaptive,
            "float32").astype(np.float32)

        # Inputs/outputs are the bfloat16 (uint16-stored) views of the
        # fp32 data computed above.
        self.inputs = {'X': convert_float_to_uint16(self.x_fp32)}
        self.outputs = {'Out': convert_float_to_uint16(pooled)}

    def test_check_output(self):
        self.check_output_with_place(core.CPUPlace())

    def test_check_grad(self):
        ref_grad = pool2d_backward_naive(
            self.x_fp32,
            ksize=self.ksize,
            strides=self.strides,
            paddings=self.paddings,
            global_pool=self.global_pool,
            # NOTE(review): forward pass uses self.ceil_mode but the
            # reference backward is always called with False — confirm.
            ceil_mode=False,
            exclusive=self.exclusive,
            adaptive=self.adaptive,
            data_format=self.data_format,
            pool_type=self.pool_type,
            padding_algorithm=self.padding_algorithm)
        # Scale by the output element count (mean-loss gradient convention).
        ref_grad = ref_grad / np.prod(self.outputs['Out'].shape)
        self.check_grad_with_place(
            core.CPUPlace(), set(['X']), 'Out', user_defined_grads=[ref_grad])
| 66 | + |
| 67 | + |
| 68 | +@OpTestTool.skip_if_not_cpu_bf16() |
| 69 | +class TestPoolBf16MklDNNOp(TestPool2D_Op_Mixin, OpTest): |
29 | 70 | def init_kernel_type(self):
|
30 | 71 | self.use_mkldnn = True
|
31 | 72 |
|
32 | 73 | def setUp(self):
|
33 |
| - TestPool2D_Op.setUp(self) |
| 74 | + TestPool2D_Op_Mixin.setUp(self) |
34 | 75 | self.dtype = np.uint16
|
35 | 76 |
|
36 | 77 | input = np.random.random(self.shape).astype(np.float32)
|
@@ -95,6 +136,47 @@ def init_pool_type(self):
|
95 | 136 | self.pool2D_forward_naive = max_pool2D_forward_naive
|
96 | 137 |
|
97 | 138 |
|
class TestCase1PadZeroExclusiveAvgGrad(TestPoolBf16MklDNNOpGrad):
    """Avg-pool gradient case: 3x3 window, stride 1, zero padding,
    exclusive averaging (padded cells not counted), non-global pooling."""

    def init_shape(self):
        self.shape = [2, 3, 7, 7]

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]

    def init_paddings(self):
        self.paddings = [0, 0]

    def init_exclusive(self):
        self.exclusive = True

    def init_global_pool(self):
        self.global_pool = False
| 155 | + |
| 156 | + |
class TestCase2PadOneNonExclusiveAvgGrad(TestCase1PadZeroExclusiveAvgGrad):
    """Like case 1 but with non-exclusive averaging (padded cells counted).

    NOTE(review): the class name says "PadOne" yet no init_paddings
    override is present, so paddings stay [0, 0] from the parent — confirm
    whether a paddings override was intended.
    """

    def init_exclusive(self):
        self.exclusive = False
| 160 | + |
| 161 | + |
class TestCase0InitialMaxGrad(TestPoolBf16MklDNNOpGrad):
    """Base gradient configuration re-run with max pooling."""

    def init_pool_type(self):
        # Swap both the op attribute and the NumPy forward reference.
        self.pool2D_forward_naive = max_pool2D_forward_naive
        self.pool_type = "max"
| 166 | + |
| 167 | + |
class TestCase1PadZeroExclusiveMaxGrad(TestCase1PadZeroExclusiveAvgGrad):
    """Case 1 geometry (3x3, stride 1, zero pad) with max pooling."""

    def init_pool_type(self):
        # Swap both the op attribute and the NumPy forward reference.
        self.pool2D_forward_naive = max_pool2D_forward_naive
        self.pool_type = "max"
| 172 | + |
| 173 | + |
class TestCase2PadOneNonExclusiveMaxGrad(TestCase2PadOneNonExclusiveAvgGrad):
    """Case 2 geometry with max pooling."""

    def init_pool_type(self):
        # Swap both the op attribute and the NumPy forward reference.
        self.pool2D_forward_naive = max_pool2D_forward_naive
        self.pool_type = "max"
| 178 | + |
| 179 | + |
if __name__ == "__main__":
    # enable_static() presumably puts Paddle into static-graph mode before
    # the OpTest-based cases run — confirm this is required by OpTest.
    enable_static()
    unittest.main()
|
0 commit comments