
Commit 7c1441d

add unit test for conv+hard_swish
1 parent a3abf4a commit 7c1441d

3 files changed: +57 -10 lines changed


paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass.h

Lines changed: 1 addition & 1 deletion
@@ -42,4 +42,4 @@ class FuseFCActOneDNNPass : public FusePassBase {
 
 } // namespace ir
 } // namespace framework
-} // namespace paddlea
+} // namespace paddle

python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py

Lines changed: 1 addition & 0 deletions
@@ -425,6 +425,7 @@ def _optimize_fp32_graph(self, graph):
         graph = self._apply_pass(graph, 'conv_elementwise_add_mkldnn_fuse_pass')
         graph = self._apply_pass(graph, 'conv_relu_mkldnn_fuse_pass')
         graph = self._apply_pass(graph, 'conv_relu6_mkldnn_fuse_pass')
+        graph = self._apply_pass(graph, 'conv_hard_swish_mkldnn_fuse_pass')
         graph = self._apply_pass(graph, 'fc_fuse_pass',
                                  ['use_gpu', 'use_fc_padding'], [False, False])
         graph = self._apply_pass(graph, 'repeated_fc_relu_fuse_pass')
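
For context, the newly registered conv_hard_swish_mkldnn_fuse_pass folds a hard_swish activation into the preceding convolution. A minimal NumPy sketch of the hard_swish reference formula assumed here (the same expression the updated unit test below uses); the sample values are illustrative only:

import numpy as np

def hard_swish(x):
    # hard_swish(x) = x * min(max(x + 3, 0), 6) / 6
    return x * np.minimum(np.maximum(x + 3., 0.), 6.) / 6.

print(hard_swish(np.array([-4., -1.5, 0., 1.5, 4.])))  # -> [-0., -0.375, 0., 1.125, 4.]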

python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py

Lines changed: 55 additions & 9 deletions
@@ -43,7 +43,7 @@ def setUp(self):
         self.init_group()
         self.init_dilation()
         self.init_test_case()
-        self.init_fuse_relu()
+        self.init_fuse_activation()
         self.init_fuse_residual()
         self.init_data_type()

@@ -54,7 +54,9 @@ def setUp(self):
         }
         # This implementation of convolution quantization is based on OneDNN documentation
         # https://oneapi-src.github.io/oneDNN/dev_guide_int8_computations.html#doxid-dev-guide-int8-computations-1dg-i8-comp-s11
-        scale_output_shift = (self.scale_out /
+        inner_scale = 1. if self.fuse_activation != "" else self.scale_out
+        activation_scale = self.scale_out if self.fuse_activation != "" else 1.
+        scale_output_shift = (inner_scale /
                               (self.scale_in * self.scale_weights[0]))
         filter = np.random.random(self.filter_size).astype(self.weighttype)
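
One way to read the inner_scale / activation_scale split above: relu is positively homogeneous, so the old reference could fold scale_out into the convolution before applying the activation, but clamped activations such as relu6 (and hard_swish) are not, so the output scale has to be applied after the activation. A small NumPy illustration of that point, with made-up values:

import numpy as np

def relu(v):
    return np.maximum(v, 0.)

def relu6(v):
    return np.minimum(np.maximum(v, 0.), 6.)

x, s = np.array([-2., 4., 20.]), 0.6   # hypothetical conv outputs and output scale

print(np.allclose(relu(s * x), s * relu(x)))    # True:  scale placement does not matter for relu
print(np.allclose(relu6(s * x), s * relu6(x)))  # False: the clamp at 6 must see unscaled values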

@@ -78,7 +80,7 @@ def residual_helper(init_low, init_high, output_):
                init_low, init_high,
                self.input_residual_size).astype(self.srctype)
            return (output_ + input_residual_ *
-                   (self.scale_out / self.scale_in_eltwise)), input_residual_
+                   (inner_scale / self.scale_in_eltwise)), input_residual_
 
        if self.srctype == np.int8:
            init_low, init_high = (-5, 5)

@@ -101,12 +103,24 @@ def residual_helper(init_low, init_high, output_):
             output, input_residual = residual_helper(init_low, init_high,
                                                      output)
 
-        output = np.round(output)
-
-        if self.fuse_activation == "relu":
-            output = np.maximum(output, 0)
+        if self.fuse_activation == "":
+            pass
+        elif self.fuse_activation == "relu":
+            output = activation_scale * np.maximum(output, 0)
+        elif self.fuse_activation == "hard_swish":
+            output = activation_scale * output / 6. * np.minimum(
+                np.maximum(0, output + 3.), 6)
+        elif self.fuse_activation == "relu6":
+            output = activation_scale * np.maximum(0, np.minimum(6, output))
+        elif self.fuse_activation == "swish":
+            output = activation_scale * output / (1. + np.exp(-1. * output))
+        elif self.fuse_activation == "leaky_relu":
+            output = activation_scale * np.maximum(output, 0.02 * output)
+        else:
+            raise NotImplementedError("test for " + self.fuse_activation +
+                                      " activation not implemented")
 
-        output = output.astype(self.dsttype)
+        output = np.round(output).astype(self.dsttype)
 
         self.inputs = {
             'Input':

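A side effect of the rewrite above is that the reference output is now rounded exactly once, after the scaled activation, via np.round(output).astype(self.dsttype); presumably this mirrors the fused int8 kernel, which only rounds when producing the final int8 result. Rounding an intermediate value and then scaling can land on a different integer than scaling first and rounding once, as this made-up example shows:

import numpy as np

x = np.array([2.4])   # hypothetical fp32 conv accumulator value
scale_out = 1.5       # hypothetical output scale applied after the fused relu

round_early = np.round(np.maximum(np.round(x), 0.) * scale_out)  # np.round(2.4)=2., 2.*1.5=3.  -> 3.
round_once = np.round(scale_out * np.maximum(x, 0.))             # 1.5*2.4=3.6, np.round(3.6)  -> 4.
print(round_early, round_once)  # the two conventions differ by one int8 step
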
@@ -131,6 +145,8 @@ def residual_helper(init_low, init_high, output_):
             'Scale_weights': self.scale_weights,
             'Scale_in_eltwise': self.scale_in_eltwise,
             'fuse_activation': self.fuse_activation,
+            'fuse_alpha': self.fuse_alpha,
+            'fuse_beta': self.fuse_beta,
             'fuse_residual_connection': self.fuse_residual,
             'mkldnn_data_type': self.mkldnn_data_type
         }

@@ -165,8 +181,10 @@ def init_data_type(self):
         self.srctype = np.uint8
         self.dsttype = np.int8
 
-    def init_fuse_relu(self):
+    def init_fuse_activation(self):
         self.fuse_activation = "relu"
+        self.fuse_alpha = 0
+        self.fuse_beta = 0
 
     def init_fuse_residual(self):
         self.fuse_residual = True

@@ -190,6 +208,34 @@ def init_test_case(self):
         self.scale_in_eltwise = 0.6
 
 
+class TestWithHardSwish(TestConv2D):
+    def init_fuse_activation(self):
+        self.fuse_activation = "hard_swish"
+        self.fuse_alpha = 0
+        self.fuse_beta = 0
+
+
+class TestWithRelu6(TestConv2D):
+    def init_fuse_activation(self):
+        self.fuse_activation = "relu6"
+        self.fuse_alpha = 6
+        self.fuse_beta = 0
+
+
+class TestWithSwish(TestConv2D):
+    def init_fuse_activation(self):
+        self.fuse_activation = "swish"
+        self.fuse_alpha = 1
+        self.fuse_beta = 0
+
+
+class TestWithLeakyRelu(TestConv2D):
+    def init_fuse_activation(self):
+        self.fuse_activation = "leaky_relu"
+        self.fuse_alpha = 0.02
+        self.fuse_beta = 0
+
+
 class TestWithPad(TestConv2D):
     def init_test_case(self):
         TestConv2D.init_test_case(self)
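
The fuse_alpha values chosen by the new test classes appear to line up with the activation formulas used in the reference computation: 6 as the relu6 clamp, 1 as the swish sigmoid factor, and 0.02 as the leaky_relu negative slope, while hard_swish ignores alpha/beta here. A hypothetical parametrized reference, for illustration only (this helper is not part of the patch):

import numpy as np

def reference_activation(x, kind, alpha=0., beta=0.):
    # Hypothetical helper; mirrors how fuse_alpha maps onto the formulas above.
    if kind == "relu6":       # alpha = 6 -> upper clamp
        return np.maximum(0., np.minimum(alpha, x))
    if kind == "swish":       # alpha = 1 -> x * sigmoid(alpha * x)
        return x / (1. + np.exp(-alpha * x))
    if kind == "leaky_relu":  # alpha = 0.02 -> negative slope
        return np.maximum(x, alpha * x)
    if kind == "hard_swish":  # alpha/beta unused by the test's formula
        return x * np.minimum(np.maximum(x + 3., 0.), 6.) / 6.
    raise NotImplementedError(kind)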
