@@ -43,7 +43,7 @@ def setUp(self):
         self.init_group()
         self.init_dilation()
         self.init_test_case()
-        self.init_fuse_relu()
+        self.init_fuse_activation()
         self.init_fuse_residual()
         self.init_data_type()
 
@@ -54,7 +54,9 @@ def setUp(self):
         }
         # This implementation of convolution quantization is based on OneDNN documentation
         # https://oneapi-src.github.io/oneDNN/dev_guide_int8_computations.html#doxid-dev-guide-int8-computations-1dg-i8-comp-s11
-        scale_output_shift = (self.scale_out /
+        inner_scale = 1. if self.fuse_activation != "" else self.scale_out
+        activation_scale = self.scale_out if self.fuse_activation != "" else 1.
+        scale_output_shift = (inner_scale /
                               (self.scale_in * self.scale_weights[0]))
         filter = np.random.random(self.filter_size).astype(self.weighttype)
 
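The new `inner_scale` / `activation_scale` pair factors the output quantization into two steps, following the oneDNN int8 scheme referenced in the comment above: without a fused activation the convolution result is requantized straight to `scale_out`, while with a fused activation the convolution result (and, as the next hunk shows, the residual term) stays at scale 1.0 and `scale_out` is applied only after the activation. A minimal sketch of that factoring, with illustrative scale values rather than the ones defined in `init_test_case`:

```python
def reference_scales(scale_in, scale_weight, scale_out, fuse_activation):
    # Hypothetical helper, not part of the patch: mirrors the factoring above.
    # With a fused activation the conv result is kept at scale 1.0 ...
    inner_scale = 1. if fuse_activation != "" else scale_out
    # ... and scale_out is applied only after the activation.
    activation_scale = scale_out if fuse_activation != "" else 1.
    # Rescaling applied to the accumulated conv result to reach inner_scale.
    scale_output_shift = inner_scale / (scale_in * scale_weight)
    return inner_scale, activation_scale, scale_output_shift


# Illustrative values only: with an activation fused, the conv output is not
# rescaled (inner_scale == 1.0) and scale_out lands in activation_scale.
print(reference_scales(0.95, 10.0, 0.5, "relu"))  # (1.0, 0.5, ...)
print(reference_scales(0.95, 10.0, 0.5, ""))      # (0.5, 1.0, ...)
```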
@@ -78,7 +80,7 @@ def residual_helper(init_low, init_high, output_):
                 init_low, init_high,
                 self.input_residual_size).astype(self.srctype)
             return (output_ + input_residual_ *
-                    (self.scale_out / self.scale_in_eltwise)), input_residual_
+                    (inner_scale / self.scale_in_eltwise)), input_residual_
 
         if self.srctype == np.int8:
             init_low, init_high = (-5, 5)
@@ -101,12 +103,24 @@ def residual_helper(init_low, init_high, output_):
             output, input_residual = residual_helper(init_low, init_high,
                                                      output)
 
-        output = np.round(output)
-
-        if self.fuse_activation == "relu":
-            output = np.maximum(output, 0)
+        if self.fuse_activation == "":
+            pass
+        elif self.fuse_activation == "relu":
+            output = activation_scale * np.maximum(output, 0)
+        elif self.fuse_activation == "hard_swish":
+            output = activation_scale * output / 6. * np.minimum(
+                np.maximum(0, output + 3.), 6)
+        elif self.fuse_activation == "relu6":
+            output = activation_scale * np.maximum(0, np.minimum(6, output))
+        elif self.fuse_activation == "swish":
+            output = activation_scale * output / (1. + np.exp(-1. * output))
+        elif self.fuse_activation == "leaky_relu":
+            output = activation_scale * np.maximum(output, 0.02 * output)
+        else:
+            raise NotImplementedError("test for " + self.fuse_activation +
+                                      " activation not implemented")
 
-        output = output.astype(self.dsttype)
+        output = np.round(output).astype(self.dsttype)
 
         self.inputs = {
             'Input':
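With the scales factored this way, the reference output applies each activation to the un-requantized convolution result and only then multiplies by `activation_scale` before the final round-and-cast. As a standalone sanity check for one of the new branches, hard_swish (x * min(max(x + 3, 0), 6) / 6), here is a small sketch; the function name and values are illustrative, not part of the test:

```python
import numpy as np


def reference_hard_swish(conv_out, activation_scale, dst_dtype=np.int8):
    # hard_swish(x) = x * min(max(x + 3, 0), 6) / 6, as in the branch above,
    # then requantize by activation_scale and round/cast like the test does.
    act = conv_out / 6. * np.minimum(np.maximum(0, conv_out + 3.), 6)
    return np.round(activation_scale * act).astype(dst_dtype)


# hard_swish(3.0) == 3.0, so with activation_scale = 0.5 the quantized
# reference value is round(1.5) = 2 (NumPy rounds halves to even).
print(reference_hard_swish(np.array([3.0]), 0.5))  # [2]
```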
@@ -131,6 +145,8 @@ def residual_helper(init_low, init_high, output_):
             'Scale_weights': self.scale_weights,
             'Scale_in_eltwise': self.scale_in_eltwise,
             'fuse_activation': self.fuse_activation,
+            'fuse_alpha': self.fuse_alpha,
+            'fuse_beta': self.fuse_beta,
             'fuse_residual_connection': self.fuse_residual,
             'mkldnn_data_type': self.mkldnn_data_type
         }
@@ -165,8 +181,10 @@ def init_data_type(self):
         self.srctype = np.uint8
         self.dsttype = np.int8
 
-    def init_fuse_relu(self):
+    def init_fuse_activation(self):
         self.fuse_activation = "relu"
+        self.fuse_alpha = 0
+        self.fuse_beta = 0
 
     def init_fuse_residual(self):
         self.fuse_residual = True
@@ -190,6 +208,34 @@ def init_test_case(self):
         self.scale_in_eltwise = 0.6
 
 
+class TestWithHardSwish(TestConv2D):
+    def init_fuse_activation(self):
+        self.fuse_activation = "hard_swish"
+        self.fuse_alpha = 0
+        self.fuse_beta = 0
+
+
+class TestWithRelu6(TestConv2D):
+    def init_fuse_activation(self):
+        self.fuse_activation = "relu6"
+        self.fuse_alpha = 6
+        self.fuse_beta = 0
+
+
+class TestWithSwish(TestConv2D):
+    def init_fuse_activation(self):
+        self.fuse_activation = "swish"
+        self.fuse_alpha = 1
+        self.fuse_beta = 0
+
+
+class TestWithLeakyRelu(TestConv2D):
+    def init_fuse_activation(self):
+        self.fuse_activation = "leaky_relu"
+        self.fuse_alpha = 0.02
+        self.fuse_beta = 0
+
+
 class TestWithPad(TestConv2D):
     def init_test_case(self):
         TestConv2D.init_test_case(self)
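Assuming the new classes live in the usual Paddle op-test module layout (the file name is not shown in this excerpt; `test_conv2d_int8_mkldnn_op` is used below as a placeholder), the fused-activation cases can be exercised on their own with the standard unittest loader:

```python
import unittest

# Placeholder module name; adjust to wherever the file from the diff above lives.
from test_conv2d_int8_mkldnn_op import (TestWithHardSwish, TestWithRelu6,
                                        TestWithSwish, TestWithLeakyRelu)

suite = unittest.TestSuite()
for case in (TestWithHardSwish, TestWithRelu6, TestWithSwish,
             TestWithLeakyRelu):
    suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(case))
unittest.TextTestRunner(verbosity=2).run(suite)
```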