1+ from qkeras import quantized_bits
2+ from qkeras import QDense , QActivation , QConv2D
3+ from tensorflow .keras .models import Sequential
4+ from tensorflow .keras .layers import Dense , Conv2D , MaxPooling2D , Softmax , BatchNormalization , ReLU , Flatten , AveragePooling2D
5+ from hls4ml .optimization .keras .reduction import reduce_model
6+ from hls4ml .optimization .keras .utils import get_model_sparsity
7+
8+ '''
9+ Set some neurons / filters to zero and verify that these are removed
10+ Even is some neurons (columns) in the output layer are zero, these should not be removed (to match data set labels)
11+ Test verify the above property, by setting some zeros in the last layer and verifying these remain in place
12+ '''
13+
def test_keras_model_reduction():
    """Zero out selected filters/neurons in a small Keras CNN and check that
    ``reduce_model`` prunes them everywhere except the output layer, whose
    width must stay aligned with the data-set labels.
    """
    model = Sequential()
    model.add(Conv2D(8, (3, 3), input_shape=(64, 64, 1), name='conv2d_1', padding='same'))
    model.add(MaxPooling2D())
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(Conv2D(32, (5, 5), padding='same', name='conv2d_2'))
    model.add(AveragePooling2D())
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(Flatten())
    model.add(Dense(32, input_shape=(16,), name='dense_1', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(14, name='dense_2', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(5, name='dense_3'))
    model.add(Softmax())

    # Output units / conv filters to force to zero, per layer name.
    zeroed = {
        'conv2d_1': [2, 4, 7],
        'conv2d_2': [0, 1, 2, 3, 4, 5],
        'dense_1': [0, 5, 17, 28],
        'dense_2': [1, 9, 4],
        'dense_3': [3],
    }
    for layer in model.layers:
        cols = zeroed.get(layer.name)
        if cols is None or not isinstance(layer, (Dense, Conv2D)):
            continue
        w = layer.get_weights()
        # The unit/filter dimension is the last kernel axis for both
        # Dense (2-D) and Conv2D (4-D) kernels, so '...' covers both.
        w[0][..., cols] = 0
        layer.set_weights(w)

    overall_sparsity, _ = get_model_sparsity(model)
    assert overall_sparsity > 0

    reduced = reduce_model(model)
    expected_kernel_shapes = {
        'conv2d_1': (3, 3, 1, 5),
        'conv2d_2': (5, 5, 5, 26),
        'dense_1': (6656, 28),
        'dense_2': (28, 11),
        'dense_3': (11, 5),
    }
    for name, shape in expected_kernel_shapes.items():
        assert reduced.get_layer(name).get_weights()[0].shape == shape

    # Hidden layers end up dense again; only the output layer keeps zeros.
    _, per_layer_sparsity = get_model_sparsity(reduced)
    for name in ('conv2d_1', 'conv2d_2', 'dense_1', 'dense_2'):
        assert per_layer_sparsity[name] == 0
    assert per_layer_sparsity['dense_3'] > 0
65+
def test_qkeras_model_reduction():
    """Same property as the Keras test, but for a QKeras model: pruned
    filters/neurons are removed (except in the output layer) and the network
    surgery leaves the quantization configuration untouched.
    """
    bits = 8
    activation = 'quantized_relu(4)'
    quantizer = quantized_bits(bits, 0)

    model = Sequential()
    model.add(QConv2D(8, (3, 3), input_shape=(64, 64, 1), name='qconv2d_1', padding='same', kernel_quantizer=quantizer))
    model.add(MaxPooling2D())
    model.add(BatchNormalization())
    model.add(QActivation(activation, name='qrelu_1'))
    model.add(QConv2D(32, (5, 5), padding='same', name='qconv2d_2', kernel_quantizer=quantizer))
    model.add(AveragePooling2D())
    model.add(BatchNormalization())
    model.add(QActivation(activation, name='qrelu_2'))
    model.add(Flatten())
    model.add(QDense(32, input_shape=(16,), name='qdense_1', kernel_quantizer=quantizer))
    model.add(QActivation(activation, name='qrelu_3'))
    model.add(BatchNormalization())
    model.add(QDense(14, name='qdense_2', kernel_quantizer=quantizer))
    model.add(QActivation(activation, name='qrelu_4'))
    model.add(BatchNormalization())
    model.add(QDense(5, name='qdense_3', kernel_quantizer=quantizer))
    model.add(Softmax())

    # Output units / conv filters to force to zero, per layer name.
    zeroed = {
        'qconv2d_1': [2, 4, 7],
        'qconv2d_2': [0, 1, 2, 3, 4, 5],
        'qdense_1': [0, 5, 17, 28],
        'qdense_2': [1, 9, 4],
        'qdense_3': [3],
    }
    for layer in model.layers:
        cols = zeroed.get(layer.name)
        if cols is None or not isinstance(layer, (QDense, QConv2D)):
            continue
        w = layer.get_weights()
        # The unit/filter dimension is the last kernel axis for both
        # QDense (2-D) and QConv2D (4-D) kernels, so '...' covers both.
        w[0][..., cols] = 0
        layer.set_weights(w)

    overall_sparsity, _ = get_model_sparsity(model)
    assert overall_sparsity > 0

    reduced = reduce_model(model)
    expected_kernel_shapes = {
        'qconv2d_1': (3, 3, 1, 5),
        'qconv2d_2': (5, 5, 5, 26),
        'qdense_1': (6656, 28),
        'qdense_2': (28, 11),
        'qdense_3': (11, 5),
    }
    for name, shape in expected_kernel_shapes.items():
        assert reduced.get_layer(name).get_weights()[0].shape == shape

    # Hidden layers end up dense again; only the output layer keeps zeros.
    _, per_layer_sparsity = get_model_sparsity(reduced)
    for name in ('qconv2d_1', 'qconv2d_2', 'qdense_1', 'qdense_2'):
        assert per_layer_sparsity[name] == 0
    assert per_layer_sparsity['qdense_3'] > 0

    # Verify network surgery has no impact on quantization.
    for name in ('qrelu_1', 'qrelu_2', 'qrelu_3', 'qrelu_4'):
        assert isinstance(reduced.get_layer(name), QActivation)
    for name in ('qconv2d_1', 'qconv2d_2', 'qdense_1', 'qdense_2', 'qdense_3'):
        assert reduced.get_layer(name).kernel_quantizer['config']['bits'] == bits
134+
0 commit comments