@@ -118,13 +118,13 @@ def pack_qact_layer(name, model):
 
     QuantLinear = auto_round.export.export_to_autoround.qlinear_triton_act.QuantLinear
 
-    if isinstance(layer, nn.Linear):
+    if type(layer) == nn.Linear:
         in_features = layer.in_features
         out_features = layer.out_features
-    elif isinstance(layer, nn.Conv2d):
+    elif type(layer) == nn.Conv2d:
         in_features = layer.in_channels
         out_features = layer.out_channels
-    elif isinstance(layer, transformers.pytorch_utils.Conv1D):
+    elif type(layer) == transformers.pytorch_utils.Conv1D:
         in_features = layer.weight.shape[0]
         out_features = layer.weight.shape[1]
     bias = layer.bias is not None
@@ -181,7 +181,7 @@ def pack_layer(layer_name, model, backend, device=None):
     if hasattr(layer, "orig_layer"):
         layer = layer.orig_layer
 
-    if not isinstance(layer, SUPPORTED_LAYER_TYPES):  ##already packed
+    if type(layer) not in SUPPORTED_LAYER_TYPES:  ##already packed
         return
 
     if int(layer.act_bits) <= 8:
@@ -200,13 +200,13 @@ def pack_layer(layer_name, model, backend, device=None):
     zp = layer.zp
     QuantLinear = dynamic_import_quant_linear_for_packing(backend, bits, group_size, sym, act_bits)
 
-    if isinstance(layer, nn.Linear):
+    if type(layer) == nn.Linear:
         in_features = layer.in_features
         out_features = layer.out_features
-    elif isinstance(layer, nn.Conv2d):
+    elif type(layer) == nn.Conv2d:
         in_features = layer.in_channels
         out_features = layer.out_channels
-    elif isinstance(layer, transformers.pytorch_utils.Conv1D):
+    elif type(layer) == transformers.pytorch_utils.Conv1D:
         in_features = layer.weight.shape[0]
         out_features = layer.weight.shape[1]
     bias = layer.bias is not None
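
The patch replaces `isinstance()` checks with exact type comparisons, so subclasses of the supported layer types no longer match (only a plain `nn.Linear`, `nn.Conv2d`, or `transformers.pytorch_utils.Conv1D` is dispatched). The sketch below is not part of the patch and `PackedLinear` is a hypothetical subclass; it only illustrates the behavioral difference between the two checks:

```python
# Minimal sketch: isinstance() matches subclasses, an exact type comparison does not.
import torch.nn as nn

class PackedLinear(nn.Linear):  # hypothetical stand-in for an already-processed layer
    pass

layer = PackedLinear(4, 4)

print(isinstance(layer, nn.Linear))  # True  -> the old check would also match this subclass
print(type(layer) == nn.Linear)      # False -> the new check matches only nn.Linear itself
```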