
Commit 307a338

Fix: condition for run_compressed
Parent: 844368c

1 file changed (6 additions, 0 deletions)

src/transformers/quantizers/quantizer_compressed_tensors.py

@@ -123,6 +123,12 @@ def _process_model_before_weight_loading(self, model, **kwargs):
                     " and not for sparsified models. Setting `run_compressed=False`"
                 )
                 self.run_compressed = False
+            elif self.is_quantized and not self.is_quantization_compressed:
+                logger.warn(
+                    "`run_compressed` is only supported for compressed models."
+                    "Setting `run_compressed=False`"
+                )
+                self.run_compressed = False
             apply_quantization_config(model, ct_quantization_config, run_compressed=self.run_compressed)
         elif self.is_quantized and not self.is_quantization_compressed:
             apply_quantization_config(model, ct_quantization_config)
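
For context, the added branch reduces to a simple guard: when `run_compressed` is requested but the checkpoint is quantized without being compression-packed, the quantizer warns and falls back to `run_compressed=False` before `apply_quantization_config` is called. Below is a minimal, self-contained sketch of that guard, not the upstream method: the function name `resolve_run_compressed` is hypothetical, and it uses `logger.warning` with a separating space where the committed code calls `logger.warn` and joins the two string literals directly.

import logging

logger = logging.getLogger(__name__)


def resolve_run_compressed(run_compressed: bool, is_quantized: bool, is_quantization_compressed: bool) -> bool:
    # Hypothetical helper mirroring the guard added in this commit:
    # compressed execution is only valid for checkpoints that are
    # actually compression-packed.
    if run_compressed and is_quantized and not is_quantization_compressed:
        logger.warning(
            "`run_compressed` is only supported for compressed models. "
            "Setting `run_compressed=False`"
        )
        return False
    return run_compressed

For example, `resolve_run_compressed(True, True, False)` logs the warning and returns False, matching the fallback this hunk adds, while `resolve_run_compressed(True, True, True)` leaves compressed execution enabled.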
