@@ -254,6 +254,14 @@ def load(self):
             self._model = FluxKontextPipeline.from_pretrained(
                 self._model_path, **self._kwargs
             )
+        elif "qwen" in self._model_spec.model_name.lower():
+            # TODO: remove this branch when auto pipeline supports
+            # Qwen-Image
+            from diffusers import DiffusionPipeline
+
+            self._model = DiffusionPipeline.from_pretrained(
+                self._model_path, **self._kwargs
+            )
         else:
             raise
         self._load_to_device(self._model)
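This branch works because `DiffusionPipeline.from_pretrained` resolves the concrete pipeline class from the checkpoint's `model_index.json`, so no Qwen-specific import is required. A minimal standalone sketch of that loading path, assuming the `Qwen/Qwen-Image` Hub id, bfloat16 weights, and a CUDA device (the prompt and step count are illustrative):

```python
# Sketch of the generic fallback the new elif relies on: DiffusionPipeline
# inspects model_index.json and instantiates the matching pipeline class
# (a Qwen-Image pipeline here) without a model-specific import.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "Qwen/Qwen-Image",  # assumed Hub id for illustration
    torch_dtype=torch.bfloat16,
)
pipe.to("cuda")

image = pipe(prompt="a watercolor fox", num_inference_steps=30).images[0]
image.save("fox.png")
```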
@@ -348,11 +356,19 @@ def _quantize_text_encoder(self, quantize_text_encoder: Optional[str]):
             return
 
         if not quantize_text_encoder:
+            logger.debug("No text encoder quantization")
             return
 
         quantization_method = self._kwargs.pop("text_encoder_quantize_method", "bnb")
         quantization = self._kwargs.pop("text_encoder_quantization", "8-bit")
 
+        logger.debug(
+            "Quantize text encoder %s with method %s, quantization %s",
+            quantize_text_encoder,
+            quantization_method,
+            quantization,
+        )
+
         torch_dtype = self._torch_dtype
         for text_encoder_name in quantize_text_encoder.split(","):
             quantization_kwargs: Dict[str, Any] = {}
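For context on what the default `"bnb"` / `"8-bit"` combination resolves to, here is a hedged sketch of a typical bitsandbytes text-encoder setup; `BitsAndBytesConfig` and `T5EncoderModel` are the real transformers APIs, but the model path and `text_encoder_2` subfolder are placeholders, not what this code necessarily resolves to:

```python
# Illustrative 8-bit bnb quantization of one text encoder, mirroring the
# method/quantization kwargs popped above.
import torch
from transformers import BitsAndBytesConfig, T5EncoderModel

quant_config = BitsAndBytesConfig(load_in_8bit=True)  # the "8-bit" default

text_encoder_2 = T5EncoderModel.from_pretrained(
    "/path/to/model",            # placeholder model path
    subfolder="text_encoder_2",  # e.g. the T5 encoder in FLUX-style repos
    quantization_config=quant_config,
    torch_dtype=torch.float16,
)
# The quantized encoder would then be handed to the pipeline constructor
# via kwargs, much like this method stashes results in self._kwargs.
```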
@@ -389,8 +405,13 @@ def _quantize_transformer(self):
 
         if not quantization:
             # skip if no quantization specified
+            logger.debug("No transformer quantization")
             return
 
+        logger.debug(
+            "Quantize transformer with %s, quantization %s", method, quantization
+        )
+
         torch_dtype = self._torch_dtype
         transformer_cls = self._get_layer_cls("transformer")
         quantization_config = self._get_quantize_config(
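As a rough sketch of what the resolved transformer class and quantize config amount to, the snippet below uses diffusers' own `BitsAndBytesConfig`; `FluxTransformer2DModel`, the 4-bit NF4 settings, and the model path are example choices, not necessarily what `_get_layer_cls` and `_get_quantize_config` return here:

```python
# Illustrative 4-bit quantized load of a diffusion transformer via
# diffusers' bitsandbytes integration.
import torch
from diffusers import BitsAndBytesConfig, FluxTransformer2DModel

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

transformer = FluxTransformer2DModel.from_pretrained(
    "/path/to/model",          # placeholder model path
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)
```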
@@ -409,6 +430,7 @@ def _quantize_transformer_gguf(self):
 
         # GGUF transformer
         torch_dtype = self._torch_dtype
+        logger.debug("Quantize transformer with gguf file %s", self._gguf_model_path)
         self._kwargs["transformer"] = self._get_layer_cls(
             "transformer"
         ).from_single_file(
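The GGUF path logged above goes through diffusers' `from_single_file` loader with `GGUFQuantizationConfig`. A minimal sketch, assuming a FLUX transformer and a placeholder `.gguf` path:

```python
# Sketch of loading a GGUF-quantized transformer; the file path and the
# concrete transformer class are assumptions for illustration.
import torch
from diffusers import FluxTransformer2DModel, GGUFQuantizationConfig

transformer = FluxTransformer2DModel.from_single_file(
    "/path/to/flux-q4_0.gguf",  # placeholder gguf checkpoint
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)
# The loaded transformer is then injected into the pipeline kwargs,
# matching the self._kwargs["transformer"] assignment in the diff.
```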