Skip to content

Commit 3cd7b32

Browse files
Support Gemma 12B with quant weights. (Comfy-Org#11696)
1 parent c0c9720 commit 3cd7b32

File tree

1 file changed

+6
-6
lines changed

1 file changed

+6
-6
lines changed

comfy/text_encoders/lt.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -36,10 +36,10 @@ def __init__(self, embedding_directory=None, tokenizer_data={}):
36 36

37 37
class Gemma3_12BModel(sd1_clip.SDClipModel):
38 38
    def __init__(self, device="cpu", layer="all", layer_idx=None, dtype=None, attention_mask=True, model_options={}):
39 -
        llama_scaled_fp8 = model_options.get("gemma_scaled_fp8", None)
40 -
        if llama_scaled_fp8 is not None:
39 +
        llama_quantization_metadata = model_options.get("llama_quantization_metadata", None)
40 +
        if llama_quantization_metadata is not None:
41 41
            model_options = model_options.copy()
42 -
            model_options["scaled_fp8"] = llama_scaled_fp8
42 +
            model_options["quantization_metadata"] = llama_quantization_metadata
43 43

44 44
        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"start": 2, "pad": 0}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Gemma3_12B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
45 45

@@ -119,12 +119,12 @@ def load_sd(self, sd):
119 119
        return self.load_state_dict(sdo, strict=False)
120 120

121 121

122 -
def ltxav_te(dtype_llama=None, llama_scaled_fp8=None):
122 +
def ltxav_te(dtype_llama=None, llama_quantization_metadata=None):
123 123
    class LTXAVTEModel_(LTXAVTEModel):
124 124
        def __init__(self, device="cpu", dtype=None, model_options={}):
125 -
            if llama_scaled_fp8 is not None and "llama_scaled_fp8" not in model_options:
125 +
            if llama_quantization_metadata is not None:
126 126
                model_options = model_options.copy()
127 -
                model_options["llama_scaled_fp8"] = llama_scaled_fp8
127 +
                model_options["llama_quantization_metadata"] = llama_quantization_metadata
128 128
            if dtype_llama is not None:
129 129
                dtype = dtype_llama
130 130
            super().__init__(dtype_llama=dtype_llama, device=device, dtype=dtype, model_options=model_options)

0 commit comments

Comments (0)