Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions src/transformers/integrations/ggml.py
Original file line number Diff line number Diff line change
Expand Up @@ -245,6 +245,8 @@
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"attention.sliding_window": "sliding_window",
"attn_logit_softcapping": "attn_logit_softcapping",
"final_logit_softcapping": "final_logit_softcapping",
"vocab_size": "vocab_size",
},
"gemma3": {
Expand All @@ -261,6 +263,8 @@
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"attention.sliding_window": "sliding_window",
"attn_logit_softcapping": "attn_logit_softcapping",
"final_logit_softcapping": "final_logit_softcapping",
"vocab_size": "vocab_size",
},
"umt5": {
Expand Down
25 changes: 25 additions & 0 deletions tests/quantization/ggml/test_ggml.py
Original file line number Diff line number Diff line change
Expand Up @@ -1054,6 +1054,31 @@ def test_deci_architecture_mapping(self):
self.assertEqual(GGUF_TO_FAST_CONVERTERS["deci"], GGUFLlamaConverter)
self.assertEqual(GGUF_TO_FAST_CONVERTERS["decilm"], GGUFLlamaConverter)

def test_gemma_softcap_config_mapping(self):
    """Verify the Gemma2/Gemma3 GGUF config mappings expose softcapping keys.

    Both architectures persist 'attn_logit_softcapping' and
    'final_logit_softcapping' in their GGUF metadata; each value must be
    carried over to the identically named HuggingFace config attribute.
    """
    from transformers.integrations.ggml import GGUF_CONFIG_MAPPING

    softcap_keys = ("attn_logit_softcapping", "final_logit_softcapping")
    # Same checks for both architectures, in the same order as before:
    # architecture present, then each softcap key present and identity-mapped.
    for arch in ("gemma2", "gemma3"):
        self.assertIn(arch, GGUF_CONFIG_MAPPING)
        arch_mapping = GGUF_CONFIG_MAPPING[arch]
        for key in softcap_keys:
            self.assertIn(key, arch_mapping)
            self.assertEqual(arch_mapping[key], key)

@require_read_token
@unittest.skipUnless(is_gguf_available("0.16.0"), "test requires gguf version >= 0.16.0")
def test_qwen3_q8_0(self):
Expand Down