
Commit 03cabe1

CLI - Specify the GGML_TYPE used to quantize the main tensors. (#91)
To complement the existing token_embd.weight and output.weight overrides: attn_q.weight, attn_k.weight, attn_v.weight, attn_qkv.weight, attn_output.weight, ffn_gate, ffn_down, and ffn_up.
1 parent 76b97c8 commit 03cabe1
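
For illustration, a hypothetical invocation combining the new per-tensor flags with a base quant type (the binary name, file names, thread count and chosen ggml_types below are placeholders, not part of this commit; type names follow the qN_N / qN_K / iqN_xx spelling described in the updated usage text):

./llama-quantize --imatrix imatrix.dat \
    --attn-v-type q6_K --attn-k-type q4_K --ffn-down-type q5_K \
    model-f32.gguf model-quant.gguf Q2_K 8

Tensor classes left without an explicit flag keep the type chosen by the regular ftype heuristics.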

3 files changed: +125 -13 lines

examples/quantize/quantize.cpp

Lines changed: 69 additions & 5 deletions
@@ -109,19 +109,35 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
 //
 [[noreturn]]
 static void usage(const char * executable) {
-    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
+    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--attn-q-type] [--attn-k-type] [--attn-v-type] [--attn-qkv-type] [--attn-output-type] [--ffn-gate-type] [--ffn-down-type] [--ffn-up-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
     printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
     printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
     printf(" --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
     printf(" --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
     printf(" --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
-    printf(" --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor\n");
-    printf(" --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n");
-    printf(" --keep-split: will generate quatized model in the same shards as input");
+    printf(" --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor.\n");
+    printf(" --token-embedding-type ggml_type: use this ggml_type for the token_embd.weight tensor.\n\n");
+    printf("Additional specific tensor quantization types used in the custom quant scheme 'CQS' (default is Q2_K):\n");
+    printf(" --attn-q-type ggml_type: use this ggml_type for the attn_q.weight tensor.\n");
+    printf(" --attn-k-type ggml_type: use this ggml_type for the attn_k.weight tensor.\n");
+    printf(" --attn-v-type ggml_type: use this ggml_type for the attn_v.weight tensor.\n");
+    printf(" --attn-qkv-type ggml_type: use this ggml_type for the attn_qkv.weight tensor.\n");
+    printf(" --attn-output-type ggml_type: use this ggml_type for the attn_output.weight tensor.\n");
+    printf(" --ffn-gate-type ggml_type: use this ggml_type for the ffn_gate tensor.\n");
+    printf(" --ffn-down-type ggml_type: use this ggml_type for the ffn_down tensor.\n");
+    printf(" --ffn-up-type ggml_type: use this ggml_type for the ffn_up tensor.\n\n");
+    printf(" --keep-split: will generate quantized model in the same shards as input\n");
     printf(" --override-kv KEY=TYPE:VALUE\n");
-    printf("     Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
+    printf("     Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n\n");
     printf("Note: --include-weights and --exclude-weights cannot be used together\n");
+    printf("Note: The token embeddings tensor is loaded in system RAM, even in case of full GPU/VRAM offload.\n");
+    printf("Note: The recommended type for the output tensor is q6_K for the ffn types > iq3_xxs and < q8_0.\n\n");
+    printf("Note for the Custom Quant Scheme FTYPE:\n");
+    printf(" Write the specific tensor legacy quants as qN_N, the K-Quants as qN_K, the IQ-Quants as iqN_xx.\n");
+    printf(" Usually, attn-q-type can be one type below the chosen ffn type, and attn-v-type should be one type above.\n");
+    printf(" attn-qkv-type replaces the types attn-q, attn-k and attn-v on some models.\n");
+    //TODO: - eventually - harmonize the CAPS writing of the FTYPEs, and non CAPS writing of the GGML_TYPEs.
     printf("\nAllowed quantization types:\n");
     for (auto & it : QUANT_OPTIONS) {
         if (it.name != "COPY") {
@@ -277,6 +293,54 @@ int main(int argc, char ** argv) {
             } else {
                 usage(argv[0]);
             }
+        } else if (strcmp(argv[arg_idx], "--attn-q-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_q_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--attn-k-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_k_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--attn-v-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_v_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--attn-qkv-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_qkv_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--attn-output-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_output_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--ffn-gate-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.ffn_gate_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--ffn-down-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.ffn_down_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--ffn-up-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.ffn_up_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
         } else if (strcmp(argv[arg_idx], "--override-kv") == 0) {
             if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
                 usage(argv[0]);
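
These branches reuse the parse_ggml_type helper that already exists earlier in quantize.cpp and is not shown in this diff. As a sketch of what such a helper typically does (an assumption, not this file's exact code), it maps a type name to its ggml_type and returns the GGML_TYPE_COUNT sentinel for unknown names, which is what the checks in src/llama.cpp below treat as "no override":

// Hypothetical sketch of a name-to-type lookup; the real helper may differ
// in details such as error reporting.
static ggml_type parse_ggml_type(const char * arg) {
    for (int i = 0; i < GGML_TYPE_COUNT; ++i) {
        const char * name = ggml_type_name((ggml_type) i);   // e.g. "q6_K", "iq4_xs"
        if (name && strcmp(arg, name) == 0) {
            return (ggml_type) i;
        }
    }
    return GGML_TYPE_COUNT; // unknown name -> treated as "type not set"
}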

include/llama.h

Lines changed: 8 additions & 0 deletions
@@ -361,6 +361,14 @@ extern "C" {
         enum llama_ftype ftype;              // quantize to this llama_ftype
         enum ggml_type output_tensor_type;   // output tensor type
         enum ggml_type token_embedding_type; // token embeddings tensor type
+        enum ggml_type attn_q_type;          // attention query tensor type
+        enum ggml_type attn_k_type;          // attention key tensor type
+        enum ggml_type attn_v_type;          // attention value tensor type
+        enum ggml_type attn_qkv_type;        // attention query-key-value tensor type
+        enum ggml_type attn_output_type;     // attention output tensor type
+        enum ggml_type ffn_gate_type;        // feedforward network gate type
+        enum ggml_type ffn_down_type;        // feedforward network down type
+        enum ggml_type ffn_up_type;          // feedforward network up type
         bool allow_requantize;               // allow quantizing non-f32/f16 tensors
         bool quantize_output_tensor;         // quantize output.weight
         bool only_copy;                      // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
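
Because these are plain fields of llama_model_quantize_params, the same overrides can also be set programmatically. A minimal sketch in C (file names and the chosen types are placeholders; error handling omitted):

#include "llama.h"

int main(void) {
    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.nthread       = 8;
    params.ftype         = LLAMA_FTYPE_MOSTLY_Q2_K; // base scheme for all other tensors
    params.attn_v_type   = GGML_TYPE_Q6_K;          // override attn_v.weight
    params.ffn_down_type = GGML_TYPE_Q5_K;          // override ffn_down
    // fields left at GGML_TYPE_COUNT keep the heuristic choice
    return llama_model_quantize("model-f32.gguf", "model-quant.gguf", &params);
}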

src/llama.cpp

Lines changed: 48 additions & 8 deletions
@@ -15716,7 +15716,8 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             }
         }
     } else if (name.find("attn_v.weight") != std::string::npos) {
-        if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+        if (qs.params->attn_v_type < GGML_TYPE_COUNT) new_type = qs.params->attn_v_type;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
             new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_K) {
@@ -15775,7 +15776,8 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         }
         ++qs.i_attention_wv;
     } else if (name.find("attn_k.weight") != std::string::npos) {
-        if (qs.model.hparams.n_expert == 8) {
+        if (qs.params->attn_k_type < GGML_TYPE_COUNT) new_type = qs.params->attn_k_type;
+        else if (qs.model.hparams.n_expert == 8) {
             // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
             // TODO: explore better strategies
             new_type = GGML_TYPE_Q8_0;
@@ -15787,7 +15789,8 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             new_type = GGML_TYPE_IQ2_S;
         }
     } else if (name.find("attn_q.weight") != std::string::npos) {
-        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
+        if (qs.params->attn_q_type < GGML_TYPE_COUNT) new_type = qs.params->attn_q_type;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
             new_type = GGML_TYPE_IQ3_XXS;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
@@ -15796,7 +15799,8 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     } else if (name.find("ffn_down") != std::string::npos) {
         auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
         int i_layer = info.first, n_layer = info.second;
-        if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+        if (qs.params->ffn_down_type < GGML_TYPE_COUNT) new_type = qs.params->ffn_down_type;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S) {
             if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K;
         }
@@ -15843,7 +15847,8 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         }
         ++qs.i_ffn_down;
     } else if (name.find("attn_output.weight") != std::string::npos) {
-        if (arch != LLM_ARCH_FALCON) {
+        if (qs.params->attn_output_type < GGML_TYPE_COUNT) new_type = qs.params->attn_output_type;
+        else if (arch != LLM_ARCH_FALCON) {
             if (qs.model.hparams.n_expert >= 8) {
                 if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
                     ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL ||
@@ -15866,7 +15871,8 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         }
     }
     else if (name.find("attn_qkv.weight") != std::string::npos) {
-        if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
+        if (qs.params->attn_qkv_type < GGML_TYPE_COUNT) new_type = qs.params->attn_qkv_type;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
             new_type = GGML_TYPE_Q4_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M ) new_type = GGML_TYPE_IQ4_K;
@@ -15876,7 +15882,8 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     else if (name.find("ffn_gate") != std::string::npos) {
         auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
         int i_layer = info.first, n_layer = info.second;
-        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
+        if (qs.params->ffn_gate_type < GGML_TYPE_COUNT) new_type = qs.params->ffn_gate_type;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
             new_type = GGML_TYPE_IQ3_XXS;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_KL && use_more_bits(i_layer, n_layer)) {
@@ -15887,7 +15894,8 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     else if (name.find("ffn_up") != std::string::npos) {
         auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
         int i_layer = info.first, n_layer = info.second;
-        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
+        if (qs.params->ffn_up_type < GGML_TYPE_COUNT) new_type = qs.params->ffn_up_type;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
             new_type = GGML_TYPE_IQ3_XXS;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_KL && use_more_bits(i_layer, n_layer)) {
@@ -16323,6 +16331,30 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
             new_type = params->output_tensor_type;
         }
+        if (params->attn_q_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_q.weight") == 0) {
+            new_type = params->attn_q_type;
+        }
+        if (params->attn_k_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_k.weight") == 0) {
+            new_type = params->attn_k_type;
+        }
+        if (params->attn_v_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_v.weight") == 0) {
+            new_type = params->attn_v_type;
+        }
+        if (params->attn_qkv_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_qkv.weight") == 0) {
+            new_type = params->attn_qkv_type;
+        }
+        if (params->attn_output_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_output.weight") == 0) {
+            new_type = params->attn_output_type;
+        }
+        if (params->ffn_gate_type < GGML_TYPE_COUNT && strcmp(tensor->name, "ffn_gate") == 0) {
+            new_type = params->ffn_gate_type;
+        }
+        if (params->ffn_down_type < GGML_TYPE_COUNT && strcmp(tensor->name, "ffn_down") == 0) {
+            new_type = params->ffn_down_type;
+        }
+        if (params->ffn_up_type < GGML_TYPE_COUNT && strcmp(tensor->name, "ffn_up") == 0) {
+            new_type = params->ffn_up_type;
+        }

         // If we've decided to quantize to the same type the tensor is already
         // in then there's nothing to do.
@@ -16726,6 +16758,14 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
         /*.ftype                 =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
         /*.output_tensor_type    =*/ GGML_TYPE_COUNT,
         /*.token_embedding_type  =*/ GGML_TYPE_COUNT,
+        /*.attn_q_type           =*/ GGML_TYPE_COUNT,
+        /*.attn_k_type           =*/ GGML_TYPE_COUNT,
+        /*.attn_v_type           =*/ GGML_TYPE_COUNT,
+        /*.attn_qkv_type         =*/ GGML_TYPE_COUNT,
+        /*.attn_output_type      =*/ GGML_TYPE_COUNT,
+        /*.ffn_gate_type         =*/ GGML_TYPE_COUNT,
+        /*.ffn_down_type         =*/ GGML_TYPE_COUNT,
+        /*.ffn_up_type           =*/ GGML_TYPE_COUNT,
         /*.allow_requantize      =*/ false,
         /*.quantize_output_tensor=*/ true,
         /*.only_copy             =*/ false,
