Commit adf8b58

CLI - Specify GGML_TYPE to quantize for the main tensors. (#91)
To complement the existing token_embd.weight and output.weight overrides, this adds per-tensor quantization types for attn_q.weight, attn_k.weight, attn_v.weight, attn_qkv.weight, attn_output.weight, ffn_gate, ffn_down, and ffn_up.
1 parent beaad81 commit adf8b58

3 files changed: 157 additions & 15 deletions


include/llama.h

Lines changed: 8 additions & 0 deletions
@@ -410,6 +410,14 @@ extern "C" {
         enum llama_ftype ftype;              // quantize to this llama_ftype
         enum ggml_type output_tensor_type;   // output tensor type
         enum ggml_type token_embedding_type; // token embeddings tensor type
+        enum ggml_type attn_q_type;          // attention query tensor type
+        enum ggml_type attn_k_type;          // attention key tensor type
+        enum ggml_type attn_v_type;          // attention value tensor type
+        enum ggml_type attn_qkv_type;        // attention query-key-value tensor type
+        enum ggml_type attn_output_type;     // attention output tensor type
+        enum ggml_type ffn_gate_type;        // feedforward network gate type
+        enum ggml_type ffn_down_type;        // feedforward network down type
+        enum ggml_type ffn_up_type;          // feedforward network up type
         bool allow_requantize;               // allow quantizing non-f32/f16 tensors
         bool quantize_output_tensor;         // quantize output.weight
         bool only_copy;                      // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
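
For programmatic callers, these fields slot into the existing quantization API. A minimal sketch, assuming a build of this branch; the model paths and type choices below are illustrative only:

    #include "llama.h"

    int main() {
        llama_backend_init();

        // Defaults leave every per-tensor type at GGML_TYPE_COUNT ("not set"),
        // so the usual ftype heuristics keep applying unless a field is overridden.
        llama_model_quantize_params qparams = llama_model_quantize_default_params();
        qparams.ftype         = LLAMA_FTYPE_MOSTLY_Q4_K_M; // base quantization scheme
        qparams.attn_v_type   = GGML_TYPE_Q6_K;            // override attn_v.weight
        qparams.ffn_down_type = GGML_TYPE_Q5_K;            // override ffn_down

        // Returns 0 on success.
        const uint32_t rc = llama_model_quantize("model-f32.gguf", "model-quant.gguf", &qparams);

        llama_backend_free();
        return rc == 0 ? 0 : 1;
    }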

src/llama-quant.cpp

Lines changed: 57 additions & 14 deletions
@@ -255,29 +255,33 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
     } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
                ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
         if (name.find("attn_v.weight") != std::string::npos) {
-            if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
+            if (qs.params->attn_v_type < GGML_TYPE_COUNT) new_type = qs.params->attn_v_type;
+            else if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
             else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
             ++qs.i_attention_wv;
         }
         else if (qs.model.hparams.n_expert == 8 && name.find("attn_k.weight") != std::string::npos) {
             new_type = GGML_TYPE_Q4_K;
         }
         else if (name.find("ffn_down") != std::string::npos) {
-            if (qs.i_ffn_down < qs.n_ffn_down/8) {
+            if (qs.params->ffn_down_type < GGML_TYPE_COUNT) new_type = qs.params->ffn_down_type;
+            else if (qs.i_ffn_down < qs.n_ffn_down/8) {
                 new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
             }
             ++qs.i_ffn_down;
         }
         else if (name.find("attn_output.weight") != std::string::npos) {
-            if (qs.model.hparams.n_expert == 8) {
+            if (qs.params->attn_output_type < GGML_TYPE_COUNT) new_type = qs.params->attn_output_type;
+            else if (qs.model.hparams.n_expert == 8) {
                 new_type = GGML_TYPE_Q5_K;
             } else {
                 if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) new_type = GGML_TYPE_IQ2_XXS;
                 else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) new_type = GGML_TYPE_IQ3_S;
             }
         }
     } else if (name.find("attn_v.weight") != std::string::npos) {
-        if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+        if (qs.params->attn_v_type < GGML_TYPE_COUNT) new_type = qs.params->attn_v_type;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
             new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
@@ -315,7 +319,8 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
         }
         ++qs.i_attention_wv;
     } else if (name.find("attn_k.weight") != std::string::npos) {
-        if (qs.model.hparams.n_expert == 8) {
+        if (qs.params->attn_k_type < GGML_TYPE_COUNT) new_type = qs.params->attn_k_type;
+        else if (qs.model.hparams.n_expert == 8) {
             // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
             // TODO: explore better strategies
             new_type = GGML_TYPE_Q8_0;
@@ -327,7 +332,8 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
             new_type = GGML_TYPE_IQ2_S;
         }
     } else if (name.find("attn_q.weight") != std::string::npos) {
-        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
+        if (qs.params->attn_q_type < GGML_TYPE_COUNT) new_type = qs.params->attn_q_type;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
             new_type = GGML_TYPE_IQ3_XXS;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
@@ -336,7 +342,8 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
     } else if (name.find("ffn_down") != std::string::npos) {
         auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
         int i_layer = info.first, n_layer = info.second;
-        if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+        if (qs.params->ffn_down_type < GGML_TYPE_COUNT) new_type = qs.params->ffn_down_type;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S) {
             if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K;
         }
@@ -379,7 +386,8 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
         }
         ++qs.i_ffn_down;
     } else if (name.find("attn_output.weight") != std::string::npos) {
-        if (arch != LLM_ARCH_FALCON) {
+        if (qs.params->attn_output_type < GGML_TYPE_COUNT) new_type = qs.params->attn_output_type;
+        else if (arch != LLM_ARCH_FALCON) {
             if (qs.model.hparams.n_expert == 8) {
                 if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
                     ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL ||
@@ -399,7 +407,8 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
         }
     }
     else if (name.find("attn_qkv.weight") != std::string::npos) {
-        if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
+        if (qs.params->attn_qkv_type < GGML_TYPE_COUNT) new_type = qs.params->attn_qkv_type;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
             new_type = GGML_TYPE_Q4_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
@@ -408,15 +417,17 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
     else if (name.find("ffn_gate") != std::string::npos) {
         auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
         int i_layer = info.first, n_layer = info.second;
-        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
+        if (qs.params->ffn_gate_type < GGML_TYPE_COUNT) new_type = qs.params->ffn_gate_type;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
             new_type = GGML_TYPE_IQ3_XXS;
         }
         ++qs.i_ffn_gate;
     }
     else if (name.find("ffn_up") != std::string::npos) {
         auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
         int i_layer = info.first, n_layer = info.second;
-        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
+        if (qs.params->ffn_up_type < GGML_TYPE_COUNT) new_type = qs.params->ffn_up_type;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
             new_type = GGML_TYPE_IQ3_XXS;
         }
         ++qs.i_ffn_up;
@@ -974,6 +985,30 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
             new_type = params->output_tensor_type;
         }
+        if (params->attn_q_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_q.weight") == 0) {
+            new_type = params->attn_q_type;
+        }
+        if (params->attn_k_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_k.weight") == 0) {
+            new_type = params->attn_k_type;
+        }
+        if (params->attn_v_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_v.weight") == 0) {
+            new_type = params->attn_v_type;
+        }
+        if (params->attn_qkv_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_qkv.weight") == 0) {
+            new_type = params->attn_qkv_type;
+        }
+        if (params->attn_output_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_output.weight") == 0) {
+            new_type = params->attn_output_type;
+        }
+        if (params->ffn_gate_type < GGML_TYPE_COUNT && strcmp(tensor->name, "ffn_gate") == 0) {
+            new_type = params->ffn_gate_type;
+        }
+        if (params->ffn_down_type < GGML_TYPE_COUNT && strcmp(tensor->name, "ffn_down") == 0) {
+            new_type = params->ffn_down_type;
+        }
+        if (params->ffn_up_type < GGML_TYPE_COUNT && strcmp(tensor->name, "ffn_up") == 0) {
+            new_type = params->ffn_up_type;
+        }

         // If we've decided to quantize to the same type the tensor is already
         // in then there's nothing to do.
@@ -1012,9 +1047,9 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
             }
         }
         if ((new_type == GGML_TYPE_IQ2_XXS ||
-             new_type == GGML_TYPE_IQ2_XS ||
-             new_type == GGML_TYPE_IQ2_S ||
-             new_type == GGML_TYPE_IQ1_S ||
+            new_type == GGML_TYPE_IQ2_XS ||
+            new_type == GGML_TYPE_IQ2_S ||
+            new_type == GGML_TYPE_IQ1_S ||
            (new_type == GGML_TYPE_IQ1_M && strcmp(tensor->name, "token_embd.weight") && strcmp(tensor->name, "output.weight")) ||
            (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) {
             LLAMA_LOG_ERROR("\n\n============================================================\n");
@@ -1120,6 +1155,14 @@ llama_model_quantize_params llama_model_quantize_default_params() {
        /*.ftype                     =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
        /*.output_tensor_type        =*/ GGML_TYPE_COUNT,
        /*.token_embedding_type      =*/ GGML_TYPE_COUNT,
+       /*.attn_q_type               =*/ GGML_TYPE_COUNT,
+       /*.attn_k_type               =*/ GGML_TYPE_COUNT,
+       /*.attn_v_type               =*/ GGML_TYPE_COUNT,
+       /*.attn_qkv_type             =*/ GGML_TYPE_COUNT,
+       /*.attn_output_type          =*/ GGML_TYPE_COUNT,
+       /*.ffn_gate_type             =*/ GGML_TYPE_COUNT,
+       /*.ffn_down_type             =*/ GGML_TYPE_COUNT,
+       /*.ffn_up_type               =*/ GGML_TYPE_COUNT,
        /*.allow_requantize          =*/ false,
        /*.quantize_output_tensor    =*/ true,
        /*.only_copy                 =*/ false,
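
The same guard appears for every tensor family touched by this change: GGML_TYPE_COUNT is the "not set" sentinel, an explicitly requested per-tensor type always wins, and the existing ftype heuristic only runs as the fallback. A condensed restatement of that precedence; the helper name is illustrative and not part of the commit:

    // Illustrative helper, not in the diff: mirrors the per-tensor guards above.
    static ggml_type resolve_tensor_type(ggml_type requested, ggml_type heuristic) {
        // requested <  GGML_TYPE_COUNT -> the user passed e.g. --attn-v-type
        // requested == GGML_TYPE_COUNT -> default, fall back to the ftype heuristic
        return requested < GGML_TYPE_COUNT ? requested : heuristic;
    }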

tools/quantize/quantize.cpp

Lines changed: 92 additions & 1 deletion
@@ -55,6 +55,9 @@ static const std::vector<quant_option> QUANT_OPTIONS = {
     { "Q5_K_M",   LLAMA_FTYPE_MOSTLY_Q5_K_M,  " 5.33G, +0.0569 ppl @ Llama-3-8B",  },
     { "Q6_K",     LLAMA_FTYPE_MOSTLY_Q6_K,    " 6.14G, +0.0217 ppl @ Llama-3-8B",  },
     { "Q8_0",     LLAMA_FTYPE_MOSTLY_Q8_0,    " 7.96G, +0.0026 ppl @ Llama-3-8B",  },
+    // { "Q4_0_4_4", LLAMA_FTYPE_MOSTLY_Q4_0_4_4, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
+    // { "Q4_0_4_8", LLAMA_FTYPE_MOSTLY_Q4_0_4_8, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
+    // { "Q4_0_8_8", LLAMA_FTYPE_MOSTLY_Q4_0_8_8, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
     { "F16",      LLAMA_FTYPE_MOSTLY_F16,     "14.00G, +0.0020 ppl @ Mistral-7B",  },
     { "BF16",     LLAMA_FTYPE_MOSTLY_BF16,    "14.00G, -0.0050 ppl @ Mistral-7B",  },
     { "F32",      LLAMA_FTYPE_ALL_F32,        "26.00G              @ 7B",          },
@@ -170,6 +173,7 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
 static void usage(const char * executable) {
     printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights]\n", executable);
     printf("       [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--prune-layers] [--keep-split] [--override-kv]\n");
+    printf("       [--attn-q-type] [--attn-k-type] [--attn-v-type] [--attn-qkv-type] [--attn-output-type] [--ffn-gate-type] [--ffn-down-type] [--ffn-up-type]\n\n");
     printf("       model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
     printf("  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     printf("  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
@@ -183,10 +187,25 @@ static void usage(const char * executable) {
     printf("      Advanced option to selectively quantize tensors. May be specified multiple times.\n");
     printf("  --prune-layers L0,L1,L2...comma-separated list of layer numbers to prune from the model\n");
     printf("      Advanced option to remove all tensors from the given layers\n");
+    printf("  Additional specific tensor quantization types used in the custom quant scheme 'CQS' (default is Q2_K):\n");
+    printf("  --attn-q-type ggml_type: use this ggml_type for the attn_q.weight tensor.\n");
+    printf("  --attn-k-type ggml_type: use this ggml_type for the attn_k.weight tensor.\n");
+    printf("  --attn-v-type ggml_type: use this ggml_type for the attn_v.weight tensor.\n");
+    printf("  --attn-qkv-type ggml_type: use this ggml_type for the attn_qkv.weight tensor.\n");
+    printf("  --attn-output-type ggml_type: use this ggml_type for the attn_output.weight tensor.\n");
+    printf("  --ffn-gate-type ggml_type: use this ggml_type for the ffn_gate tensor.\n");
+    printf("  --ffn-down-type ggml_type: use this ggml_type for the ffn_down tensor.\n");
+    printf("  --ffn-up-type ggml_type: use this ggml_type for the ffn_up tensor.\n\n");
     printf("  --keep-split: will generate quantized model in the same shards as input\n");
     printf("  --override-kv KEY=TYPE:VALUE\n");
-    printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
+    printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n\n");
     printf("Note: --include-weights and --exclude-weights cannot be used together\n");
+    printf("Note: The token embeddings tensor is loaded in system RAM, even in case of full GPU/VRAM offload.\n");
+    printf("Note: The recommended type for the output tensor is q6_K for the ffn types > iq3_xxs and < q8_0.\n\n");
+    printf("Note for the Custom Quant Scheme FTYPE:\n");
+    printf("  Write the specific tensor legacy quants as qN_N, the K-Quants as qN_K, the IQ-Quants as iqN_xx.\n");
+    printf("  Usually, attn-q-type can be one type below the chosen ffn type, and attn-v-type should be one type above.\n");
+    printf("  attn-qkv-type replaces the types attn-q, attn-k, and attn-v on some models.\n");
     printf("\nAllowed quantization types:\n");
     for (const auto & it : QUANT_OPTIONS) {
         if (it.name != "COPY") {
@@ -526,6 +545,78 @@ int main(int argc, char ** argv) {
             } else {
                 usage(argv[0]);
             }
+        } else if (strcmp(argv[arg_idx], "--attn-q-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_q_type = parse_ggml_type(argv[++arg_idx]);
+                if (params.attn_q_type == GGML_TYPE_COUNT) {
+                    usage(argv[0]);
+                }
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--attn-k-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_k_type = parse_ggml_type(argv[++arg_idx]);
+                if (params.attn_k_type == GGML_TYPE_COUNT) {
+                    usage(argv[0]);
+                }
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--attn-v-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_v_type = parse_ggml_type(argv[++arg_idx]);
+                if (params.attn_v_type == GGML_TYPE_COUNT) {
+                    usage(argv[0]);
+                }
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--attn-qkv-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_qkv_type = parse_ggml_type(argv[++arg_idx]);
+                if (params.attn_qkv_type == GGML_TYPE_COUNT) {
+                    usage(argv[0]);
+                }
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--attn-output-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_output_type = parse_ggml_type(argv[++arg_idx]);
+                if (params.attn_output_type == GGML_TYPE_COUNT) {
+                    usage(argv[0]);
+                }
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--ffn-gate-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.ffn_gate_type = parse_ggml_type(argv[++arg_idx]);
+                if (params.ffn_gate_type == GGML_TYPE_COUNT) {
+                    usage(argv[0]);
+                }
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--ffn-down-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.ffn_down_type = parse_ggml_type(argv[++arg_idx]);
+                if (params.ffn_down_type == GGML_TYPE_COUNT) {
+                    usage(argv[0]);
+                }
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--ffn-up-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.ffn_up_type = parse_ggml_type(argv[++arg_idx]);
+                if (params.ffn_up_type == GGML_TYPE_COUNT) {
+                    usage(argv[0]);
+                }
+            } else {
+                usage(argv[0]);
+            }
         } else if (strcmp(argv[arg_idx], "--tensor-type") == 0) {
             if (arg_idx == argc-1 || !parse_tensor_type(argv[++arg_idx], tensor_types)) {
                 usage(argv[0]);
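
Taken together, the new flags compose with any base ftype. A hypothetical invocation (binary name, file names, and type picks are illustrative; type names follow the qN_N / qN_K / iqN_xx spelling described in the usage text):

    ./llama-quantize \
        --attn-v-type q6_K --attn-k-type q4_K \
        --ffn-down-type q5_K --output-tensor-type q6_K \
        model-f32.gguf model-quant.gguf Q4_K_M 8

Any tensor family left unspecified stays at GGML_TYPE_COUNT and keeps whatever type the Q4_K_M heuristics would have chosen.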
