diff --git a/docs/source/basic_tutorials/launcher.md b/docs/source/basic_tutorials/launcher.md
index 9590e463214..bafe3669219 100644
--- a/docs/source/basic_tutorials/launcher.md
+++ b/docs/source/basic_tutorials/launcher.md
@@ -60,9 +60,9 @@ Options:
       [env: QUANTIZE=]

      Possible values:
-      - awq: 4 bit quantization. Requires a specific GTPQ quantized model: https://hf.co/models?search=awq. Should replace GPTQ models whereever possible because of the better latency
+      - awq: 4 bit quantization. Requires a specific AWQ quantized model: https://hf.co/models?search=awq. Should replace GPTQ models wherever possible because of the better latency
       - eetq: 8 bit quantization, doesn't require specific model. Should be a drop-in replacement to bitsandbytes with much better performance. Kernels are from https://github.com/NetEase-FuXi/EETQ.git
-      - gptq: 4 bit quantization. Requires a specific GTPQ quantized model: https://hf.co/models?search=gptq. text-generation-inference will use exllama (faster) kernels whereever possible, and use triton kernel (wider support) when it's not. AWQ has faster kernels
+      - gptq: 4 bit quantization. Requires a specific GTPQ quantized model: https://hf.co/models?search=gptq. text-generation-inference will use exllama (faster) kernels wherever possible, and use triton kernel (wider support) when it's not. AWQ has faster kernels
       - bitsandbytes: Bitsandbytes 8bit. Can be applied on any model, will cut the memory requirement in half, but it is known that the model will be much slower to run than the native f16
       - bitsandbytes-nf4: Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x, but it is known that the model will be much slower to run than the native f16
       - bitsandbytes-fp4: Bitsandbytes 4bit. nf4 should be preferred in most cases but maybe this one has better perplexity performance for you model
diff --git a/launcher/src/main.rs b/launcher/src/main.rs
index e635a721876..09657c91550 100644
--- a/launcher/src/main.rs
+++ b/launcher/src/main.rs
@@ -21,16 +21,16 @@ mod env_runtime;
 #[derive(Clone, Copy, Debug, ValueEnum)]
 enum Quantization {
-    /// 4 bit quantization. Requires a specific GTPQ quantized model:
+    /// 4 bit quantization. Requires a specific AWQ quantized model:
     /// https://hf.co/models?search=awq.
-    /// Should replace GPTQ models whereever possible because of the better latency
+    /// Should replace GPTQ models wherever possible because of the better latency
     Awq,
     /// 8 bit quantization, doesn't require specific model.
     /// Should be a drop-in replacement to bitsandbytes with much better performance.
     /// Kernels are from https://github.com/NetEase-FuXi/EETQ.git
     Eetq,
     /// 4 bit quantization. Requires a specific GTPQ quantized model: https://hf.co/models?search=gptq.
-    /// text-generation-inference will use exllama (faster) kernels whereever possible, and use
+    /// text-generation-inference will use exllama (faster) kernels wherever possible, and use
     /// triton kernel (wider support) when it's not.
     /// AWQ has faster kernels.
     Gptq,
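
The two files change together because the `Possible values:` text in `launcher.md` mirrors the clap help output generated from the `///` doc comments on the `Quantization` enum, so a typo in the Rust doc comment surfaces in the rendered docs. Below is a minimal sketch of that mechanism, not the launcher's actual code: it assumes clap 4.x with the `derive` feature, and the `Args` struct, its field, and the trimmed doc comments are illustrative.

```rust
use clap::{Parser, ValueEnum};

/// Sketch of how `ValueEnum` doc comments become CLI help text.
/// Each variant's `///` comment is printed under `Possible values:`.
#[derive(Clone, Copy, Debug, ValueEnum)]
enum Quantization {
    /// 4 bit quantization. Requires a specific AWQ quantized model:
    /// https://hf.co/models?search=awq.
    Awq,
    /// 8 bit quantization, doesn't require a specific model.
    Eetq,
    /// 4 bit quantization. Requires a specific GPTQ quantized model:
    /// https://hf.co/models?search=gptq.
    Gptq,
}

#[derive(Parser, Debug)]
struct Args {
    /// Quantization method to apply to the model weights.
    #[clap(long, value_enum)]
    quantize: Option<Quantization>,
}

fn main() {
    // Variant names map to kebab-case values: `--quantize awq|eetq|gptq`.
    let args = Args::parse();
    println!("quantize = {:?}", args.quantize);
}
```

Running this sketch with `--help` prints each variant alongside its doc comment under `Possible values:`, which is why the wording fix has to land in both `launcher/src/main.rs` and the docs page that reproduces that help output.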