1 parent 29df4d0 commit 99f23f2
vllm/model_executor/layers/quantization/moe_quant_int.py
@@ -5,8 +5,8 @@
 from vllm.distributed import get_tensor_model_parallel_rank, get_tp_group
 from vllm.model_executor.layers.fused_moe.layer import (
     FusedMoE, FusedMoEMethodBase, FusedMoeWeightScaleSupported)
-from vllm.model_executor.layers.linear import (
-    LinearBase, UnquantizedLinearMethod)
+from vllm.model_executor.layers.linear import (LinearBase,
+                                               UnquantizedLinearMethod)
 from vllm.model_executor.layers.quantization.awq_marlin import (
     AWQMarlinConfig, AWQMarlinLinearMethod)
 from vllm.model_executor.layers.quantization.base_config import (
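The change is purely cosmetic: the linear-layer import is rewrapped so the continuation aligns with the opening parenthesis, and both spellings bind the same two names. A minimal sketch (not part of the commit; assumes a local vLLM install) that exercises the reflowed import:

# Sketch only: the rewrapped import resolves to the same classes as the
# old two-line form; the wrapping of the statement has no runtime effect.
from vllm.model_executor.layers.linear import (LinearBase,
                                               UnquantizedLinearMethod)

print(LinearBase.__name__, UnquantizedLinearMethod.__name__)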