[Kernel] Use CUTLASS kernels for the FP8 layers with Bias (vllm-proje…
tlrmchlsmth authored and jimpang committed Jul 24, 2024
1 parent b3ebd19 commit 0f9ff6d
Showing 1 changed file with 3 additions and 2 deletions.
5 changes: 3 additions & 2 deletions vllm/model_executor/layers/quantization/utils/w8a8_utils.py
@@ -112,15 +112,16 @@ def apply_fp8_linear(
     # If dynamic, layer.input_scale is None and x_scale computed from x.
     # If static, layer.input_scale is scalar and x_scale is input_scale.
 
-    if bias is None and cutlass_fp8_supported:
+    if cutlass_fp8_supported:
         qinput, x_scale = ops.scaled_fp8_quant(input, input_scale)
 
         # Fused GEMM_DQ
         output = ops.cutlass_scaled_mm(qinput,
                                        weight,
                                        out_dtype=input.dtype,
                                        scale_a=x_scale,
-                                       scale_b=weight_scale)
+                                       scale_b=weight_scale,
+                                       bias=bias)
 
     else:
         qinput, x_scale = ops.scaled_fp8_quant(input,
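For context, a minimal sketch of how the changed branch might be exercised after this commit: before the change, a non-None bias forced the non-CUTLASS fallback; now the bias is passed through to ops.cutlass_scaled_mm. The call below assumes apply_fp8_linear keeps the parameter order visible in this revision of w8a8_utils.py and that vLLM's custom ops were built with CUTLASS FP8 support; tensor shapes, dtypes, and weight layout are illustrative assumptions, not taken from the commit.

# Sketch only (not part of the commit): exercising the CUTLASS FP8 path
# of apply_fp8_linear with a bias.
import torch
from vllm.model_executor.layers.quantization.utils.w8a8_utils import (
    apply_fp8_linear)

x = torch.randn(8, 4096, dtype=torch.float16, device="cuda")
# FP8 weight and per-tensor scale; the layout expected by
# ops.cutlass_scaled_mm is assumed to be satisfied here.
weight = torch.randn(4096, 4096, device="cuda").to(torch.float8_e4m3fn).t()
weight_scale = torch.tensor(1.0, device="cuda")
bias = torch.zeros(4096, dtype=torch.float16, device="cuda")

out = apply_fp8_linear(x, weight, weight_scale,
                       input_scale=None,        # dynamic activation scaling
                       bias=bias,
                       cutlass_fp8_supported=True)

With this change, the bias add is fused into the scaled GEMM on the CUTLASS path rather than disqualifying the layer from using it.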
