This repository has been archived by the owner on Oct 11, 2024. It is now read-only.

Commit

Revert "[CI/Build] Add is_quant_method_supported to control quantization test configurations" (vllm-project#5463)
simon-mo authored and robertgshaw2-redhat committed Jun 16, 2024
1 parent 0b02164 commit ef29fa3
Showing 7 changed files with 32 additions and 22 deletions.
3 changes: 2 additions & 1 deletion tests/models/test_aqlm.py
@@ -4,6 +4,7 @@
"""

import pytest
+import torch

from tests.nm_utils.utils_skip import should_skip_test_group
from tests.quantization.utils import is_quant_method_supported
@@ -63,7 +64,7 @@
]


-@pytest.mark.skipif(not is_quant_method_supported("aqlm"),
+@pytest.mark.skipif(aqlm_not_supported,
                    reason="AQLM is not supported on this GPU type.")
@pytest.mark.parametrize("model", ["ISTA-DASLab/Llama-2-7b-AQLM-2Bit-1x16-hf"])
@pytest.mark.parametrize("dtype", ["half"])
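Note: aqlm_not_supported is referenced by the restored decorator but defined outside the hunks shown above. It presumably follows the same capability-check pattern this commit restores in the other test files; the following is a hedged sketch, not code taken from this diff:

# Sketch only (definition not visible in this diff): how aqlm_not_supported is
# presumably computed at module level, mirroring the checks restored below in
# test_fp8.py and test_gptq_marlin.py.
import torch

from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS

aqlm_not_supported = True

if torch.cuda.is_available():
    capability = torch.cuda.get_device_capability()
    capability = capability[0] * 10 + capability[1]
    aqlm_not_supported = (
        capability < QUANTIZATION_METHODS["aqlm"].get_min_capability())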
10 changes: 9 additions & 1 deletion tests/models/test_fp8.py
@@ -72,8 +72,16 @@
},
}

+fp8_not_supported = True

-@pytest.mark.skipif(not is_quant_method_supported("fp8"),
+if torch.cuda.is_available():
+    capability = torch.cuda.get_device_capability()
+    capability = capability[0] * 10 + capability[1]
+    fp8_not_supported = (capability <
+                         QUANTIZATION_METHODS["fp8"].get_min_capability())


+@pytest.mark.skipif(fp8_not_supported,
                    reason="fp8 is not supported on this GPU type.")
@pytest.mark.parametrize("model_name", MODELS)
@pytest.mark.parametrize("kv_cache_dtype", ["auto", "fp8"])
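As a quick illustration of the capability arithmetic restored above: torch.cuda.get_device_capability() returns a (major, minor) tuple, which is folded into a single integer before being compared with the quantization method's minimum capability. The device value below is an example, not taken from this diff:

# Example only: an A100 reports compute capability (8, 0).
major, minor = (8, 0)
capability = major * 10 + minor  # 8 * 10 + 0 == 80
# The test is skipped when this value is below
# QUANTIZATION_METHODS["fp8"].get_min_capability().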
11 changes: 10 additions & 1 deletion tests/models/test_gptq_marlin.py
@@ -11,6 +11,7 @@
import os

import pytest
+import torch

from tests.models.utils import check_logprobs_close
from tests.nm_utils.utils_skip import should_skip_test_group
@@ -25,6 +26,14 @@

MAX_MODEL_LEN = 1024

+gptq_marlin_not_supported = True

+if torch.cuda.is_available():
+    capability = torch.cuda.get_device_capability()
+    capability = capability[0] * 10 + capability[1]
+    gptq_marlin_not_supported = (
+        capability < QUANTIZATION_METHODS["gptq_marlin"].get_min_capability())

MODELS = [
# act_order==False, group_size=channelwise
("robertgshaw2/zephyr-7b-beta-channelwise-gptq", "main"),
Expand All @@ -48,7 +57,7 @@


@pytest.mark.flaky(reruns=3)
-@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
+@pytest.mark.skipif(gptq_marlin_not_supported,
                    reason="gptq_marlin is not supported on this GPU type.")
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["half", "bfloat16"])
3 changes: 2 additions & 1 deletion tests/models/test_gptq_marlin_24.py
@@ -9,6 +9,7 @@
from dataclasses import dataclass

import pytest
+import torch

from tests.models.utils import check_logprobs_close
from tests.nm_utils.utils_skip import should_skip_test_group
@@ -44,7 +45,7 @@ class ModelPair:


@pytest.mark.flaky(reruns=2)
-@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin_24"),
+@pytest.mark.skipif(marlin_not_supported,
                    reason="Marlin24 is not supported on this GPU type.")
@pytest.mark.parametrize("model_pair", model_pairs)
@pytest.mark.parametrize("dtype", ["half"])
3 changes: 2 additions & 1 deletion tests/models/test_marlin.py
@@ -19,6 +19,7 @@

import pytest
import torch
+import torch

from tests.models.utils import check_logprobs_close
from tests.nm_utils.utils_skip import should_skip_test_group
@@ -46,7 +47,7 @@ class ModelPair:


@pytest.mark.flaky(reruns=2)
-@pytest.mark.skipif(not is_quant_method_supported("marlin"),
+@pytest.mark.skipif(marlin_not_supported,
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("model_pair", model_pairs)
@pytest.mark.parametrize("dtype", ["half"])
10 changes: 7 additions & 3 deletions tests/quantization/test_bitsandbytes.py
@@ -5,12 +5,16 @@
import pytest
import torch

-from tests.quantization.utils import is_quant_method_supported
from vllm import SamplingParams
+from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS

+capability = torch.cuda.get_device_capability()
+capability = capability[0] * 10 + capability[1]

-@pytest.mark.skipif(not is_quant_method_supported("bitsandbytes"),
-                    reason='bitsandbytes is not supported on this GPU type.')

+@pytest.mark.skipif(
+    capability < QUANTIZATION_METHODS['bitsandbytes'].get_min_capability(),
+    reason='bitsandbytes is not supported on this GPU type.')
def test_load_bnb_model(vllm_runner) -> None:
with vllm_runner('huggyllama/llama-7b',
quantization='bitsandbytes',
14 changes: 0 additions & 14 deletions tests/quantization/utils.py

This file was deleted.
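The 14 deleted lines of tests/quantization/utils.py are not shown on this page. Judging from the per-file checks this revert restores, the removed is_quant_method_supported helper presumably looked roughly like the following sketch (an assumption, not the exact deleted code):

# Hedged reconstruction of the deleted helper: it returns False when CUDA is
# unavailable or when the device's compute capability is below the minimum
# required by the named quantization method.
import torch

from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS


def is_quant_method_supported(quant_method: str) -> bool:
    if not torch.cuda.is_available():
        return False

    capability = torch.cuda.get_device_capability()
    capability = capability[0] * 10 + capability[1]
    return capability >= QUANTIZATION_METHODS[quant_method].get_min_capability()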
