[CI/Build] Add text-only test for Qwen models (vllm-project#7475)
Signed-off-by: Alex-Brooks <Alex.Brooks@ibm.com>
Commit 7c141a0 · 1 parent a472d75
Showing 3 changed files with 52 additions and 2 deletions.
The commit adds the following new test file (hunk `@@ -0,0 +1,48 @@`):

```python
from typing import Type

import pytest

from ..conftest import HfRunner, VllmRunner
from .utils import check_logprobs_close

models = ["qwen/qwen-vl"]


@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", [32])
@pytest.mark.parametrize("num_logprobs", [5])
@pytest.mark.parametrize("model", models)
def test_text_only_qwen_model(
    hf_runner: Type[HfRunner],
    vllm_runner: Type[VllmRunner],
    example_prompts,
    model: str,
    *,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
):
    # This test checks language inputs only, since the visual component
    # for qwen-vl is still unsupported in vLLM. In the near future, the
    # implementation and this test will be extended to cover visual
    # inputs as well.
    with hf_runner(model, dtype=dtype, is_vision_model=False) as hf_model:
        hf_outputs = hf_model.generate_greedy_logprobs_limit(
            example_prompts,
            max_tokens,
            num_logprobs=num_logprobs,
        )

    with vllm_runner(model, dtype=dtype) as vllm_model:
        vllm_outputs = vllm_model.generate_greedy_logprobs(
            example_prompts,
            max_tokens,
            num_logprobs=num_logprobs,
        )

    check_logprobs_close(
        outputs_0_lst=hf_outputs,
        outputs_1_lst=vllm_outputs,
        name_0="hf",
        name_1="vllm",
    )
```
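`check_logprobs_close` (imported from the shared test utils above) is what makes this comparison robust: greedy decoding in two different engines can legitimately pick different tokens when candidates are nearly tied, so an exact string match between HF and vLLM outputs would be flaky. Below is a minimal sketch of this style of check, assuming each output is a `(token_ids, text, per_position_logprob_dicts)` tuple as the runner calls above suggest; it illustrates the idea and is not the actual vLLM helper:

```python
from typing import Dict, List, Sequence, Tuple

# Assumed output shape, mirroring the runners above: each element is
# (generated_token_ids, generated_text, per_position_logprob_dicts),
# where each logprob dict maps candidate token_id -> logprob.
TokensTextLogprobs = Tuple[List[int], str, List[Dict[int, float]]]


def check_logprobs_close_sketch(
    outputs_0_lst: Sequence[TokensTextLogprobs],
    outputs_1_lst: Sequence[TokensTextLogprobs],
    name_0: str,
    name_1: str,
) -> None:
    """Fail if greedy outputs diverge beyond top-k wiggle room."""
    for prompt_idx, (out_0, out_1) in enumerate(
            zip(outputs_0_lst, outputs_1_lst)):
        ids_0, text_0, logprobs_0 = out_0
        ids_1, text_1, logprobs_1 = out_1
        for pos, (tok_0, tok_1) in enumerate(zip(ids_0, ids_1)):
            if tok_0 == tok_1:
                continue
            # Tolerate a mismatch only if each model's token is still
            # a top-k candidate under the other model's logprobs.
            fail_msg = (
                f"prompt {prompt_idx}, position {pos}: "
                f"{name_0}: {text_0!r} vs {name_1}: {text_1!r}")
            assert tok_0 in logprobs_1[pos], fail_msg
            assert tok_1 in logprobs_0[pos], fail_msg
            # Once the sequences diverge, later tokens are conditioned
            # on different prefixes, so stop comparing this prompt.
            break
```

The early `break` reflects a deliberate choice: after the two generations first pick different (but mutually plausible) tokens, every later position is conditioned on a different prefix, so continuing the position-wise comparison would only produce noise.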