1 change: 1 addition & 0 deletions tests/distributed/test_pipeline_parallel.py
@@ -178,6 +178,7 @@ def iter_params(self, model_id: str):
"OrionStarAI/Orion-14B-Chat": PPTestSettings.fast(),
"adept/persimmon-8b-chat": PPTestSettings.fast(),
"microsoft/phi-2": PPTestSettings.fast(),
+ "Qwen/Qwen2.5-1.5B-Instruct": PPTestSettings.fast(),
Review comment (Member):
Can we keep this in alphabetical order (by model architecture)? Also, the test isn't actually run unless you include the model name in TEST_MODELS.

"microsoft/Phi-3-small-8k-instruct": PPTestSettings.fast(),
"microsoft/Phi-3.5-MoE-instruct": PPTestSettings.detailed(multi_node_only=True, load_format="dummy"), # noqa: E501
"Qwen/Qwen-7B-Chat": PPTestSettings.fast(),
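A minimal sketch of the reviewer's second point, assuming TEST_MODELS is a plain list of model IDs elsewhere in tests/distributed/test_pipeline_parallel.py that selects which settings entries are actually exercised (its real contents are not part of this diff):

# Hypothetical sketch: the PPTestSettings entry added above only takes effect
# if the same model ID is also listed in the TEST_MODELS selection list.
TEST_MODELS = [
    # ... existing model IDs ...
    "Qwen/Qwen2.5-1.5B-Instruct",  # added so the pipeline-parallel test runs it
]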
2 changes: 1 addition & 1 deletion tests/entrypoints/llm/test_guided_generate.py
@@ -12,7 +12,7 @@
from vllm.outputs import RequestOutput
from vllm.sampling_params import GuidedDecodingParams, SamplingParams

- MODEL_NAME = "Qwen/Qwen2.5-7B-Instruct"
+ MODEL_NAME = "Qwen/Qwen2.5-1.5B-Instruct"
GUIDED_DECODING_BACKENDS = ["outlines", "lm-format-enforcer", "xgrammar"]


4 changes: 2 additions & 2 deletions tests/entrypoints/openai/correctness/test_lmeval.py
@@ -15,12 +15,12 @@

from ....utils import RemoteOpenAIServer

- MODEL_NAME = "Qwen/Qwen2-1.5B-Instruct"
+ MODEL_NAME = "Qwen/Qwen2.5-1.5B-Instruct"
NUM_CONCURRENT = 500
TASK = "gsm8k"
FILTER = "exact_match,strict-match"
RTOL = 0.03
- EXPECTED_VALUE = 0.58
+ EXPECTED_VALUE = 0.54
DEFAULT_ARGS = ["--max-model-len", "2048", "--disable-log-requests"]
MORE_ARGS_LIST = [
[], # Default
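EXPECTED_VALUE is re-baselined here for the swapped-in Qwen2.5 model. As a rough illustration of how a baseline plus relative tolerance of this kind is typically consumed (the actual assertion in test_lmeval.py is outside this diff, so the use of pytest.approx below is an assumption):

import pytest

# Hypothetical sketch: the measured gsm8k exact_match score must land within
# RTOL (3% relative) of the expected baseline for the run to pass.
EXPECTED_VALUE = 0.54
RTOL = 0.03
measured = 0.545  # e.g. score reported by lm-eval for Qwen/Qwen2.5-1.5B-Instruct
assert measured == pytest.approx(EXPECTED_VALUE, rel=RTOL)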
2 changes: 1 addition & 1 deletion tests/models/decoder_only/language/test_models.py
@@ -47,7 +47,7 @@
marks=[pytest.mark.core_model],
),
pytest.param(
Review comment (DarkLight1337, Member, Feb 14, 2025):
We can remove this now that #13157 has been merged.

"Qwen/Qwen-7B", # qwen (text-only)
"Qwen/Qwen2.5-1.5B", # qwen (text-only)
),
pytest.param(
"Qwen/Qwen2.5-0.5B-Instruct", # qwen2
2 changes: 1 addition & 1 deletion tests/prefix_caching/test_disable_sliding_window.py
@@ -15,7 +15,7 @@

# Confirm model with sliding window works.
# config has "use_sliding_window": false
- ("Qwen/Qwen1.5-0.5B-Chat", 32768, 32768),
+ ("Qwen/Qwen2.5-1.5B-Instruct", 32768, 32768),
# config has no sliding window attribute.
("TinyLlama/TinyLlama-1.1B-Chat-v1.0", 2048, 2048),
]
6 changes: 3 additions & 3 deletions tests/test_config.py
@@ -52,7 +52,7 @@ def test_incorrect_task(model_id, bad_task):


MODEL_IDS_EXPECTED = [
- ("Qwen/Qwen1.5-7B", 32768),
+ ("Qwen/Qwen2.5-1.5B", 131072),
("mistralai/Mistral-7B-v0.1", 4096),
("mistralai/Mistral-7B-Instruct-v0.2", 32768),
]
@@ -81,9 +81,9 @@ def test_get_sliding_window():
# For Qwen1.5/Qwen2, get_sliding_window() should be None
# when use_sliding_window is False.
qwen2_model_config = ModelConfig(
"Qwen/Qwen1.5-7B",
"Qwen/Qwen2.5-1.5B",
task="auto",
tokenizer="Qwen/Qwen1.5-7B",
tokenizer="Qwen/Qwen2.5-1.5B",
tokenizer_mode="auto",
trust_remote_code=False,
seed=0,
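The jump from 32768 to 131072 presumably reflects Qwen2.5-1.5B's larger reported max_position_embeddings. A hedged sketch of how an entry from MODEL_IDS_EXPECTED might be checked, mirroring the ModelConfig construction visible in the second hunk above (the actual parametrized test body is not part of this diff):

from vllm.config import ModelConfig

# Hypothetical sketch: build a ModelConfig for the new entry and check that the
# derived max_model_len matches the value listed in MODEL_IDS_EXPECTED.
model_config = ModelConfig(
    "Qwen/Qwen2.5-1.5B",
    task="auto",
    tokenizer="Qwen/Qwen2.5-1.5B",
    tokenizer_mode="auto",
    trust_remote_code=False,
    seed=0,
    dtype="float16",
)
assert model_config.max_model_len == 131072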
4 changes: 2 additions & 2 deletions tests/test_regression.py
@@ -55,8 +55,8 @@ def test_gc():


def test_model_from_modelscope(monkeypatch):
- # model: https://modelscope.cn/models/qwen/Qwen1.5-0.5B-Chat/summary
- MODELSCOPE_MODEL_NAME = "qwen/Qwen1.5-0.5B-Chat"
+ # model: https://modelscope.cn/models/qwen/Qwen2.5-1.5B-Instruct/summary
+ MODELSCOPE_MODEL_NAME = "qwen/Qwen2.5-1.5B-Instruct"
monkeypatch.setenv("VLLM_USE_MODELSCOPE", "True")
try:
llm = LLM(model=MODELSCOPE_MODEL_NAME)