Skip to content

Commit d32fee7

Browse files
DarkLight1337 and alhridoy
authored and committed
[CI/Build] Fix model nightly tests (vllm-project#26466)
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
1 parent bf66d14 commit d32fee7

File tree

5 files changed

+9
-7
lines changed

5 files changed

+9
-7
lines changed

tests/models/language/generation/test_common.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -100,7 +100,7 @@
100100
"allenai/OLMoE-1B-7B-0924-Instruct",
101101
marks=[pytest.mark.cpu_model],
102102
),
103-
pytest.param("swiss-ai/Apertus-8B-2509"), # apertus
103+
pytest.param("swiss-ai/Apertus-8B-Instruct-2509"), # apertus
104104
],
105105
)
106106
@pytest.mark.parametrize("max_tokens", [32])

tests/models/language/pooling/test_token_classification.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -67,4 +67,4 @@ def test_modernbert_models(
6767
for hf_output, vllm_output in zip(hf_outputs, vllm_outputs):
6868
hf_output = torch.tensor(hf_output).cpu().float()
6969
vllm_output = torch.tensor(vllm_output).cpu().float()
70-
assert torch.allclose(hf_output, vllm_output, 1e-2)
70+
assert torch.allclose(hf_output, vllm_output, atol=1e-2)

tests/models/multimodal/generation/test_common.py

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -749,6 +749,7 @@
749749
max_num_seqs=2,
750750
auto_cls=AutoModelForImageTextToText,
751751
hf_output_post_proc=model_utils.smolvlm_trunc_hf_output,
752+
num_logprobs=10,
752753
),
753754
"tarsier": VLMTestInfo(
754755
models=["omni-research/Tarsier-7b"],

tests/models/multimodal/pooling/test_clip.py

Lines changed: 5 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -45,14 +45,16 @@ def _run_test(
4545

4646
all_outputs = []
4747
for inputs in all_inputs:
48+
inputs = hf_model.wrap_device(inputs)
49+
4850
if "pixel_values" in inputs:
49-
inputs.pop("input_ids")
5051
pooled_output = hf_model.model.get_image_features(
51-
**hf_model.wrap_device(inputs)
52+
pixel_values=inputs.pixel_values,
5253
).squeeze(0)
5354
else:
5455
pooled_output = hf_model.model.get_text_features(
55-
**hf_model.wrap_device(inputs)
56+
input_ids=inputs.input_ids,
57+
attention_mask=inputs.attention_mask,
5658
).squeeze(0)
5759

5860
all_outputs.append(pooled_output.tolist())

tests/models/registry.py

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -172,9 +172,8 @@ def check_available_online(
172172
_TEXT_GENERATION_EXAMPLE_MODELS = {
173173
# [Decoder-only]
174174
"ApertusForCausalLM": _HfExamplesInfo(
175-
"swiss-ai/Apertus-8B-2509",
175+
"swiss-ai/Apertus-8B-Instruct-2509",
176176
min_transformers_version="4.56.0",
177-
trust_remote_code=True,
178177
),
179178
"AquilaModel": _HfExamplesInfo("BAAI/AquilaChat-7B", trust_remote_code=True),
180179
"AquilaForCausalLM": _HfExamplesInfo("BAAI/AquilaChat2-7B", trust_remote_code=True),

0 commit comments

Comments (0)