
Commit d3f94d5
[CI/Build] Cleanup VLM tests (vllm-project#6107)
DarkLight1337 authored and robertgshaw2-redhat committed Jul 7, 2024
1 parent bd542ca commit d3f94d5
Showing 4 changed files with 5 additions and 8 deletions.
7 changes: 2 additions & 5 deletions tests/models/test_llava_next.py
@@ -1,4 +1,3 @@
-import re
 from typing import List, Optional, Tuple
 
 import pytest
@@ -41,17 +40,15 @@ def vllm_to_hf_output(vllm_output: Tuple[List[int], str,
     output_ids, output_str, out_logprobs = vllm_output
 
     tokenizer = AutoTokenizer.from_pretrained(model)
-    image_token_str = tokenizer.decode(IMAGE_TOKEN_ID)
     eos_token_id = tokenizer.eos_token_id
 
     hf_output_ids = [
         token_id for idx, token_id in enumerate(output_ids)
         if token_id != IMAGE_TOKEN_ID or output_ids[idx - 1] != IMAGE_TOKEN_ID
     ]
 
-    hf_output_str = re.sub(fr"({image_token_str})+", "", output_str)
-    assert hf_output_str[0] == " "
-    hf_output_str = hf_output_str[1:]
+    assert output_str[0] == " "
+    hf_output_str = output_str[1:]
     if hf_output_ids[-1] == eos_token_id:
         hf_output_str = hf_output_str + tokenizer.decode(eos_token_id)
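
Taken together, this hunk removes the regex-based stripping of decoded image-token strings: the helper now trusts vLLM's detokenized output and only strips the leading space. For reference, the cleaned-up helper would read roughly like the sketch below (the IMAGE_TOKEN_ID value and the return statement are assumptions filled in from context, not part of the diff shown):

from typing import List, Optional, Tuple

from transformers import AutoTokenizer

# Assumed constant; the real value lives elsewhere in the test module.
IMAGE_TOKEN_ID = 32000


def vllm_to_hf_output(vllm_output: Tuple[List[int], str, Optional[list]],
                      model: str):
    """Sanitize vLLM output for comparison with HF output (post-cleanup sketch)."""
    output_ids, output_str, out_logprobs = vllm_output

    tokenizer = AutoTokenizer.from_pretrained(model)
    eos_token_id = tokenizer.eos_token_id

    # Collapse each run of repeated image tokens down to a single token.
    hf_output_ids = [
        token_id for idx, token_id in enumerate(output_ids)
        if token_id != IMAGE_TOKEN_ID or output_ids[idx - 1] != IMAGE_TOKEN_ID
    ]

    # The decoded string is only expected to carry a leading space now;
    # no image-token text needs to be regex-stripped anymore.
    assert output_str[0] == " "
    hf_output_str = output_str[1:]
    if hf_output_ids[-1] == eos_token_id:
        hf_output_str = hf_output_str + tokenizer.decode(eos_token_id)

    return hf_output_ids, hf_output_str, out_logprobs
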
3 changes: 1 addition & 2 deletions tests/models/test_phi3v.py
@@ -40,8 +40,7 @@ def vllm_to_hf_output(vllm_output: Tuple[List[int], str,
     assert output_str_without_image[0] == " "
     output_str_without_image = output_str_without_image[1:]
 
-    hf_output_str = output_str_without_image.replace("<|user|>", "") \
-        .replace("<|end|>\n<|assistant|>", " ")
+    hf_output_str = output_str_without_image + "<|end|><|endoftext|>"
 
     tokenizer = AutoTokenizer.from_pretrained(model)
     hf_output_ids = tokenizer.encode(output_str_without_image)
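
Here the Phi-3-Vision helper stops rewriting chat-role markers and instead appends the terminator tokens the HF reference output carries. A tiny before/after illustration on a made-up decoded string:

# Hypothetical input, purely for illustration.
output_str_without_image = "The image shows a stop sign."

# Before this commit: strip role markers out of the decoded string.
old = output_str_without_image.replace("<|user|>", "") \
    .replace("<|end|>\n<|assistant|>", " ")

# After this commit: leave the text alone and append the terminators.
new = output_str_without_image + "<|end|><|endoftext|>"

print(old)  # The image shows a stop sign.
print(new)  # The image shows a stop sign.<|end|><|endoftext|>
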
1 change: 1 addition & 0 deletions tests/models/utils.py
@@ -77,6 +77,7 @@ def check_logprobs_close(
                 # Each predicted token must be in top N logprobs of the other
                 fail_msg = (
                     f"Test{prompt_idx}:"
+                    f"\nMatched tokens:\t{output_ids_0[:idx]}"
                     f"\n{name_0}:\t{output_str_0!r}\t{logprobs_elem_0}"
                     f"\n{name_1}:\t{output_str_1!r}\t{logprobs_elem_1}")
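
With the added line, a token mismatch now also reports the prefix of tokens on which the two runs agreed, which makes it easier to see where the outputs forked. A standalone rendering of the new message with hypothetical values:

# Hypothetical values, purely to render the new failure-message format.
prompt_idx = 0
idx = 3  # position of the first mismatched token
output_ids_0 = [101, 2009, 2003, 1037]
name_0, name_1 = "hf", "vllm"
output_str_0, output_str_1 = "It is a", "It is an"
logprobs_elem_0, logprobs_elem_1 = {1037: -0.1}, {2019: -0.2}

fail_msg = (f"Test{prompt_idx}:"
            f"\nMatched tokens:\t{output_ids_0[:idx]}"
            f"\n{name_0}:\t{output_str_0!r}\t{logprobs_elem_0}"
            f"\n{name_1}:\t{output_str_1!r}\t{logprobs_elem_1}")
print(fail_msg)
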
2 changes: 1 addition & 1 deletion vllm/multimodal/image.py
@@ -115,7 +115,7 @@ def _default_input_mapper(self, ctx: InputContext,
         if isinstance(data, Image.Image):
             image_processor = self._get_hf_image_processor(model_config)
             if image_processor is None:
-                raise RuntimeError("No HuggingFace processor is available"
+                raise RuntimeError("No HuggingFace processor is available "
                                    "to process the image object")
             try:
                 batch_data = image_processor \
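
This one-character fix matters because Python joins adjacent string literals with no separator; without the trailing space the two fragments fuse into "availableto". A minimal demonstration:

before = ("No HuggingFace processor is available"
          "to process the image object")
after = ("No HuggingFace processor is available "
         "to process the image object")

print(before)  # ...is availableto process the image object
print(after)   # ...is available to process the image object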
