[CI/Build] [3/3] Reorganize entrypoints tests (#5966)
DarkLight1337 authored Jun 30, 2024
1 parent cff6a1f commit 9d47f64
Showing 17 changed files with 19 additions and 48 deletions.
4 changes: 2 additions & 2 deletions .buildkite/test-pipeline.yaml
@@ -89,8 +89,8 @@ steps:
mirror_hardwares: [amd]

commands:
-    - pytest -v -s entrypoints -m llm
-    - pytest -v -s entrypoints -m openai
+    - pytest -v -s entrypoints/llm
+    - pytest -v -s entrypoints/openai

- label: Examples Test
working_dir: "/vllm-workspace/examples"
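The pipeline change above swaps marker-based selection for directory-based selection: instead of filtering one flat entrypoints directory with -m llm / -m openai, each API's tests now live in their own subdirectory and are collected by path. A minimal sketch of the same switch driven from Python via pytest.main() (paths are illustrative and assume the tests/ directory as the working directory):

```python
import pytest

# Before the reorganization: select tests by registered marker.
# pytest.main(["-v", "-s", "entrypoints", "-m", "llm"])

# After the reorganization: select tests by directory; no marker needed.
pytest.main(["-v", "-s", "entrypoints/llm"])
```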
2 changes: 0 additions & 2 deletions pyproject.toml
@@ -69,7 +69,5 @@ skip_gitignore = true
[tool.pytest.ini_options]
markers = [
"skip_global_cleanup",
-    "llm: run tests for vLLM API only",
-    "openai: run tests for OpenAI API only",
"vlm: run tests for vision language models only",
]
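With the tests split into per-API directories, the llm and openai markers no longer select anything, so their registrations are removed; skip_global_cleanup and vlm stay registered. For context, this hedged sketch (not taken from the commit) shows what the dropped registrations enabled: a module-level pytestmark applies a registered marker to every test in the file so the whole group can be run with -m:

```python
import pytest

# With "vlm" registered under [tool.pytest.ini_options] markers, running
# `pytest -m vlm` collects every test in modules that declare this line.
pytestmark = pytest.mark.vlm


def test_placeholder():
    assert True
```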
Empty file.
@@ -5,7 +5,7 @@

from vllm import LLM, EmbeddingRequestOutput, PoolingParams

-from ..conftest import cleanup
+from ...conftest import cleanup

MODEL_NAME = "intfloat/e5-mistral-7b-instruct"

@@ -25,8 +25,6 @@
[1000, 1003, 1001, 1002],
]

-pytestmark = pytest.mark.llm
-

@pytest.fixture(scope="module")
def llm():
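Each moved test module gains one leading dot on its conftest import because it now sits one directory deeper relative to tests/conftest.py (tests/entrypoints/llm/ instead of tests/entrypoints/). The sketch below is a self-contained illustration of that resolution, using a throwaway copy of the assumed layout rather than the real repository:

```python
import importlib
import sys
import tempfile
from pathlib import Path

# Assumed layout: tests/conftest.py defines cleanup(), and the test module
# now lives two package levels below it.
root = Path(tempfile.mkdtemp())
(root / "tests" / "entrypoints" / "llm").mkdir(parents=True)
for pkg in ("tests", "tests/entrypoints", "tests/entrypoints/llm"):
    (root / pkg / "__init__.py").touch()
(root / "tests" / "conftest.py").write_text("def cleanup():\n    return 'cleaned'\n")
(root / "tests" / "entrypoints" / "llm" / "test_demo.py").write_text(
    # One leading dot per level: llm/ -> entrypoints/ -> tests/.
    "from ...conftest import cleanup\n"
)

sys.path.insert(0, str(root))
module = importlib.import_module("tests.entrypoints.llm.test_demo")
print(module.cleanup())  # -> cleaned
```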
@@ -5,7 +5,7 @@

from vllm import LLM, RequestOutput, SamplingParams

-from ..conftest import cleanup
+from ...conftest import cleanup

MODEL_NAME = "facebook/opt-125m"

@@ -23,8 +23,6 @@
[0, 3, 1, 2],
]

-pytestmark = pytest.mark.llm
-

@pytest.fixture(scope="module")
def llm():
@@ -7,7 +7,7 @@
from vllm import LLM
from vllm.lora.request import LoRARequest

-from ..conftest import cleanup
+from ...conftest import cleanup

MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"

@@ -20,8 +20,6 @@

LORA_NAME = "typeof/zephyr-7b-beta-lora"

-pytestmark = pytest.mark.llm
-

@pytest.fixture(scope="module")
def llm():
Empty file.
@@ -14,7 +14,7 @@
from huggingface_hub import snapshot_download
from openai import BadRequestError

-from ..utils import RemoteOpenAIServer
+from ...utils import RemoteOpenAIServer

# any model with a chat template should work here
MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
@@ -69,8 +69,6 @@
"Swift", "Kotlin"
]

-pytestmark = pytest.mark.openai
-

@pytest.fixture(scope="module")
def zephyr_lora_files():
@@ -16,7 +16,7 @@

from vllm.transformers_utils.tokenizer import get_tokenizer

-from ..utils import RemoteOpenAIServer
+from ...utils import RemoteOpenAIServer

# any model with a chat template should work here
MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
@@ -71,8 +71,6 @@
"Swift", "Kotlin"
]

-pytestmark = pytest.mark.openai
-

@pytest.fixture(scope="module")
def zephyr_lora_files():
@@ -2,12 +2,10 @@
import pytest
import ray

-from ..utils import RemoteOpenAIServer
+from ...utils import RemoteOpenAIServer

EMBEDDING_MODEL_NAME = "intfloat/e5-mistral-7b-instruct"

-pytestmark = pytest.mark.openai
-

@pytest.fixture(scope="module")
def ray_ctx():
@@ -52,8 +52,6 @@
TEST_REGEX = (r"((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)\.){3}"
r"(25[0-5]|(2[0-4]|1\d|[1-9]|)\d)")

-pytestmark = pytest.mark.openai
-

def test_guided_logits_processors():
"""Basic unit test for RegexLogitsProcessor and JSONLogitsProcessor."""
@@ -6,16 +6,14 @@
# downloading lora to test lora requests
from huggingface_hub import snapshot_download

-from ..utils import RemoteOpenAIServer
+from ...utils import RemoteOpenAIServer

# any model with a chat template should work here
MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
# technically this needs Mistral-7B-v0.1 as base, but we're not testing
# generation quality here
LORA_NAME = "typeof/zephyr-7b-beta-lora"

-pytestmark = pytest.mark.openai
-

@pytest.fixture(scope="module")
def zephyr_lora_files():
@@ -1,7 +1,6 @@
import sys
import time

-import pytest
import torch
from openai import OpenAI, OpenAIError

@@ -10,8 +9,6 @@
from vllm.model_executor.sampling_metadata import SamplingMetadata
from vllm.utils import get_open_port

-pytestmark = pytest.mark.openai
-

class MyOPTForCausalLM(OPTForCausalLM):

File renamed without changes.
4 changes: 0 additions & 4 deletions tests/entrypoints/openai/test_serving_chat.py
@@ -1,15 +1,11 @@
import asyncio
from dataclasses import dataclass

-import pytest
-
from vllm.entrypoints.openai.serving_chat import OpenAIServingChat

MODEL_NAME = "openai-community/gpt2"
CHAT_TEMPLATE = "Dummy chat template for testing {}"

-pytestmark = pytest.mark.openai
-

@dataclass
class MockModelConfig:
@@ -1,4 +1,3 @@
-from pathlib import Path
from typing import Dict, List

import openai
@@ -8,12 +7,12 @@

from vllm.multimodal.utils import ImageFetchAiohttp, encode_image_base64

-from ..utils import RemoteOpenAIServer
+from ...utils import VLLM_PATH, RemoteOpenAIServer

MODEL_NAME = "llava-hf/llava-1.5-7b-hf"
-LLAVA_CHAT_TEMPLATE = (Path(__file__).parent.parent.parent /
-                       "examples/template_llava.jinja")
+LLAVA_CHAT_TEMPLATE = VLLM_PATH / "examples/template_llava.jinja"
+assert LLAVA_CHAT_TEMPLATE.exists()

# Test different image extensions (JPG/PNG) and formats (gray/RGB/RGBA)
TEST_IMAGE_URLS = [
"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
@@ -22,8 +21,6 @@
"https://upload.wikimedia.org/wikipedia/commons/0/0b/RGBA_comp.png",
]

-pytestmark = pytest.mark.openai
-

@pytest.fixture(scope="module")
def ray_ctx():
@@ -279,7 +276,3 @@ async def test_multi_image_input(client: openai.AsyncOpenAI, model_name: str,
)
completion = completion.choices[0].text
assert completion is not None and len(completion) >= 0


-if __name__ == "__main__":
-    pytest.main([__file__])
11 changes: 7 additions & 4 deletions tests/utils.py
@@ -4,7 +4,8 @@
import time
import warnings
from contextlib import contextmanager
-from typing import Dict, List
+from pathlib import Path
+from typing import Any, Dict, List

import openai
import ray
@@ -40,8 +41,8 @@ def _nvml():
nvmlShutdown()


-# Path to root of repository so that utilities can be imported by ray workers
-VLLM_PATH = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))
+VLLM_PATH = Path(__file__).parent.parent
+"""Path to root of the vLLM repository."""


class RemoteOpenAIServer:
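tests/utils.py now exposes the repository root as a pathlib.Path instead of a string built with os.path, so callers can join paths with the / operator, as the vision test above does for the chat template. A quick sketch of the equivalence (the two forms agree as long as no symlinks are involved):

```python
import os
from pathlib import Path

# Old style: absolute string path two directory levels above this file.
old_root = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))

# New style: a Path object, also two levels up, composable with `/`.
new_root = Path(__file__).parent.parent
template = new_root / "examples" / "template_llava.jinja"

print(old_root)
print(new_root.resolve())  # same directory as old_root when no symlinks intervene
print(template)
```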
@@ -153,10 +154,12 @@ def init_test_distributed_environment(
def multi_process_parallel(
tp_size: int,
pp_size: int,
-    test_target,
+    test_target: Any,
) -> None:
# Using ray helps debugging the error when it failed
# as compared to multiprocessing.
+    # NOTE: We need to set working_dir for distributed tests,
+    # otherwise we may get import errors on ray workers
ray.init(runtime_env={"working_dir": VLLM_PATH})

distributed_init_port = get_open_port()
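The added NOTE documents why ray.init() is passed a working_dir: the distributed tests run their targets in separate Ray worker processes, and without uploading the repository those workers cannot import helpers that exist only in the source tree. A minimal, hypothetical sketch of the failure mode this avoids (the module and path names are placeholders, not part of the commit):

```python
import ray

# Upload the repository to every worker and make it their working directory,
# so repository-local modules become importable inside remote tasks.
ray.init(runtime_env={"working_dir": "/path/to/repo"})


@ray.remote
def uses_repo_helper() -> str:
    # Without working_dir this import fails on the worker: the module exists
    # only in the repository checkout, not among the installed packages.
    import repo_local_helper  # hypothetical module under working_dir
    return repo_local_helper.__name__


print(ray.get(uses_repo_helper.remote()))
```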
