
Commit 9d47f64

[CI/Build] [3/3] Reorganize entrypoints tests (#5966)
1 parent cff6a1f · commit 9d47f64

17 files changed: +19 −48 lines

.buildkite/test-pipeline.yaml

Lines changed: 2 additions & 2 deletions
@@ -89,8 +89,8 @@ steps:
   mirror_hardwares: [amd]
 
   commands:
-  - pytest -v -s entrypoints -m llm
-  - pytest -v -s entrypoints -m openai
+  - pytest -v -s entrypoints/llm
+  - pytest -v -s entrypoints/openai
 
 - label: Examples Test
   working_dir: "/vllm-workspace/examples"

pyproject.toml

Lines changed: 0 additions & 2 deletions
@@ -69,7 +69,5 @@ skip_gitignore = true
 [tool.pytest.ini_options]
 markers = [
     "skip_global_cleanup",
-    "llm: run tests for vLLM API only",
-    "openai: run tests for OpenAI API only",
     "vlm: run tests for vision language models only",
 ]
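For context on the pipeline and marker changes above, here is a minimal sketch of the selection mechanism this commit retires (the marker and path names mirror the diff; the flags are standard pytest):

    # Previously, each entrypoints test module tagged all of its tests with a
    # suite marker, registered in pyproject.toml's `markers` list:
    import pytest

    pytestmark = pytest.mark.llm  # module-level marker applied to every test

    # CI then picked a suite by marker expression:
    #     pytest -v -s entrypoints -m llm
    # After this commit, suite membership is encoded in the directory layout
    # (tests/entrypoints/llm/ vs. tests/entrypoints/openai/), so a plain path
    #     pytest -v -s entrypoints/llm
    # selects the same tests with no marker bookkeeping.

One likely benefit of the path-based scheme: a new test file cannot silently drop out of both suites because its author forgot the pytestmark line.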

tests/entrypoints/llm/__init__.py

Whitespace-only changes.

tests/entrypoints/test_llm_encode.py renamed to tests/entrypoints/llm/test_encode.py

Lines changed: 1 addition & 3 deletions
@@ -5,7 +5,7 @@
 
 from vllm import LLM, EmbeddingRequestOutput, PoolingParams
 
-from ..conftest import cleanup
+from ...conftest import cleanup
 
 MODEL_NAME = "intfloat/e5-mistral-7b-instruct"
 
@@ -25,8 +25,6 @@
     [1000, 1003, 1001, 1002],
 ]
 
-pytestmark = pytest.mark.llm
-
 
 @pytest.fixture(scope="module")
 def llm():
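The import gains a dot because each module moves one package level deeper; an abridged sketch of the resulting layout, using only files touched by this commit:

    tests/
    ├── conftest.py                # shared fixtures such as cleanup
    ├── utils.py                   # RemoteOpenAIServer, VLLM_PATH
    └── entrypoints/
        ├── llm/
        │   ├── __init__.py
        │   └── test_encode.py     # from ...conftest import cleanup
        └── openai/
            ├── __init__.py
            └── test_chat.py       # from ...utils import RemoteOpenAIServer

From tests/entrypoints/llm/test_encode.py, `..` now resolves to tests/entrypoints/ and `...` to tests/, where conftest.py and utils.py live; the same one-dot adjustment recurs in every renamed module below.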

tests/entrypoints/test_llm_generate.py renamed to tests/entrypoints/llm/test_generate.py

Lines changed: 1 addition & 3 deletions
@@ -5,7 +5,7 @@
 
 from vllm import LLM, RequestOutput, SamplingParams
 
-from ..conftest import cleanup
+from ...conftest import cleanup
 
 MODEL_NAME = "facebook/opt-125m"
 
@@ -23,8 +23,6 @@
     [0, 3, 1, 2],
 ]
 
-pytestmark = pytest.mark.llm
-
 
 @pytest.fixture(scope="module")
 def llm():

tests/entrypoints/test_llm_generate_multiple_loras.py renamed to tests/entrypoints/llm/test_generate_multiple_loras.py

Lines changed: 1 addition & 3 deletions
@@ -7,7 +7,7 @@
 from vllm import LLM
 from vllm.lora.request import LoRARequest
 
-from ..conftest import cleanup
+from ...conftest import cleanup
 
 MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
 
@@ -20,8 +20,6 @@
 
 LORA_NAME = "typeof/zephyr-7b-beta-lora"
 
-pytestmark = pytest.mark.llm
-
 
 @pytest.fixture(scope="module")
 def llm():

tests/entrypoints/openai/__init__.py

Whitespace-only changes.

tests/entrypoints/test_openai_chat.py renamed to tests/entrypoints/openai/test_chat.py

Lines changed: 1 addition & 3 deletions
@@ -14,7 +14,7 @@
 from huggingface_hub import snapshot_download
 from openai import BadRequestError
 
-from ..utils import RemoteOpenAIServer
+from ...utils import RemoteOpenAIServer
 
 # any model with a chat template should work here
 MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
@@ -69,8 +69,6 @@
     "Swift", "Kotlin"
 ]
 
-pytestmark = pytest.mark.openai
-
 
 @pytest.fixture(scope="module")
 def zephyr_lora_files():

tests/entrypoints/test_openai_completion.py renamed to tests/entrypoints/openai/test_completion.py

Lines changed: 1 addition & 3 deletions
@@ -16,7 +16,7 @@
 
 from vllm.transformers_utils.tokenizer import get_tokenizer
 
-from ..utils import RemoteOpenAIServer
+from ...utils import RemoteOpenAIServer
 
 # any model with a chat template should work here
 MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
@@ -71,8 +71,6 @@
     "Swift", "Kotlin"
 ]
 
-pytestmark = pytest.mark.openai
-
 
 @pytest.fixture(scope="module")
 def zephyr_lora_files():

tests/entrypoints/test_openai_embedding.py renamed to tests/entrypoints/openai/test_embedding.py

Lines changed: 1 addition & 3 deletions
@@ -2,12 +2,10 @@
 import pytest
 import ray
 
-from ..utils import RemoteOpenAIServer
+from ...utils import RemoteOpenAIServer
 
 EMBEDDING_MODEL_NAME = "intfloat/e5-mistral-7b-instruct"
 
-pytestmark = pytest.mark.openai
-
 
 @pytest.fixture(scope="module")
 def ray_ctx():

tests/entrypoints/test_guided_processors.py renamed to tests/entrypoints/openai/test_guided_processors.py

Lines changed: 0 additions & 2 deletions
@@ -52,8 +52,6 @@
 TEST_REGEX = (r"((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)\.){3}"
               r"(25[0-5]|(2[0-4]|1\d|[1-9]|)\d)")
 
-pytestmark = pytest.mark.openai
-
 
 def test_guided_logits_processors():
     """Basic unit test for RegexLogitsProcessor and JSONLogitsProcessor."""

tests/entrypoints/test_openai_server.py renamed to tests/entrypoints/openai/test_models.py

Lines changed: 1 addition & 3 deletions
@@ -6,16 +6,14 @@
 # downloading lora to test lora requests
 from huggingface_hub import snapshot_download
 
-from ..utils import RemoteOpenAIServer
+from ...utils import RemoteOpenAIServer
 
 # any model with a chat template should work here
 MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
 # technically this needs Mistral-7B-v0.1 as base, but we're not testing
 # generation quality here
 LORA_NAME = "typeof/zephyr-7b-beta-lora"
 
-pytestmark = pytest.mark.openai
-
 
 @pytest.fixture(scope="module")
 def zephyr_lora_files():

tests/entrypoints/test_server_oot_registration.py renamed to tests/entrypoints/openai/test_oot_registration.py

Lines changed: 0 additions & 3 deletions
@@ -1,7 +1,6 @@
 import sys
 import time
 
-import pytest
 import torch
 from openai import OpenAI, OpenAIError
 
@@ -10,8 +9,6 @@
 from vllm.model_executor.sampling_metadata import SamplingMetadata
 from vllm.utils import get_open_port
 
-pytestmark = pytest.mark.openai
-
 
 class MyOPTForCausalLM(OPTForCausalLM):
 

tests/entrypoints/openai/test_serving_chat.py

Lines changed: 0 additions & 4 deletions
@@ -1,15 +1,11 @@
 import asyncio
 from dataclasses import dataclass
 
-import pytest
-
 from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
 
 MODEL_NAME = "openai-community/gpt2"
 CHAT_TEMPLATE = "Dummy chat template for testing {}"
 
-pytestmark = pytest.mark.openai
-
 
 @dataclass
 class MockModelConfig:

tests/entrypoints/test_openai_vision.py renamed to tests/entrypoints/openai/test_vision.py

Lines changed: 3 additions & 10 deletions
@@ -1,4 +1,3 @@
-from pathlib import Path
 from typing import Dict, List
 
 import openai
@@ -8,12 +7,12 @@
 
 from vllm.multimodal.utils import ImageFetchAiohttp, encode_image_base64
 
-from ..utils import RemoteOpenAIServer
+from ...utils import VLLM_PATH, RemoteOpenAIServer
 
 MODEL_NAME = "llava-hf/llava-1.5-7b-hf"
-LLAVA_CHAT_TEMPLATE = (Path(__file__).parent.parent.parent /
-                       "examples/template_llava.jinja")
+LLAVA_CHAT_TEMPLATE = VLLM_PATH / "examples/template_llava.jinja"
 assert LLAVA_CHAT_TEMPLATE.exists()
+
 # Test different image extensions (JPG/PNG) and formats (gray/RGB/RGBA)
 TEST_IMAGE_URLS = [
     "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
@@ -22,8 +21,6 @@
     "https://upload.wikimedia.org/wikipedia/commons/0/0b/RGBA_comp.png",
 ]
 
-pytestmark = pytest.mark.openai
-
 
 @pytest.fixture(scope="module")
 def ray_ctx():
@@ -279,7 +276,3 @@ async def test_multi_image_input(client: openai.AsyncOpenAI, model_name: str,
     )
     completion = completion.choices[0].text
     assert completion is not None and len(completion) >= 0
-
-
-if __name__ == "__main__":
-    pytest.main([__file__])

tests/utils.py

Lines changed: 7 additions & 4 deletions
@@ -4,7 +4,8 @@
 import time
 import warnings
 from contextlib import contextmanager
-from typing import Dict, List
+from pathlib import Path
+from typing import Any, Dict, List
 
 import openai
 import ray
@@ -40,8 +41,8 @@ def _nvml():
     nvmlShutdown()
 
 
-# Path to root of repository so that utilities can be imported by ray workers
-VLLM_PATH = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))
+VLLM_PATH = Path(__file__).parent.parent
+"""Path to root of the vLLM repository."""
 
 
 class RemoteOpenAIServer:
@@ -153,10 +154,12 @@ def init_test_distributed_environment(
 def multi_process_parallel(
     tp_size: int,
     pp_size: int,
-    test_target,
+    test_target: Any,
 ) -> None:
     # Using ray helps debugging the error when it failed
     # as compared to multiprocessing.
+    # NOTE: We need to set working_dir for distributed tests,
+    # otherwise we may get import errors on ray workers
     ray.init(runtime_env={"working_dir": VLLM_PATH})
 
     distributed_init_port = get_open_port()
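A standalone sketch of what the VLLM_PATH rewrite buys (not vLLM code; `__file__` stands in for tests/utils.py, two levels below the repo root):

    import os
    from pathlib import Path

    here = Path(__file__).resolve()

    # Old definition: a plain string, built by climbing two directory levels.
    old_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))

    # New definition: an equivalent pathlib.Path with a richer API, e.g. the
    # `/` join and .exists() check now used in openai/test_vision.py:
    #     LLAVA_CHAT_TEMPLATE = VLLM_PATH / "examples/template_llava.jinja"
    #     assert LLAVA_CHAT_TEMPLATE.exists()
    new_root = here.parent.parent

    assert Path(old_root) == new_root  # same directory either way

The ray.init(runtime_env={"working_dir": VLLM_PATH}) call then ships that directory to remote workers, which is what the added NOTE comment documents.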
