
Add vLLM version info to logs and openai API server (#3161)
jasonacox authored Mar 3, 2024
1 parent ce4f5a2 commit d65fac2
Showing 2 changed files with 10 additions and 1 deletion.
vllm/engine/llm_engine.py (2 additions & 1 deletion)
@@ -7,6 +7,7 @@
 from typing import (TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple,
                     Union)
 
+import vllm
 from vllm.lora.request import LoRARequest
 from vllm.config import (CacheConfig, DeviceConfig, ModelConfig,
                          ParallelConfig, SchedulerConfig, LoRAConfig)
@@ -85,7 +86,7 @@ def __init__(
         log_stats: bool,
     ) -> None:
         logger.info(
-            "Initializing an LLM engine with config: "
+            f"Initializing an LLM engine (v{vllm.__version__}) with config: "
             f"model={model_config.model!r}, "
             f"tokenizer={model_config.tokenizer!r}, "
             f"tokenizer_mode={model_config.tokenizer_mode}, "
vllm/entrypoints/openai/api_server.py (8 additions & 0 deletions)
@@ -15,6 +15,7 @@
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import JSONResponse, StreamingResponse, Response
 
+import vllm
 from vllm.engine.arg_utils import AsyncEngineArgs
 from vllm.engine.async_llm_engine import AsyncLLMEngine
 from vllm.entrypoints.openai.protocol import CompletionRequest, ChatCompletionRequest, ErrorResponse
@@ -168,6 +169,12 @@ async def show_available_models():
     return JSONResponse(content=models.model_dump())
 
 
+@app.get("/version")
+async def show_version():
+    ver = {"version": vllm.__version__}
+    return JSONResponse(content=ver)
+
+
 @app.post("/v1/chat/completions")
 async def create_chat_completion(request: ChatCompletionRequest,
                                  raw_request: Request):
@@ -231,6 +238,7 @@ async def authentication(request: Request, call_next):
                 f"Invalid middleware {middleware}. Must be a function or a class."
             )
 
+    logger.info(f"vLLM API server version {vllm.__version__}")
     logger.info(f"args: {args}")
 
     if args.served_model_name is not None:
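With this change, the server exposes a simple version endpoint alongside the OpenAI-compatible routes. As a minimal sanity check (a sketch assuming a server is already running on localhost:8000, the default host and port):

# Query the new /version endpoint of a running vLLM API server.
# Assumes the server is listening on http://localhost:8000.
import json
from urllib.request import urlopen

with urlopen("http://localhost:8000/version") as resp:
    payload = json.load(resp)

print(payload["version"])  # e.g. "0.3.3"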

2 comments on commit d65fac2

@78 commented on d65fac2 Mar 4, 2024


'vllm.__version__' is undefined if you run or install from the source directory:

# pip install -e .

You will get this error if you try to run the API server:

AttributeError: module 'vllm' has no attribute '__version__'


@jasonacox (Contributor, Author) commented on d65fac2 Mar 11, 2024


Hi @78 - You are probably running from a directory that contains a folder called "vllm". Python is importing the module from that folder instead of the installed library.
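A quick way to confirm which copy of the package Python is resolving (a generic Python diagnostic, not part of this commit):

# Print the location of the vllm package Python actually imported.
# If this points into a local source checkout instead of site-packages,
# the local folder is shadowing the installed library.
import vllm
print(vllm.__file__)

If that prints a path inside your current working directory, launch the server from somewhere else, as in the steps below.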

Try:

# Create a fresh env - optional
python -m venv venv
source venv/bin/activate

# Clone vLLM and install it from source
git clone https://github.com/vllm-project/vllm.git
cd vllm
pip install -e .

# Run from a new directory that does not contain a vllm/ folder
cd ..
mkdir mytest
cd mytest
python -m vllm.entrypoints.openai.api_server --model facebook/opt-125m

I get this:

$ python -m vllm.entrypoints.openai.api_server     --model facebook/opt-125m
INFO 03-10 23:07:49 api_server.py:241] vLLM API server version 0.3.3
INFO 03-10 23:07:49 api_server.py:242] args: Namespace(host=None, port=8000, uvicorn_log_level='info', allow_credentials=False, allowed_origins=['*'], allowed_methods=['*'], allowed_headers=['*'], api_key=None, served_model_name=None, lora_modules=None, chat_template=None, response_role='assistant', ssl_keyfile=None, ssl_certfile=None, root_path=None, middleware=[], model='facebook/opt-125m', tokenizer=None, revision=None, code_revision=None, tokenizer_revision=None, tokenizer_mode='auto', trust_remote_code=False, download_dir=None, load_format='auto', dtype='auto', kv_cache_dtype='auto', max_model_len=None, worker_use_ray=False, pipeline_parallel_size=1, tensor_parallel_size=1, max_parallel_loading_workers=None, ray_workers_use_nsight=False, block_size=16, enable_prefix_caching=False, seed=0, swap_space=4, gpu_memory_utilization=0.9, max_num_batched_tokens=None, max_num_seqs=256, max_paddings=256, max_logprobs=5, disable_log_stats=False, quantization=None, enforce_eager=False, max_context_len_to_capture=8192, disable_custom_all_reduce=False, enable_lora=False, max_loras=1, max_lora_rank=16, lora_extra_vocab_size=256, lora_dtype='auto', max_cpu_loras=None, device='auto', engine_use_ray=False, disable_log_requests=False, max_log_len=None)
INFO 03-10 23:07:50 llm_engine.py:90] Initializing an LLM engine (v0.3.3) with config: model='facebook/opt-125m', tokenizer='facebook/opt-125m', tokenizer_mode=auto, revision=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, seed=0)
INFO 03-10 23:07:52 attention.py:66] flash_attn is not found. Using xformers backend.
