
[Misc] Upgrade to Python 3.9 typing for additional directories #14492


Merged
4 commits merged on Mar 8, 2025
5 changes: 0 additions & 5 deletions pyproject.toml
@@ -74,11 +74,8 @@ exclude = [
"vllm/distributed/**/*.py" = ["UP006", "UP035"]
"vllm/engine/**/*.py" = ["UP006", "UP035"]
"vllm/executor/**/*.py" = ["UP006", "UP035"]
"vllm/inputs/**/*.py" = ["UP006", "UP035"]
"vllm/logging_utils/**/*.py" = ["UP006", "UP035"]
"vllm/lora/**/*.py" = ["UP006", "UP035"]
"vllm/model_executor/**/*.py" = ["UP006", "UP035"]
"vllm/multimodal/**/*.py" = ["UP006", "UP035"]
"vllm/platforms/**/*.py" = ["UP006", "UP035"]
"vllm/plugins/**/*.py" = ["UP006", "UP035"]
"vllm/profiler/**/*.py" = ["UP006", "UP035"]
@@ -87,9 +84,7 @@ exclude = [
"vllm/third_party/**/*.py" = ["UP006", "UP035"]
"vllm/transformers_utils/**/*.py" = ["UP006", "UP035"]
"vllm/triton_utils/**/*.py" = ["UP006", "UP035"]
"vllm/usage/**/*.py" = ["UP006", "UP035"]
"vllm/vllm_flash_attn/**/*.py" = ["UP006", "UP035"]
"vllm/assets/**/*.py" = ["UP006", "UP035"]
"vllm/worker/**/*.py" = ["UP006", "UP035"]

[tool.ruff.lint]
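These deletions are the point of the PR: dropping a path from ruff's `per-file-ignores` list means UP006 (prefer PEP 585 built-in generics such as `list[int]` over `typing.List[int]`) and UP035 (avoid imports that newer Python versions deprecate, such as `typing.List`) are now enforced for that directory. A minimal before/after sketch of what the two rules flag (hypothetical function, for illustration only):

```python
# Before: UP035 flags the deprecated typing aliases at the import,
# and UP006 flags each annotation that uses them.
from typing import Dict, List


def count_tokens(batches: List[List[int]]) -> Dict[str, int]:
    return {"total": sum(len(batch) for batch in batches)}


# After: built-in generics, valid at runtime from Python 3.9 (PEP 585),
# with no typing import needed at all.
def count_tokens_pep585(batches: list[list[int]]) -> dict[str, int]:
    return {"total": sum(len(batch) for batch in batches)}
```

Note that `Optional` and `Union` are left alone throughout this diff: rewriting them as `X | Y` (PEP 604) would require Python 3.10 at runtime, while vLLM still supports 3.9.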
6 changes: 3 additions & 3 deletions vllm/assets/video.py
@@ -2,7 +2,7 @@

from dataclasses import dataclass
from functools import lru_cache
-from typing import List, Literal
+from typing import Literal

import cv2
import numpy as np
@@ -58,7 +58,7 @@ def video_to_ndarrays(path: str, num_frames: int = -1) -> npt.NDArray:


def video_to_pil_images_list(path: str,
-                             num_frames: int = -1) -> List[Image.Image]:
+                             num_frames: int = -1) -> list[Image.Image]:
frames = video_to_ndarrays(path, num_frames)
return [
Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
@@ -72,7 +72,7 @@ class VideoAsset:
num_frames: int = -1

@property
-    def pil_images(self) -> List[Image.Image]:
+    def pil_images(self) -> list[Image.Image]:
video_path = download_video_asset(self.name)
ret = video_to_pil_images_list(video_path, self.num_frames)
return ret
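The annotation changes above are safe without `from __future__ import annotations` because function annotations are evaluated eagerly at definition time, and PEP 585 made the built-in containers subscriptable from Python 3.9 onward. A quick, self-contained check of that runtime behavior:

```python
import types

# On Python 3.9+, list[int] is a real runtime object (types.GenericAlias),
# so it can appear in eagerly evaluated annotations like the ones above.
alias = list[int]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is list
assert alias.__args__ == (int,)

# Subscripting changes nothing at runtime: calling the alias still
# constructs a plain list.
assert list[int]([1, 2, 3]) == [1, 2, 3]
```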
54 changes: 27 additions & 27 deletions vllm/inputs/data.py
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: Apache-2.0

+from collections.abc import Iterable
from dataclasses import dataclass
from functools import cached_property
-from typing import (TYPE_CHECKING, Any, Dict, Generic, Iterable, List, Literal,
-                    Optional, Tuple, Union, cast)
+from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, Union, cast

import torch
from typing_extensions import NotRequired, TypedDict, TypeVar, assert_never
@@ -26,7 +26,7 @@ class TextPrompt(TypedDict):
if the model supports it.
"""

-    mm_processor_kwargs: NotRequired[Dict[str, Any]]
+    mm_processor_kwargs: NotRequired[dict[str, Any]]
"""
Optional multi-modal processor kwargs to be forwarded to the
multimodal input mapper & processor. Note that if multiple modalities
@@ -38,10 +38,10 @@ class TextPrompt(TypedDict):
class TokensPrompt(TypedDict):
"""Schema for a tokenized prompt."""

-    prompt_token_ids: List[int]
+    prompt_token_ids: list[int]
"""A list of token IDs to pass to the model."""

-    token_type_ids: NotRequired[List[int]]
+    token_type_ids: NotRequired[list[int]]
"""A list of token type IDs to pass to the cross encoder model."""

multi_modal_data: NotRequired["MultiModalDataDict"]
@@ -50,7 +50,7 @@ class TokensPrompt(TypedDict):
if the model supports it.
"""

-    mm_processor_kwargs: NotRequired[Dict[str, Any]]
+    mm_processor_kwargs: NotRequired[dict[str, Any]]
"""
Optional multi-modal processor kwargs to be forwarded to the
multimodal input mapper & processor. Note that if multiple modalities
@@ -115,7 +115,7 @@ class ExplicitEncoderDecoderPrompt(TypedDict, Generic[_T1_co, _T2_co]):

decoder_prompt: Optional[_T2_co]

-    mm_processor_kwargs: NotRequired[Dict[str, Any]]
+    mm_processor_kwargs: NotRequired[dict[str, Any]]


PromptType = Union[SingletonPrompt, ExplicitEncoderDecoderPrompt]
@@ -136,10 +136,10 @@ class TokenInputs(TypedDict):
type: Literal["token"]
"""The type of inputs."""

-    prompt_token_ids: List[int]
+    prompt_token_ids: list[int]
"""The token IDs of the prompt."""

-    token_type_ids: NotRequired[List[int]]
+    token_type_ids: NotRequired[list[int]]
"""The token type IDs of the prompt."""

prompt: NotRequired[str]
@@ -164,12 +164,12 @@ class TokenInputs(TypedDict):
Placeholder ranges for the multi-modal data.
"""

-    multi_modal_hashes: NotRequired[List[str]]
+    multi_modal_hashes: NotRequired[list[str]]
"""
The hashes of the multi-modal data.
"""

-    mm_processor_kwargs: NotRequired[Dict[str, Any]]
+    mm_processor_kwargs: NotRequired[dict[str, Any]]
"""
Optional multi-modal processor kwargs to be forwarded to the
multimodal input mapper & processor. Note that if multiple modalities
@@ -179,14 +179,14 @@


def token_inputs(
-    prompt_token_ids: List[int],
-    token_type_ids: Optional[List[int]] = None,
+    prompt_token_ids: list[int],
+    token_type_ids: Optional[list[int]] = None,
prompt: Optional[str] = None,
multi_modal_data: Optional["MultiModalDataDict"] = None,
multi_modal_inputs: Optional["MultiModalKwargs"] = None,
-    multi_modal_hashes: Optional[List[str]] = None,
+    multi_modal_hashes: Optional[list[str]] = None,
multi_modal_placeholders: Optional["MultiModalPlaceholderDict"] = None,
-    mm_processor_kwargs: Optional[Dict[str, Any]] = None,
+    mm_processor_kwargs: Optional[dict[str, Any]] = None,
) -> TokenInputs:
"""Construct :class:`TokenInputs` from optional values."""
inputs = TokenInputs(type="token", prompt_token_ids=prompt_token_ids)
@@ -255,7 +255,7 @@ def prompt(self) -> Optional[str]:
assert_never(inputs) # type: ignore[arg-type]

@cached_property
-    def prompt_token_ids(self) -> List[int]:
+    def prompt_token_ids(self) -> list[int]:
inputs = self.inputs

if inputs["type"] == "token" or inputs["type"] == "multimodal":
@@ -264,7 +264,7 @@ def prompt_token_ids(self) -> List[int]:
assert_never(inputs) # type: ignore[arg-type]

@cached_property
-    def token_type_ids(self) -> List[int]:
+    def token_type_ids(self) -> list[int]:
inputs = self.inputs

if inputs["type"] == "token" or inputs["type"] == "multimodal":
@@ -294,7 +294,7 @@ def multi_modal_data(self) -> "MultiModalDataDict":
assert_never(inputs) # type: ignore[arg-type]

@cached_property
-    def multi_modal_inputs(self) -> Union[Dict, "MultiModalKwargs"]:
+    def multi_modal_inputs(self) -> Union[dict, "MultiModalKwargs"]:
inputs = self.inputs

if inputs["type"] == "token":
@@ -306,7 +306,7 @@ def multi_modal_inputs(self) -> Union[Dict, "MultiModalKwargs"]:
assert_never(inputs) # type: ignore[arg-type]

@cached_property
-    def multi_modal_hashes(self) -> List[str]:
+    def multi_modal_hashes(self) -> list[str]:
inputs = self.inputs

if inputs["type"] == "token":
@@ -331,7 +331,7 @@ def multi_modal_placeholders(self) -> "MultiModalPlaceholderDict":
assert_never(inputs) # type: ignore[arg-type]

@cached_property
-    def mm_processor_kwargs(self) -> Dict[str, Any]:
+    def mm_processor_kwargs(self) -> dict[str, Any]:
inputs = self.inputs

if inputs["type"] == "token":
@@ -355,7 +355,7 @@
def build_explicit_enc_dec_prompt(
encoder_prompt: _T1,
decoder_prompt: Optional[_T2],
-    mm_processor_kwargs: Optional[Dict[str, Any]] = None,
+    mm_processor_kwargs: Optional[dict[str, Any]] = None,
) -> ExplicitEncoderDecoderPrompt[_T1, _T2]:
if mm_processor_kwargs is None:
mm_processor_kwargs = {}
@@ -368,9 +368,9 @@ def build_explicit_enc_dec_prompt(
def zip_enc_dec_prompts(
enc_prompts: Iterable[_T1],
dec_prompts: Iterable[Optional[_T2]],
-    mm_processor_kwargs: Optional[Union[Iterable[Dict[str, Any]],
-                                        Dict[str, Any]]] = None,
-) -> List[ExplicitEncoderDecoderPrompt[_T1, _T2]]:
+    mm_processor_kwargs: Optional[Union[Iterable[dict[str, Any]],
+                                        dict[str, Any]]] = None,
+) -> list[ExplicitEncoderDecoderPrompt[_T1, _T2]]:
"""
Zip encoder and decoder prompts together into a list of
:class:`ExplicitEncoderDecoderPrompt` instances.
@@ -380,12 +380,12 @@ def zip_enc_dec_prompts(
provided, it will be zipped with the encoder/decoder prompts.
"""
if mm_processor_kwargs is None:
-        mm_processor_kwargs = cast(Dict[str, Any], {})
+        mm_processor_kwargs = cast(dict[str, Any], {})
if isinstance(mm_processor_kwargs, dict):
return [
build_explicit_enc_dec_prompt(
encoder_prompt, decoder_prompt,
-                cast(Dict[str, Any], mm_processor_kwargs))
+                cast(dict[str, Any], mm_processor_kwargs))
for (encoder_prompt,
decoder_prompt) in zip(enc_prompts, dec_prompts)
]
@@ -399,7 +399,7 @@ def zip_enc_dec_prompts(

def to_enc_dec_tuple_list(
enc_dec_prompts: Iterable[ExplicitEncoderDecoderPrompt[_T1, _T2]],
-) -> List[Tuple[_T1, Optional[_T2]]]:
+) -> list[tuple[_T1, Optional[_T2]]]:
return [(enc_dec_prompt["encoder_prompt"],
enc_dec_prompt["decoder_prompt"])
for enc_dec_prompt in enc_dec_prompts]
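The same mechanics cover `TypedDict` bodies, which are evaluated once at class-creation time. A minimal sketch of the pattern this file uses, with `NotRequired` for keys that may be absent (the class and function here are hypothetical, not part of vLLM); note again that `Optional` survives the upgrade because PEP 604 syntax would raise the floor to Python 3.10:

```python
from typing import Any, Optional

from typing_extensions import NotRequired, TypedDict


class ExamplePrompt(TypedDict):
    prompt_token_ids: list[int]  # PEP 585 generic inside a TypedDict
    # NotRequired means the key may be missing entirely, which differs
    # from Optional[...], which only permits an explicit None value.
    mm_extras: NotRequired[dict[str, Any]]


def make_prompt(token_ids: list[int],
                extras: Optional[dict[str, Any]] = None) -> ExamplePrompt:
    prompt = ExamplePrompt(prompt_token_ids=token_ids)
    if extras is not None:
        prompt["mm_extras"] = extras
    return prompt


print(make_prompt([1, 2, 3], {"fps": 2}))
```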
17 changes: 9 additions & 8 deletions vllm/inputs/parse.py
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: Apache-2.0

-from typing import List, Literal, Sequence, TypedDict, Union, cast, overload
+from collections.abc import Sequence
+from typing import Literal, TypedDict, Union, cast, overload

from typing_extensions import TypeIs

@@ -17,24 +18,24 @@ class ParsedText(TypedDict):


class ParsedTokens(TypedDict):
-    content: List[int]
+    content: list[int]
is_tokens: Literal[True]


@overload
def parse_and_batch_prompt(
-        prompt: Union[str, List[str]]) -> Sequence[ParsedText]:
+        prompt: Union[str, list[str]]) -> Sequence[ParsedText]:
...


@overload
def parse_and_batch_prompt(
-        prompt: Union[List[int], List[List[int]]]) -> Sequence[ParsedTokens]:
+        prompt: Union[list[int], list[list[int]]]) -> Sequence[ParsedTokens]:
...


def parse_and_batch_prompt(
-    prompt: Union[str, List[str], List[int], List[List[int]]],
+    prompt: Union[str, list[str], list[int], list[list[int]]],
) -> Union[Sequence[ParsedText], Sequence[ParsedTokens]]:
if isinstance(prompt, str):
# case 1: a string
@@ -46,16 +47,16 @@ def parse_and_batch_prompt(

if is_list_of(prompt, str):
# case 2: array of strings
-        prompt = cast(List[str], prompt)
+        prompt = cast(list[str], prompt)
return [
ParsedText(content=elem, is_tokens=False) for elem in prompt
]
if is_list_of(prompt, int):
# case 3: array of tokens
-        prompt = cast(List[int], prompt)
+        prompt = cast(list[int], prompt)
return [ParsedTokens(content=prompt, is_tokens=True)]
if is_list_of(prompt, list):
-        prompt = cast(List[List[int]], prompt)
+        prompt = cast(list[list[int]], prompt)
if len(prompt[0]) == 0:
raise ValueError("please provide at least one prompt")

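UP035 also covers the abstract container types: `typing.Sequence`, `typing.Iterable`, `typing.Mapping`, and friends are deprecated aliases of the `collections.abc` classes, which accept subscripts directly since Python 3.9. A small sketch of the import move together with `cast` on a built-in generic, mirroring the dispatch pattern above (hypothetical function, for illustration only):

```python
from collections.abc import Sequence
from typing import Union, cast


def as_batch(prompt: Union[str, list[str]]) -> Sequence[str]:
    if isinstance(prompt, str):
        return [prompt]
    # cast() accepts the new-style generic directly, so no typing.List
    # import is needed anywhere in the module.
    return cast(list[str], prompt)


assert as_batch("hello") == ["hello"]
assert as_batch(["a", "b"]) == ["a", "b"]
```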
23 changes: 12 additions & 11 deletions vllm/inputs/preprocess.py
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: Apache-2.0

import asyncio
-from typing import List, Mapping, Optional, Tuple, Union, cast
+from collections.abc import Mapping
+from typing import Optional, Union, cast

from typing_extensions import assert_never

@@ -92,7 +93,7 @@ def get_decoder_start_token_id(self) -> Optional[int]:

return dec_start_token_id

-    def _get_default_enc_dec_decoder_prompt(self) -> List[int]:
+    def _get_default_enc_dec_decoder_prompt(self) -> list[int]:
'''
Specifically for encoder/decoder models:
generate a default decoder prompt for when
@@ -130,8 +131,8 @@ def _get_default_enc_dec_decoder_prompt(self) -> List[int]:

def _prepare_decoder_input_ids_for_generation(
self,
-        decoder_input_ids: Optional[List[int]],
-    ) -> List[int]:
+        decoder_input_ids: Optional[list[int]],
+    ) -> list[int]:
"""
Prepares `decoder_input_ids` for generation with encoder-decoder models.

@@ -168,9 +169,9 @@ def _prepare_decoder_input_ids_for_generation(

def _apply_prompt_adapter(
self,
-        prompt_token_ids: List[int],
+        prompt_token_ids: list[int],
        prompt_adapter_request: Optional[PromptAdapterRequest],
-    ) -> List[int]:
+    ) -> list[int]:
if prompt_adapter_request:
prompt_token_ids = (
[0] * prompt_adapter_request.prompt_adapter_num_virtual_tokens
@@ -183,7 +184,7 @@ def _tokenize_prompt(
prompt: str,
request_id: str,
lora_request: Optional[LoRARequest],
-    ) -> List[int]:
+    ) -> list[int]:
"""
Apply the model's tokenizer to a text prompt, returning the
corresponding token IDs.
@@ -211,7 +212,7 @@ async def _tokenize_prompt_async(
prompt: str,
request_id: str,
lora_request: Optional[LoRARequest],
-    ) -> List[int]:
+    ) -> list[int]:
"""Async version of :meth:`_tokenize_prompt`."""
tokenizer = self.get_tokenizer_group()
add_special_tokens = None
@@ -250,7 +251,7 @@ def _can_process_multimodal(self) -> bool:

def _process_multimodal(
self,
-        prompt: Union[str, List[int]],
+        prompt: Union[str, list[int]],
mm_data: MultiModalDataDict,
mm_processor_kwargs: Optional[Mapping[str, object]],
lora_request: Optional[LoRARequest],
@@ -280,7 +281,7 @@ def _process_multimodal(

async def _process_multimodal_async(
self,
-        prompt: Union[str, List[int]],
+        prompt: Union[str, list[int]],
mm_data: MultiModalDataDict,
mm_processor_kwargs: Optional[Mapping[str, object]],
lora_request: Optional[LoRARequest],
@@ -511,7 +512,7 @@ def _separate_enc_dec_inputs_from_mm_processor_outputs(
self,
inputs: SingletonInputs,
decoder_inputs_to_override: Optional[SingletonInputs] = None,
-    ) -> Tuple[SingletonInputs, SingletonInputs]:
+    ) -> tuple[SingletonInputs, SingletonInputs]:
"""
For encoder/decoder models only:
Separate Encoder/Decoder inputs from a MultiModalEncDecInputs
5 changes: 3 additions & 2 deletions vllm/inputs/registry.py
@@ -2,9 +2,10 @@

import functools
from collections import UserDict
+from collections.abc import Mapping
from dataclasses import dataclass
-from typing import (TYPE_CHECKING, Any, Callable, Mapping, NamedTuple,
-                    Optional, Protocol, Union)
+from typing import (TYPE_CHECKING, Any, Callable, NamedTuple, Optional,
+                    Protocol, Union)

from torch import nn
from transformers import BatchFeature, PretrainedConfig, ProcessorMixin
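Once a directory's exclusion is removed, it is linted like the rest of the tree, so the upgrade can be verified locally. A minimal sketch of checking one newly covered path from a script; the flags follow current ruff CLI conventions, but treat the exact invocation as an assumption and consult `ruff check --help` for your version:

```python
import subprocess

# Run only the two typing-upgrade rules over one directory; ruff exits
# non-zero while violations remain, and `--fix` would apply most rewrites.
result = subprocess.run(
    ["ruff", "check", "--select", "UP006,UP035", "vllm/inputs"],
    capture_output=True,
    text=True,
)
print(result.stdout or "no UP006/UP035 violations found")
```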