[FrontEnd] Support Whisper Transcription Protocol #12458

23 changes: 23 additions & 0 deletions query_transcription.py
@@ -0,0 +1,23 @@
# SPDX-License-Identifier: Apache-2.0
from openai import OpenAI

from vllm.assets.audio import AudioAsset

mary_had_lamb = AudioAsset('mary_had_lamb').get_asset_path()
winning_call = AudioAsset('winning_call').get_asset_path()

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"
client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)
with open(str(mary_had_lamb), "rb") as f:
    transcription = client.audio.transcriptions.create(
        file=f,
        model="openai/whisper-large-v3",
        language="en",
        response_format="text",
        temperature=0.0)
    print("transcription result:", transcription)
5 changes: 5 additions & 0 deletions vllm/assets/audio.py
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: Apache-2.0

from dataclasses import dataclass
from pathlib import Path
from typing import Literal
from urllib.parse import urljoin

@@ -28,6 +29,10 @@ def audio_and_sample_rate(self) -> tuple[npt.NDArray, float]:
                                            s3_prefix=ASSET_DIR)
        return librosa.load(audio_path, sr=None)

    def get_asset_path(self) -> Path:
        return get_vllm_public_assets(filename=f"{self.name}.ogg",
                                      s3_prefix=ASSET_DIR)

    @property
    def url(self) -> str:
        return urljoin(VLLM_S3_BUCKET_URL, f"{ASSET_DIR}/{self.name}.ogg")
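A quick usage sketch (not from the diff): the new helper fetches the bundled `.ogg` asset, presumably caching it locally like the existing `audio_and_sample_rate` path, and returns its local `Path`, so callers can read the raw bytes directly:

# Hypothetical usage; assumes the asset is available from vLLM's public asset bucket.
from vllm.assets.audio import AudioAsset

path = AudioAsset('mary_had_lamb').get_asset_path()
with open(path, "rb") as f:
    audio_bytes = f.read()  # raw OGG bytes, e.g. for an upload to the transcription API
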
44 changes: 42 additions & 2 deletions vllm/entrypoints/openai/api_server.py
@@ -17,10 +17,10 @@
from contextlib import asynccontextmanager
from functools import partial
from http import HTTPStatus
from typing import AsyncIterator, Dict, Optional, Set, Tuple, Union
from typing import Annotated, AsyncIterator, Dict, Optional, Set, Tuple, Union

import uvloop
from fastapi import APIRouter, FastAPI, HTTPException, Request
from fastapi import APIRouter, FastAPI, Form, HTTPException, Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, Response, StreamingResponse
@@ -62,6 +62,8 @@
                                              ScoreRequest, ScoreResponse,
                                              TokenizeRequest,
                                              TokenizeResponse,
                                              TranscriptionRequest,
                                              TranscriptionResponse,
                                              UnloadLoraAdapterRequest)
from vllm.entrypoints.openai.reasoning_parsers import ReasoningParserManager
# yapf: enable
@@ -76,6 +78,8 @@
from vllm.entrypoints.openai.serving_score import OpenAIServingScores
from vllm.entrypoints.openai.serving_tokenization import (
    OpenAIServingTokenization)
from vllm.entrypoints.openai.serving_transcription import (
    OpenAIServingTranscription)
from vllm.entrypoints.openai.tool_parsers import ToolParserManager
from vllm.entrypoints.utils import with_cancellation
from vllm.logger import init_logger
@@ -319,6 +323,10 @@ def tokenization(request: Request) -> OpenAIServingTokenization:
    return request.app.state.openai_serving_tokenization


def transcription(request: Request) -> OpenAIServingTranscription:
    return request.app.state.openai_serving_transcription


def engine_client(request: Request) -> EngineClient:
    return request.app.state.engine_client

@@ -545,6 +553,32 @@ async def do_rerank_v2(request: RerankRequest, raw_request: Request):
    return await do_rerank(request, raw_request)


@router.post("/v1/audio/transcriptions")
@with_cancellation
async def create_transcriptions(request: Annotated[TranscriptionRequest,
                                                    Form()],
                                raw_request: Request):

    audio_data = await request.file.read()

    handler = transcription(raw_request)
    if handler is None:
        return base(raw_request).create_error_response(
            message="The model does not support Transcriptions API")

    generator = await handler.create_transcription(audio_data, request,
                                                   raw_request)

    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.code)

    elif isinstance(generator, TranscriptionResponse):
        return JSONResponse(content=generator.model_dump())

    return StreamingResponse(content=generator, media_type="text/event-stream")
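
Because the new route parses `TranscriptionRequest` from multipart form data via `Form()`, a raw HTTP client has to send the audio as a file field alongside the other form fields. A minimal sketch with `requests` (hypothetical file name and server; not part of the diff):

# Hypothetical raw-HTTP equivalent of the OpenAI-client example; assumes a
# vLLM server on localhost:8000 serving openai/whisper-large-v3.
import requests

with open("mary_had_lamb.ogg", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/v1/audio/transcriptions",
        files={"file": f},
        data={
            "model": "openai/whisper-large-v3",
            "language": "en",
            "response_format": "json",
            "temperature": "0.0",
        },
    )
print(resp.json())
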


TASK_HANDLERS: Dict[str, Dict[str, tuple]] = {
"generate": {
"messages": (ChatCompletionRequest, create_chat_completion),
@@ -821,6 +855,12 @@ async def init_app_state(
        chat_template=resolved_chat_template,
        chat_template_content_format=args.chat_template_content_format,
    )
    state.openai_serving_transcription = OpenAIServingTranscription(
        engine_client,
        model_config,
        state.openai_serving_models,
        request_logger=request_logger,
    )
    state.task = model_config.task


164 changes: 163 additions & 1 deletion vllm/entrypoints/openai/protocol.py
@@ -5,9 +5,11 @@
import re
import time
from argparse import Namespace
from typing import Any, ClassVar, Dict, List, Literal, Optional, Set, Union
from typing import (Any, ClassVar, Dict, List, Literal, Optional, Set,
                    TypeAlias, Union)

import torch
from fastapi import UploadFile
from pydantic import (BaseModel, ConfigDict, Field, TypeAdapter,
                      ValidationInfo, field_validator, model_validator)
from typing_extensions import Annotated
@@ -1426,3 +1428,163 @@ class LoadLoraAdapterRequest(BaseModel):
class UnloadLoraAdapterRequest(BaseModel):
    lora_name: str
    lora_int_id: Optional[int] = Field(default=None)


## Protocols for Audio
AudioResponseFormat: TypeAlias = Literal["json", "text", "srt", "verbose_json",
                                         "vtt"]


class TranscriptionRequest(OpenAIBaseModel):
    # Ordered by official OpenAI API documentation
    # https://platform.openai.com/docs/api-reference/audio/createTranscription

    file: UploadFile
    """
    The audio file object (not file name) to transcribe, in one of these
    formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
    """

    model: str
    """ID of the model to use.
    """

    language: str
    """The language of the input audio.

    Supplying the input language in
    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format
    will improve accuracy and latency.
    """

    prompt: str = Field(default="")
    """An optional text to guide the model's style or continue a previous audio
    segment.

    The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
    should match the audio language.
    """

    response_format: AudioResponseFormat = Field(default="json")
    """
    The format of the output, in one of these options: `json`, `text`, `srt`,
    `verbose_json`, or `vtt`.
    """

    ## TODO (varun): Support raising the temperature automatically when set to
    ## 0, until certain thresholds are met.
    temperature: float = Field(default=0.0)
    """The sampling temperature, between 0 and 1.

    Higher values like 0.8 will make the output more random, while lower values
    like 0.2 will make it more focused / deterministic. If set to 0, the model
    will use [log probability](https://en.wikipedia.org/wiki/Log_probability)
    to automatically increase the temperature until certain thresholds are hit.
    """

    timestamp_granularities: List[Literal["word", "segment"]] = Field(
        alias="timestamp_granularities[]", default=[])
    """The timestamp granularities to populate for this transcription.

    `response_format` must be set to `verbose_json` to use timestamp
    granularities. Either or both of these options are supported: `word` or
    `segment`. Note: There is no additional latency for segment timestamps,
    but generating word timestamps incurs additional latency.
    """

    # Default sampling parameters for transcription requests.
    _DEFAULT_SAMPLING_PARAMS: dict = {
        "temperature": 0,
    }

    def to_sampling_params(
            self,
            default_max_tokens: int,
            default_sampling_params: Optional[dict] = None) -> SamplingParams:
        # TODO(#9845): remove max_tokens when field is removed from OpenAI API
        max_tokens = default_max_tokens

        if default_sampling_params is None:
            default_sampling_params = {}
        # Default parameters
        if (temperature := self.temperature) is None:
            temperature = default_sampling_params.get(
                "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"])

        return SamplingParams.from_optional(temperature=temperature,
                                            max_tokens=max_tokens)
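
To illustrate how the fields documented above map onto a client call, here is a hedged sketch (not from the diff; whether this PR's serving path honors `verbose_json` and timestamp granularities is outside its scope) of requesting word- and segment-level timestamps through the OpenAI Python client:

# Hypothetical client-side request; `client` and `mary_had_lamb` are assumed to
# be set up as in query_transcription.py above, and the prompt text is invented.
with open(str(mary_had_lamb), "rb") as f:
    verbose = client.audio.transcriptions.create(
        file=f,
        model="openai/whisper-large-v3",
        language="en",
        prompt="A children's nursery rhyme.",
        response_format="verbose_json",
        timestamp_granularities=["word", "segment"],
        temperature=0.0)
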


# Transcription response objects
class TranscriptionResponse(OpenAIBaseModel):
    text: str
    """The transcribed text."""


class TranscriptionWord(OpenAIBaseModel):
    end: float
    """End time of the word in seconds."""

    start: float
    """Start time of the word in seconds."""

    word: str
    """The text content of the word."""


class TranscriptionSegment(OpenAIBaseModel):
    id: int
    """Unique identifier of the segment."""

    avg_logprob: float
    """Average logprob of the segment.

    If the value is lower than -1, consider the logprobs failed.
    """

    compression_ratio: float
    """Compression ratio of the segment.

    If the value is greater than 2.4, consider the compression failed.
    """

    end: float
    """End time of the segment in seconds."""

    no_speech_prob: float
    """Probability of no speech in the segment.

    If the value is higher than 1.0 and the `avg_logprob` is below -1, consider
    this segment silent.
    """

    seek: int
    """Seek offset of the segment."""

    start: float
    """Start time of the segment in seconds."""

    temperature: float
    """Temperature parameter used for generating the segment."""

    text: str
    """Text content of the segment."""

    tokens: List[int]
    """Array of token IDs for the text content."""


class TranscriptionResponseVerbose(OpenAIBaseModel):
    duration: str
    """The duration of the input audio."""

    language: str
    """The language of the input audio."""

    text: str
    """The transcribed text."""

    segments: Optional[List[TranscriptionSegment]] = None
    """Segments of the transcribed text and their corresponding details."""

    words: Optional[List[TranscriptionWord]] = None
    """Extracted words and their corresponding timestamps."""