1 parent fb8bfd1 commit f7430b7
vllm/entrypoints/openai/serving_transcription.py
```diff
@@ -265,7 +265,7 @@ async def create_transcription(
             logger.exception("Error in preprocessing prompt inputs")
             return self.create_error_response(str(e))
 
-        result_generator: AsyncGenerator[RequestOutput, None] = None
+        result_generator: AsyncGenerator[RequestOutput, None] | None = None
         try:
             # TODO(rob): subtract len of tokenized prompt.
             default_max_tokens = self.model_config.max_model_len
```
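The change is a one-line type-annotation fix: `result_generator` is initialized to `None` before the `try` block, so its annotation must admit `None` (hence the union `AsyncGenerator[RequestOutput, None] | None`) for static type checkers to accept the initializer. Below is a minimal standalone sketch of the same pattern; the names (`numbers`, `gen`, `main`) are hypothetical illustrations, not vLLM's actual code:

```python
import asyncio
from collections.abc import AsyncGenerator

async def numbers() -> AsyncGenerator[int, None]:
    # Hypothetical stand-in for a real result generator.
    for i in range(3):
        yield i

async def main() -> None:
    # The `| None` in the annotation admits the initial unset state;
    # without it, type checkers such as mypy reject assigning `None`
    # to a variable typed as a plain AsyncGenerator.
    gen: AsyncGenerator[int, None] | None = None
    try:
        gen = numbers()
    finally:
        if gen is not None:  # narrow away None before iterating
            async for n in gen:
                print(n)

asyncio.run(main())
```

Note that the `X | None` union syntax in annotations requires Python 3.10+; on older interpreters the equivalent spelling is `Optional[AsyncGenerator[...]]` from the `typing` module.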