[Frontend]: Support base64 embedding (vllm-project#5935)
Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com>
Signed-off-by: Alvant <alvasian@yandex.ru>
2 people authored and Alvant committed Oct 26, 2024
1 parent 8403b99 commit 28672bc
Showing 3 changed files with 47 additions and 14 deletions.
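
The change lets clients ask the OpenAI-compatible embeddings endpoint for encoding_format="base64"; instead of rejecting the option, the server now base64-encodes the raw float64 bytes of each embedding. A minimal client-side sketch of the new behavior, assuming a vLLM OpenAI-compatible server is already running; the base URL, API key, and model name below are placeholders, not part of this commit:

import base64

import numpy as np
import openai

# Placeholder server address and key; point these at your own deployment.
client = openai.OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.embeddings.create(
    model="intfloat/e5-mistral-7b-instruct",  # placeholder embedding model
    input="Hello my name is",
    encoding_format="base64",
)

# With an explicit encoding_format="base64", the embedding field is a base64
# string wrapping raw float64 bytes; decode it back into a list of floats.
raw = base64.b64decode(response.data[0].embedding)
embedding = np.frombuffer(raw, dtype="float").tolist()
print(len(embedding))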
33 changes: 33 additions & 0 deletions tests/entrypoints/openai/test_embedding.py
@@ -1,3 +1,6 @@
+import base64
+
+import numpy as np
 import openai
 import pytest
 import ray
@@ -109,3 +112,33 @@ async def test_batch_embedding(embedding_client: openai.AsyncOpenAI,
     assert embeddings.usage.completion_tokens == 0
     assert embeddings.usage.prompt_tokens == 17
     assert embeddings.usage.total_tokens == 17
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "model_name",
+    [EMBEDDING_MODEL_NAME],
+)
+async def test_batch_base64_embedding(embedding_client: openai.AsyncOpenAI,
+                                      model_name: str):
+    input_texts = [
+        "Hello my name is",
+        "The best thing about vLLM is that it supports many different models"
+    ]
+
+    responses_float = await embedding_client.embeddings.create(
+        input=input_texts, model=model_name, encoding_format="float")
+
+    responses_base64 = await embedding_client.embeddings.create(
+        input=input_texts, model=model_name, encoding_format="base64")
+
+    decoded_responses_base64_data = []
+    for data in responses_base64.data:
+        decoded_responses_base64_data.append(
+            np.frombuffer(base64.b64decode(data.embedding),
+                          dtype="float").tolist())
+
+    assert responses_float.data[0].embedding == decoded_responses_base64_data[
+        0]
+    assert responses_float.data[1].embedding == decoded_responses_base64_data[
+        1]
2 changes: 1 addition & 1 deletion vllm/entrypoints/openai/protocol.py
@@ -580,7 +580,7 @@ class CompletionStreamResponse(OpenAIBaseModel):
 class EmbeddingResponseData(BaseModel):
     index: int
     object: str = "embedding"
-    embedding: List[float]
+    embedding: Union[List[float], str]


 class EmbeddingResponse(BaseModel):
26 changes: 13 additions & 13 deletions vllm/entrypoints/openai/serving_embedding.py
@@ -1,6 +1,8 @@
+import base64
 import time
 from typing import AsyncIterator, List, Optional, Tuple

+import numpy as np
 from fastapi import Request

 from vllm.config import ModelConfig
@@ -20,19 +22,18 @@


 def request_output_to_embedding_response(
-    final_res_batch: List[EmbeddingRequestOutput],
-    request_id: str,
-    created_time: int,
-    model_name: str,
-) -> EmbeddingResponse:
+        final_res_batch: List[EmbeddingRequestOutput], request_id: str,
+        created_time: int, model_name: str,
+        encoding_format: str) -> EmbeddingResponse:
     data: List[EmbeddingResponseData] = []
     num_prompt_tokens = 0
     for idx, final_res in enumerate(final_res_batch):
         assert final_res is not None
         prompt_token_ids = final_res.prompt_token_ids

-        embedding_data = EmbeddingResponseData(
-            index=idx, embedding=final_res.outputs.embedding)
+        embedding = final_res.outputs.embedding
+        if encoding_format == "base64":
+            embedding = base64.b64encode(np.array(embedding))
+        embedding_data = EmbeddingResponseData(index=idx, embedding=embedding)
         data.append(embedding_data)

         num_prompt_tokens += len(prompt_token_ids)
@@ -72,10 +73,8 @@ async def create_embedding(self, request: EmbeddingRequest,
         if error_check_ret is not None:
             return error_check_ret

-        # Return error for unsupported features.
-        if request.encoding_format == "base64":
-            return self.create_error_response(
-                "base64 encoding is not currently supported")
+        encoding_format = (request.encoding_format
+                           if request.encoding_format else "float")
         if request.dimensions is not None:
             return self.create_error_response(
                 "dimensions is currently not supported")
@@ -129,7 +128,8 @@ async def create_embedding(self, request: EmbeddingRequest,
                     return self.create_error_response("Client disconnected")
                 final_res_batch[i] = res
             response = request_output_to_embedding_response(
-                final_res_batch, request_id, created_time, model_name)
+                final_res_batch, request_id, created_time, model_name,
+                encoding_format)
         except ValueError as e:
             # TODO: Use a vllm-specific Validation Error
             return self.create_error_response(str(e))
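
The server-side encoding above leans on numpy's buffer protocol: base64.b64encode(np.array(embedding)) encodes the raw bytes of a float64 array, which is exactly what the new test undoes with np.frombuffer(..., dtype="float"). A standalone round-trip sketch of that relationship, with hypothetical values and no vLLM imports:

import base64

import numpy as np

embedding = [0.25, -1.5, 3.0]  # hypothetical embedding values

# Server side: np.array(...) produces a float64 array, and b64encode reads
# its raw bytes through the buffer protocol.
encoded = base64.b64encode(np.array(embedding))

# Client side: decode the base64 payload and reinterpret the bytes as float64.
decoded = np.frombuffer(base64.b64decode(encoded), dtype="float").tolist()

assert decoded == embedding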
