Skip to content

Commit

Permalink
feat(llm): add document support for message prompts
Browse files Browse the repository at this point in the history
Introduces support for handling document content, specifically PDFs,
within prompt messages, extending the model's input capabilities.

Allows dynamic configuration of headers based on document presence
in prompts, improving flexibility for user interactions.
  • Loading branch information
laipz8200 committed Nov 18, 2024
1 parent 9c427ba commit acbb678
Show file tree
Hide file tree
Showing 3 changed files with 32 additions and 6 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ features:
- vision
- tool-call
- stream-tool-call
- document
model_properties:
mode: chat
context_size: 200000
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ features:
- vision
- tool-call
- stream-tool-call
- document
model_properties:
mode: chat
context_size: 200000
Expand Down
36 changes: 30 additions & 6 deletions api/core/model_runtime/model_providers/anthropic/llm/llm.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import base64
import io
import json
from collections.abc import Generator
from collections.abc import Generator, Sequence
from typing import Optional, Union, cast

import anthropic
Expand All @@ -21,9 +21,9 @@
from PIL import Image

from core.model_runtime.callbacks.base_callback import Callback
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (
from core.model_runtime.entities import (
AssistantPromptMessage,
DocumentPromptMessageContent,
ImagePromptMessageContent,
PromptMessage,
PromptMessageContentType,
Expand All @@ -33,6 +33,7 @@
ToolPromptMessage,
UserPromptMessage,
)
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.errors.invoke import (
InvokeAuthorizationError,
InvokeBadRequestError,
Expand Down Expand Up @@ -86,10 +87,10 @@ def _chat_generate(
self,
model: str,
credentials: dict,
prompt_messages: list[PromptMessage],
prompt_messages: Sequence[PromptMessage],
model_parameters: dict,
tools: Optional[list[PromptMessageTool]] = None,
stop: Optional[list[str]] = None,
stop: Optional[Sequence[str]] = None,
stream: bool = True,
user: Optional[str] = None,
) -> Union[LLMResult, Generator]:
Expand Down Expand Up @@ -130,9 +131,17 @@ def _chat_generate(
# Add the new header for claude-3-5-sonnet-20240620 model
extra_headers = {}
if model == "claude-3-5-sonnet-20240620":
if model_parameters.get("max_tokens") > 4096:
if model_parameters.get("max_tokens", 0) > 4096:
extra_headers["anthropic-beta"] = "max-tokens-3-5-sonnet-2024-07-15"

if any(
isinstance(content, DocumentPromptMessageContent)
for prompt_message in prompt_messages
if isinstance(prompt_message.content, list)
for content in prompt_message.content
):
extra_headers["anthropic-beta"] = "pdfs-2024-09-25"

if tools:
extra_model_kwargs["tools"] = [self._transform_tool_prompt(tool) for tool in tools]
response = client.beta.tools.messages.create(
Expand Down Expand Up @@ -505,6 +514,21 @@ def _convert_prompt_messages(self, prompt_messages: list[PromptMessage]) -> tupl
"source": {"type": "base64", "media_type": mime_type, "data": base64_data},
}
sub_messages.append(sub_message_dict)
elif isinstance(message_content, DocumentPromptMessageContent):
if message_content.mime_type != "application/pdf":
raise ValueError(
f"Unsupported document type {message_content.mime_type}, "
"only support application/pdf"
)
sub_message_dict = {
"type": "document",
"source": {
"type": message_content.encode_format,
"media_type": message_content.mime_type,
"data": message_content.data,
},
}
sub_messages.append(sub_message_dict)
prompt_message_dicts.append({"role": "user", "content": sub_messages})
elif isinstance(message, AssistantPromptMessage):
message = cast(AssistantPromptMessage, message)
Expand Down

0 comments on commit acbb678

Please sign in to comment.