diff --git a/agents-api/agents_api/autogen/Chat.py b/agents-api/agents_api/autogen/Chat.py index d013ae8b7..380476806 100644 --- a/agents-api/agents_api/autogen/Chat.py +++ b/agents-api/agents_api/autogen/Chat.py @@ -165,7 +165,58 @@ class Content(BaseModel): """ +class ContentItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["image"] = "image" + source: Source + + class ContentModel(BaseModel): + """ + Anthropic image content part + """ + + model_config = ConfigDict( + populate_by_name=True, + ) + tool_use_id: str + type: Literal["tool_result"] = "tool_result" + content: list[ContentItem] + + +class ContentModel1(Content): + pass + + +class ContentModel2(ContentModel): + """ + Anthropic image content part + """ + + +class ContentModel3(Content): + pass + + +class ContentModel4(ContentModel): + """ + Anthropic image content part + """ + + +class ContentModel5(Content): + pass + + +class ContentModel6(ContentModel): + """ + Anthropic image content part + """ + + +class ContentModel7(BaseModel): model_config = ConfigDict( populate_by_name=True, ) @@ -193,7 +244,8 @@ class Delta(BaseModel): """ tool_call_id: str | None = None content: Annotated[ - str | list[str] | list[Content | ContentModel] | None, Field(...) + str | list[str] | list[ContentModel1 | ContentModel7 | ContentModel2] | None, + Field(...), ] = None """ The content parts of the message @@ -258,7 +310,8 @@ class Message(BaseModel): """ tool_call_id: str | None = None content: Annotated[ - str | list[str] | list[Content | ContentModel] | None, Field(...) + str | list[str] | list[Content | ContentModel7 | ContentModel] | None, + Field(...), ] = None """ The content parts of the message @@ -305,7 +358,8 @@ class MessageModel(BaseModel): """ tool_call_id: str | None = None content: Annotated[ - str | list[str] | list[Content | ContentModel] | None, Field(...) 
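For orientation, a minimal sketch of how the tool_result content part introduced in this hunk could be built with the generated pydantic models. The class names (ContentModel, ContentItem, Source) are the ones emitted by the generator in this diff and may shift if the aliases are regenerated; the id and base64 data are placeholders.

# Sketch: constructing an Anthropic-style tool_result content part with the
# generated models from agents_api.autogen.Chat.
from agents_api.autogen.Chat import ContentItem, ContentModel, Source

tool_result_part = ContentModel(
    tool_use_id="toolu_123",  # placeholder id of the tool_use block being answered
    content=[
        ContentItem(
            source=Source(
                media_type="image/png",
                data="iVBORw0KGgo...",  # placeholder base64 image payload
            )
        )
    ],
)

# The "type" discriminators default to "tool_result", "image" and "base64",
# so only the payload fields need to be supplied.
print(tool_result_part.model_dump())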
+ str | list[str] | list[ContentModel3 | ContentModel7 | ContentModel4] | None, + Field(...), ] = None """ The content parts of the message @@ -405,6 +459,15 @@ class SingleChatOutput(BaseChatOutput): message: MessageModel +class Source(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["base64"] = "base64" + media_type: str + data: str + + class TokenLogProb(BaseTokenLogProb): model_config = ConfigDict( populate_by_name=True, diff --git a/agents-api/agents_api/autogen/Entries.py b/agents-api/agents_api/autogen/Entries.py index f001fc880..829818e37 100644 --- a/agents-api/agents_api/autogen/Entries.py +++ b/agents-api/agents_api/autogen/Entries.py @@ -28,7 +28,7 @@ class BaseEntry(BaseModel): """ name: str | None = None content: ( - list[Content | ContentModel] + list[Content | ContentModel3 | ContentModel] | Tool | ChosenFunctionCall | ChosenComputer20241022 @@ -37,7 +37,7 @@ class BaseEntry(BaseModel): | str | ToolResponse | list[ - list[Content | ContentModel] + list[ContentModel1 | ContentModel3 | ContentModel2] | Tool | ChosenFunctionCall | ChosenComputer20241022 @@ -95,7 +95,38 @@ class Content(BaseModel): """ +class ContentItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["image"] = "image" + source: Source + + class ContentModel(BaseModel): + """ + Anthropic image content part + """ + + model_config = ConfigDict( + populate_by_name=True, + ) + tool_use_id: str + type: Literal["tool_result"] = "tool_result" + content: list[ContentItem] + + +class ContentModel1(Content): + pass + + +class ContentModel2(ContentModel): + """ + Anthropic image content part + """ + + +class ContentModel3(BaseModel): model_config = ConfigDict( populate_by_name=True, ) @@ -168,3 +199,12 @@ class Relation(BaseModel): head: UUID relation: str tail: UUID + + +class Source(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["base64"] = "base64" + media_type: str + data: str diff --git a/agents-api/agents_api/autogen/Tasks.py b/agents-api/agents_api/autogen/Tasks.py index e62e6d3c3..2fad6d63d 100644 --- a/agents-api/agents_api/autogen/Tasks.py +++ b/agents-api/agents_api/autogen/Tasks.py @@ -85,6 +85,14 @@ class Content(BaseModel): """ +class ContentItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["image"] = "image" + source: Source + + class ContentModel(BaseModel): model_config = ConfigDict( populate_by_name=True, @@ -99,14 +107,33 @@ class ContentModel(BaseModel): """ -class ContentModel1(Content): +class ContentModel1(BaseModel): + """ + Anthropic image content part + """ + + model_config = ConfigDict( + populate_by_name=True, + ) + tool_use_id: str + type: Literal["tool_result"] = "tool_result" + content: list[ContentItem] + + +class ContentModel2(Content): pass -class ContentModel2(ContentModel): +class ContentModel3(ContentModel): pass +class ContentModel4(ContentModel1): + """ + Anthropic image content part + """ + + class CreateTaskRequest(BaseModel): """ Payload for creating a task @@ -655,7 +682,8 @@ class PromptItem(BaseModel): """ tool_call_id: str | None = None content: Annotated[ - list[str] | list[Content | ContentModel] | str | None, Field(...) 
+ list[str] | list[Content | ContentModel | ContentModel1] | str | None, + Field(...), ] """ The content parts of the message @@ -861,6 +889,18 @@ class SleepStep(BaseModel): """ +class Source(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["base64"] = "base64" + media_type: str + data: str + """ + A valid jinja template. + """ + + class SwitchStep(BaseModel): model_config = ConfigDict( populate_by_name=True, diff --git a/agents-api/agents_api/routers/sessions/chat.py b/agents-api/agents_api/routers/sessions/chat.py index 17a93799d..5ef4bde56 100644 --- a/agents-api/agents_api/routers/sessions/chat.py +++ b/agents-api/agents_api/routers/sessions/chat.py @@ -1,9 +1,17 @@ -from typing import Annotated, Optional +from datetime import datetime +from typing import Annotated, Callable, Optional from uuid import UUID, uuid4 +from anthropic import AsyncAnthropic +from anthropic.types.beta.beta_message import BetaMessage from fastapi import BackgroundTasks, Depends, Header +from langchain_core.tools import BaseTool +from langchain_core.tools.convert import tool as tool_decorator +from litellm import ChatCompletionMessageToolCall, Function, Message +from litellm.types.utils import Choices, ModelResponse from starlette.status import HTTP_201_CREATED +from ...activities.utils import get_handler_with_filtered_params from ...autogen.openapi_model import ( ChatInput, ChatResponse, @@ -11,18 +19,78 @@ CreateEntryRequest, MessageChatResponse, ) +from ...autogen.Tools import Tool from ...clients import litellm from ...common.protocol.developers import Developer from ...common.protocol.sessions import ChatContext from ...common.utils.datetime import utcnow from ...common.utils.template import render_template from ...dependencies.developer_id import get_developer_data +from ...env import anthropic_api_key from ...models.chat.gather_messages import gather_messages from ...models.chat.prepare_chat_context import prepare_chat_context from ...models.entry.create_entries import create_entries from .metrics import total_tokens_per_user from .router import router +COMPUTER_USE_BETA_FLAG = "computer-use-2024-10-22" + + +def format_tool(tool: Tool) -> dict: + if tool.type == "computer_20241022": + return { + "type": tool.type, + "name": tool.name, + "display_width_px": tool.computer_20241022 + and tool.computer_20241022.display_width_px, + "display_height_px": tool.computer_20241022 + and tool.computer_20241022.display_height_px, + "display_number": tool.computer_20241022 + and tool.computer_20241022.display_number, + } + + if tool.type in ["bash_20241022", "text_editor_20241022"]: + return tool.model_dump(include={"type", "name"}) + + if tool.type == "function": + return { + "type": "function", + "function": { + "name": tool.name, + "description": tool.description, + "parameters": tool.function and tool.function.parameters, + }, + } + + # For other tool types, we need to translate them to the OpenAI function tool format + formatted = { + "type": "function", + "function": {"name": tool.name, "description": tool.description}, + } + + if tool.type == "system": + handler: Callable = get_handler_with_filtered_params(tool.system) + + lc_tool: BaseTool = tool_decorator(handler) + + json_schema: dict = lc_tool.get_input_jsonschema() + + formatted["function"]["description"] = formatted["function"][ + "description" + ] or json_schema.get("description") + + formatted["function"]["parameters"] = json_schema + + # # FIXME: Implement integration tools + # elif tool.type == "integration": + # 
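To make the branches of format_tool concrete, here is a hedged sketch of the dict shapes it is expected to return, one for an Anthropic computer-use tool and one for the generic function-tool fallback (all values below are illustrative, not taken from the repo).

# Shape returned for a computer_20241022 tool: the Anthropic beta schema is
# passed through as-is.
computer_tool = {
    "type": "computer_20241022",
    "name": "computer",
    "display_width_px": 1024,
    "display_height_px": 768,
    "display_number": 1,
}

# Shape returned for function tools and other tool types (including "system"
# tools, whose JSON schema is derived via langchain's tool decorator): the
# OpenAI function-tool format.
function_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}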
raise NotImplementedError("Integration tools are not supported") + + # # FIXME: Implement API call tools + # elif tool.type == "api_call": + # raise NotImplementedError("API call tools are not supported") + + return formatted + @router.post( "/sessions/{session_id}/chat", @@ -106,27 +174,9 @@ async def chat( # Get the tools tools = settings.get("tools") or chat_context.get_active_tools() - tools = [tool.model_dump(mode="json") for tool in tools] - - # Convert anthropic tools to `function` - for tool in tools: - if tool.get("type") == "computer_20241022": - tool["function"] = { - "name": tool["name"], - "parameters": tool.pop("computer_20241022"), - } - - elif tool.get("type") == "bash_20241022": - tool["function"] = { - "name": tool["name"], - "parameters": tool.pop("bash_20241022"), - } - - elif tool.get("type") == "text_editor_20241022": - tool["function"] = { - "name": tool["name"], - "parameters": tool.pop("text_editor_20241022"), - } + + # Format tools for litellm + formatted_tools = [format_tool(tool) for tool in tools] # FIXME: Truncate chat messages in the chat context # SCRUM-7 @@ -144,16 +194,104 @@ async def chat( for m in messages ] - # Get the response from the model - model_response = await litellm.acompletion( - messages=messages, - tools=tools or None, - user=str(developer.id), # For tracking usage - tags=developer.tags, # For filtering models in litellm - custom_api_key=x_custom_api_key, - **settings, + # Check if using Claude model and has specific tool types + is_claude_model = settings["model"].lower().startswith("claude-3.5") + has_special_tools = any( + tool["type"] in ["computer_20241022", "bash_20241022", "text_editor_20241022"] + for tool in formatted_tools ) + if is_claude_model and has_special_tools: + # Use Anthropic API directly + client = AsyncAnthropic(api_key=anthropic_api_key) + + # Filter tools for specific types + filtered_tools = [ + tool + for tool in formatted_tools + if tool["type"] + in ["computer_20241022", "bash_20241022", "text_editor_20241022"] + ] + + # Format messages for Claude + claude_messages = [] + for msg in messages: + # Skip messages that are not assistant or user + if msg["role"] not in ["assistant", "user"]: + continue + + claude_messages.append({"role": msg["role"], "content": msg["content"]}) + + # Call Claude API + claude_response: BetaMessage = await client.beta.messages.create( + model="claude-3-5-sonnet-20241022", + messages=claude_messages, + tools=filtered_tools, + max_tokens=settings.get("max_tokens", 1024), + betas=[COMPUTER_USE_BETA_FLAG], + ) + + # Convert Claude response to litellm format + text_block = next( + (block for block in claude_response.content if block.type == "text"), + None, + ) + + if claude_response.stop_reason == "tool_use": + choice = Choices( + message=Message( + role="assistant", + content=text_block.text if text_block else None, + tool_calls=[ + ChatCompletionMessageToolCall( + type="function", + function=Function( + name=block.name, + arguments=block.input, + ), + ) + for block in claude_response.content + if block.type == "tool_use" + ], + ), + finish_reason="tool_calls", + ) + else: + assert ( + text_block + ), "Claude should always return a text block for stop_reason=stop" + choice = Choices( + message=Message( + role="assistant", + content=text_block.text, + ), + finish_reason="stop", + ) + + model_response = ModelResponse( + id=claude_response.id, + choices=[choice], + created=int(datetime.now().timestamp()), + model=claude_response.model, + object="text_completion", + usage={ + "total_tokens": 
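One detail of the tool_use conversion above that may deserve a second look: litellm's Function mirrors the OpenAI schema, where arguments is a JSON-encoded string, while Anthropic's block.input is a plain dict. A hedged sketch of the mapping with an explicit serialization step (the helper name is illustrative):

import json

from litellm import ChatCompletionMessageToolCall, Function


def to_litellm_tool_call(block) -> ChatCompletionMessageToolCall:
    # block is an Anthropic tool_use content block (block.type == "tool_use").
    # Serializing block.input keeps the arguments field a JSON string, as the
    # OpenAI-compatible schema expects; if litellm also tolerates raw dicts,
    # the json.dumps call is harmless.
    return ChatCompletionMessageToolCall(
        type="function",
        function=Function(name=block.name, arguments=json.dumps(block.input)),
    )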
claude_response.usage.input_tokens + + claude_response.usage.output_tokens + }, + ) + else: + # FIXME: hardcoded tool to a None value as the tool calls are not implemented yet + formatted_tools = None + # Use litellm for other models + model_response = await litellm.acompletion( + messages=messages, + tools=formatted_tools or None, + user=str(developer.id), + tags=developer.tags, + custom_api_key=x_custom_api_key, + **settings, + ) + # Save the input and the response to the session history if chat_input.save: new_entries = [ diff --git a/integrations-service/integrations/autogen/Chat.py b/integrations-service/integrations/autogen/Chat.py index d013ae8b7..380476806 100644 --- a/integrations-service/integrations/autogen/Chat.py +++ b/integrations-service/integrations/autogen/Chat.py @@ -165,7 +165,58 @@ class Content(BaseModel): """ +class ContentItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["image"] = "image" + source: Source + + class ContentModel(BaseModel): + """ + Anthropic image content part + """ + + model_config = ConfigDict( + populate_by_name=True, + ) + tool_use_id: str + type: Literal["tool_result"] = "tool_result" + content: list[ContentItem] + + +class ContentModel1(Content): + pass + + +class ContentModel2(ContentModel): + """ + Anthropic image content part + """ + + +class ContentModel3(Content): + pass + + +class ContentModel4(ContentModel): + """ + Anthropic image content part + """ + + +class ContentModel5(Content): + pass + + +class ContentModel6(ContentModel): + """ + Anthropic image content part + """ + + +class ContentModel7(BaseModel): model_config = ConfigDict( populate_by_name=True, ) @@ -193,7 +244,8 @@ class Delta(BaseModel): """ tool_call_id: str | None = None content: Annotated[ - str | list[str] | list[Content | ContentModel] | None, Field(...) + str | list[str] | list[ContentModel1 | ContentModel7 | ContentModel2] | None, + Field(...), ] = None """ The content parts of the message @@ -258,7 +310,8 @@ class Message(BaseModel): """ tool_call_id: str | None = None content: Annotated[ - str | list[str] | list[Content | ContentModel] | None, Field(...) + str | list[str] | list[Content | ContentModel7 | ContentModel] | None, + Field(...), ] = None """ The content parts of the message @@ -305,7 +358,8 @@ class MessageModel(BaseModel): """ tool_call_id: str | None = None content: Annotated[ - str | list[str] | list[Content | ContentModel] | None, Field(...) 
+ str | list[str] | list[ContentModel3 | ContentModel7 | ContentModel4] | None, + Field(...), ] = None """ The content parts of the message @@ -405,6 +459,15 @@ class SingleChatOutput(BaseChatOutput): message: MessageModel +class Source(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["base64"] = "base64" + media_type: str + data: str + + class TokenLogProb(BaseTokenLogProb): model_config = ConfigDict( populate_by_name=True, diff --git a/integrations-service/integrations/autogen/Entries.py b/integrations-service/integrations/autogen/Entries.py index f001fc880..829818e37 100644 --- a/integrations-service/integrations/autogen/Entries.py +++ b/integrations-service/integrations/autogen/Entries.py @@ -28,7 +28,7 @@ class BaseEntry(BaseModel): """ name: str | None = None content: ( - list[Content | ContentModel] + list[Content | ContentModel3 | ContentModel] | Tool | ChosenFunctionCall | ChosenComputer20241022 @@ -37,7 +37,7 @@ class BaseEntry(BaseModel): | str | ToolResponse | list[ - list[Content | ContentModel] + list[ContentModel1 | ContentModel3 | ContentModel2] | Tool | ChosenFunctionCall | ChosenComputer20241022 @@ -95,7 +95,38 @@ class Content(BaseModel): """ +class ContentItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["image"] = "image" + source: Source + + class ContentModel(BaseModel): + """ + Anthropic image content part + """ + + model_config = ConfigDict( + populate_by_name=True, + ) + tool_use_id: str + type: Literal["tool_result"] = "tool_result" + content: list[ContentItem] + + +class ContentModel1(Content): + pass + + +class ContentModel2(ContentModel): + """ + Anthropic image content part + """ + + +class ContentModel3(BaseModel): model_config = ConfigDict( populate_by_name=True, ) @@ -168,3 +199,12 @@ class Relation(BaseModel): head: UUID relation: str tail: UUID + + +class Source(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["base64"] = "base64" + media_type: str + data: str diff --git a/integrations-service/integrations/autogen/Tasks.py b/integrations-service/integrations/autogen/Tasks.py index e62e6d3c3..2fad6d63d 100644 --- a/integrations-service/integrations/autogen/Tasks.py +++ b/integrations-service/integrations/autogen/Tasks.py @@ -85,6 +85,14 @@ class Content(BaseModel): """ +class ContentItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["image"] = "image" + source: Source + + class ContentModel(BaseModel): model_config = ConfigDict( populate_by_name=True, @@ -99,14 +107,33 @@ class ContentModel(BaseModel): """ -class ContentModel1(Content): +class ContentModel1(BaseModel): + """ + Anthropic image content part + """ + + model_config = ConfigDict( + populate_by_name=True, + ) + tool_use_id: str + type: Literal["tool_result"] = "tool_result" + content: list[ContentItem] + + +class ContentModel2(Content): pass -class ContentModel2(ContentModel): +class ContentModel3(ContentModel): pass +class ContentModel4(ContentModel1): + """ + Anthropic image content part + """ + + class CreateTaskRequest(BaseModel): """ Payload for creating a task @@ -655,7 +682,8 @@ class PromptItem(BaseModel): """ tool_call_id: str | None = None content: Annotated[ - list[str] | list[Content | ContentModel] | str | None, Field(...) 
+ list[str] | list[Content | ContentModel | ContentModel1] | str | None, + Field(...), ] """ The content parts of the message @@ -861,6 +889,18 @@ class SleepStep(BaseModel): """ +class Source(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["base64"] = "base64" + media_type: str + data: str + """ + A valid jinja template. + """ + + class SwitchStep(BaseModel): model_config = ConfigDict( populate_by_name=True, diff --git a/typespec/entries/models.tsp b/typespec/entries/models.tsp index 61f47bce2..95fbadb55 100644 --- a/typespec/entries/models.tsp +++ b/typespec/entries/models.tsp @@ -50,7 +50,25 @@ model ChatMLImageContentPart { type: "image_url" = "image_url"; } -alias ChatMLContentPart = ChatMLTextContentPart | ChatMLImageContentPart; +model ChatMLAnthropicImageSource { + type: "base64" = "base64"; + media_type: string; + data: T; +} + +model ChatMLAnthropicInnerImageContentPart { + type: "image" = "image"; + source: ChatMLAnthropicImageSource; +} + +/** Anthropic image content part */ +model ChatMLAnthropicImageContentPart { + tool_use_id: string; + type: "tool_result" = "tool_result"; + content: ChatMLAnthropicInnerImageContentPart[]; +} + +alias ChatMLContentPart = ChatMLTextContentPart | ChatMLImageContentPart | ChatMLAnthropicImageContentPart; model ChatMLMessage { /** The role of the message */ @@ -119,4 +137,4 @@ model History { session_id: Session.id; ...HasCreatedAt; -} +} \ No newline at end of file diff --git a/typespec/tsp-output/@typespec/openapi3/openapi-1.0.0.yaml b/typespec/tsp-output/@typespec/openapi3/openapi-1.0.0.yaml index d83ce87f5..93b74354b 100644 --- a/typespec/tsp-output/@typespec/openapi3/openapi-1.0.0.yaml +++ b/typespec/tsp-output/@typespec/openapi3/openapi-1.0.0.yaml @@ -1836,6 +1836,49 @@ components: - image_url description: The type (fixed to 'image_url') default: image_url + - type: object + required: + - tool_use_id + - type + - content + properties: + tool_use_id: + type: string + type: + type: string + enum: + - tool_result + default: tool_result + content: + type: array + items: + type: object + required: + - type + - source + properties: + type: + type: string + enum: + - image + default: image + source: + type: object + required: + - type + - media_type + - data + properties: + type: + type: string + enum: + - base64 + default: base64 + media_type: + type: string + data: + type: string + description: Anthropic image content part nullable: true description: The content parts of the message name: @@ -1936,6 +1979,49 @@ components: - image_url description: The type (fixed to 'image_url') default: image_url + - type: object + required: + - tool_use_id + - type + - content + properties: + tool_use_id: + type: string + type: + type: string + enum: + - tool_result + default: tool_result + content: + type: array + items: + type: object + required: + - type + - source + properties: + type: + type: string + enum: + - image + default: image + source: + type: object + required: + - type + - media_type + - data + properties: + type: + type: string + enum: + - base64 + default: base64 + media_type: + type: string + data: + type: string + description: Anthropic image content part nullable: true description: The content parts of the message name: @@ -2167,6 +2253,49 @@ components: - image_url description: The type (fixed to 'image_url') default: image_url + - type: object + required: + - tool_use_id + - type + - content + properties: + tool_use_id: + type: string + type: + type: string + enum: + - tool_result + default: 
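On the wire, the schema added here accepts message content entries shaped like the dict below (a hedged example; the media type and base64 payload are placeholders):

# Example content entry matching the new tool_result schema.
anthropic_tool_result_content = {
    "tool_use_id": "toolu_123",
    "type": "tool_result",
    "content": [
        {
            "type": "image",
            "source": {
                "type": "base64",
                "media_type": "image/png",
                "data": "<base64-encoded image>",
            },
        }
    ],
}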
tool_result + content: + type: array + items: + type: object + required: + - type + - source + properties: + type: + type: string + enum: + - image + default: image + source: + type: object + required: + - type + - media_type + - data + properties: + type: + type: string + enum: + - base64 + default: base64 + media_type: + type: string + data: + type: string + description: Anthropic image content part nullable: true description: The content parts of the message name: @@ -2317,6 +2446,49 @@ components: - image_url description: The type (fixed to 'image_url') default: image_url + - type: object + required: + - tool_use_id + - type + - content + properties: + tool_use_id: + type: string + type: + type: string + enum: + - tool_result + default: tool_result + content: + type: array + items: + type: object + required: + - type + - source + properties: + type: + type: string + enum: + - image + default: image + source: + type: object + required: + - type + - media_type + - data + properties: + type: + type: string + enum: + - base64 + default: base64 + media_type: + type: string + data: + type: string + description: Anthropic image content part nullable: true description: The content parts of the message name: @@ -2809,6 +2981,49 @@ components: - image_url description: The type (fixed to 'image_url') default: image_url + - type: object + required: + - tool_use_id + - type + - content + properties: + tool_use_id: + type: string + type: + type: string + enum: + - tool_result + default: tool_result + content: + type: array + items: + type: object + required: + - type + - source + properties: + type: + type: string + enum: + - image + default: image + source: + type: object + required: + - type + - media_type + - data + properties: + type: + type: string + enum: + - base64 + default: base64 + media_type: + type: string + data: + type: string + description: Anthropic image content part - $ref: '#/components/schemas/Tools.Tool' - $ref: '#/components/schemas/Tools.ChosenFunctionCall' - $ref: '#/components/schemas/Tools.ChosenComputer20241022' @@ -2861,6 +3076,49 @@ components: - image_url description: The type (fixed to 'image_url') default: image_url + - type: object + required: + - tool_use_id + - type + - content + properties: + tool_use_id: + type: string + type: + type: string + enum: + - tool_result + default: tool_result + content: + type: array + items: + type: object + required: + - type + - source + properties: + type: + type: string + enum: + - image + default: image + source: + type: object + required: + - type + - media_type + - data + properties: + type: + type: string + enum: + - base64 + default: base64 + media_type: + type: string + data: + type: string + description: Anthropic image content part - $ref: '#/components/schemas/Tools.Tool' - $ref: '#/components/schemas/Tools.ChosenFunctionCall' - $ref: '#/components/schemas/Tools.ChosenComputer20241022' @@ -4784,6 +5042,49 @@ components: - image_url description: The type (fixed to 'image_url') default: image_url + - type: object + required: + - tool_use_id + - type + - content + properties: + tool_use_id: + type: string + type: + type: string + enum: + - tool_result + default: tool_result + content: + type: array + items: + type: object + required: + - type + - source + properties: + type: + type: string + enum: + - image + default: image + source: + type: object + required: + - type + - media_type + - data + properties: + type: + type: string + enum: + - base64 + default: base64 + media_type: + type: string + data: + $ref: 
'#/components/schemas/Common.JinjaTemplate' + description: Anthropic image content part nullable: true description: The content parts of the message name: @@ -4934,6 +5235,49 @@ components: - image_url description: The type (fixed to 'image_url') default: image_url + - type: object + required: + - tool_use_id + - type + - content + properties: + tool_use_id: + type: string + type: + type: string + enum: + - tool_result + default: tool_result + content: + type: array + items: + type: object + required: + - type + - source + properties: + type: + type: string + enum: + - image + default: image + source: + type: object + required: + - type + - media_type + - data + properties: + type: + type: string + enum: + - base64 + default: base64 + media_type: + type: string + data: + $ref: '#/components/schemas/Common.JinjaTemplate' + description: Anthropic image content part nullable: true description: The content parts of the message name:
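In task definitions the same content part is available inside a prompt step, where data is documented as a Jinja template (the Common.JinjaTemplate ref above). A hedged sketch, with placeholder template variables and an abbreviated step structure:

# Sketch: a prompt step that feeds a previously captured screenshot back to
# the model; "data" is rendered as a Jinja template before the request is
# sent. Variable names are placeholders, not repo conventions.
prompt_step = {
    "prompt": [
        {
            "role": "user",
            "content": [
                {
                    "tool_use_id": "toolu_123",  # placeholder
                    "type": "tool_result",
                    "content": [
                        {
                            "type": "image",
                            "source": {
                                "type": "base64",
                                "media_type": "image/png",
                                "data": "{{ screenshot_base64 }}",
                            },
                        }
                    ],
                }
            ],
        }
    ]
}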