Skip to content

Commit b8939e7

Browse files
authored
models - unsupported content types (#144)
1 parent 947f6b6 commit b8939e7

File tree

9 files changed

+60
-53
lines changed

9 files changed

+60
-53
lines changed

src/strands/models/anthropic.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44
"""
55

66
import base64
7-
import json
87
import logging
98
import mimetypes
109
from typing import Any, Iterable, Optional, TypedDict, cast
@@ -95,6 +94,9 @@ def _format_request_message_content(self, content: ContentBlock) -> dict[str, An
9594
9695
Returns:
9796
Anthropic formatted content block.
97+
98+
Raises:
99+
TypeError: If the content block type cannot be converted to an Anthropic-compatible format.
98100
"""
99101
if "document" in content:
100102
mime_type = mimetypes.types_map.get(f".{content['document']['format']}", "application/octet-stream")
@@ -151,7 +153,7 @@ def _format_request_message_content(self, content: ContentBlock) -> dict[str, An
151153
"type": "tool_result",
152154
}
153155

154-
return {"text": json.dumps(content), "type": "text"}
156+
raise TypeError(f"content_type=<{next(iter(content))}> | unsupported type")
155157

156158
def _format_request_messages(self, messages: Messages) -> list[dict[str, Any]]:
157159
"""Format an Anthropic messages array.
@@ -192,6 +194,10 @@ def format_request(
192194
193195
Returns:
194196
An Anthropic streaming request.
197+
198+
Raises:
199+
TypeError: If a message contains a content block type that cannot be converted to an Anthropic-compatible
200+
format.
195201
"""
196202
return {
197203
"max_tokens": self.config["max_tokens"],

src/strands/models/litellm.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,9 @@ def format_request_message_content(content: ContentBlock) -> dict[str, Any]:
7676
7777
Returns:
7878
LiteLLM formatted content block.
79+
80+
Raises:
81+
TypeError: If the content block type cannot be converted to a LiteLLM-compatible format.
7982
"""
8083
if "reasoningContent" in content:
8184
return {

src/strands/models/llamaapi.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,9 @@ def _format_request_message_content(self, content: ContentBlock) -> dict[str, An
9292
9393
Returns:
9494
LlamaAPI formatted content block.
95+
96+
Raises:
97+
TypeError: If the content block type cannot be converted to a LlamaAPI-compatible format.
9598
"""
9699
if "image" in content:
97100
mime_type = mimetypes.types_map.get(f".{content['image']['format']}", "application/octet-stream")
@@ -107,7 +110,7 @@ def _format_request_message_content(self, content: ContentBlock) -> dict[str, An
107110
if "text" in content:
108111
return {"text": content["text"], "type": "text"}
109112

110-
return {"text": json.dumps(content), "type": "text"}
113+
raise TypeError(f"content_type=<{next(iter(content))}> | unsupported type")
111114

112115
def _format_request_message_tool_call(self, tool_use: ToolUse) -> dict[str, Any]:
113116
"""Format a Llama API tool call.
@@ -196,6 +199,10 @@ def format_request(
196199
197200
Returns:
198201
A Llama API chat streaming request.
202+
203+
Raises:
204+
TypeError: If a message contains a content block type that cannot be converted to a LlamaAPI-compatible
205+
format.
199206
"""
200207
request = {
201208
"messages": self._format_request_messages(messages, system_prompt),

src/strands/models/ollama.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -105,6 +105,10 @@ def format_request(
105105
106106
Returns:
107107
An Ollama chat streaming request.
108+
109+
Raises:
110+
TypeError: If a message contains a content block type that cannot be converted to an Ollama-compatible
111+
format.
108112
"""
109113

110114
def format_message(message: Message, content: ContentBlock) -> dict[str, Any]:
@@ -153,7 +157,7 @@ def format_message(message: Message, content: ContentBlock) -> dict[str, Any]:
153157
**({"images": result_images} if result_images else {}),
154158
}
155159

156-
return {"role": message["role"], "content": json.dumps(content)}
160+
raise TypeError(f"content_type=<{next(iter(content))}> | unsupported type")
157161

158162
def format_messages() -> list[dict[str, Any]]:
159163
return [format_message(message, content) for message in messages for content in message["content"]]

src/strands/types/models/openai.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,9 @@ def format_request_message_content(content: ContentBlock) -> dict[str, Any]:
4040
4141
Returns:
4242
OpenAI compatible content block.
43+
44+
Raises:
45+
TypeError: If the content block type cannot be converted to an OpenAI-compatible format.
4346
"""
4447
if "document" in content:
4548
mime_type = mimetypes.types_map.get(f".{content['document']['format']}", "application/octet-stream")
@@ -67,7 +70,7 @@ def format_request_message_content(content: ContentBlock) -> dict[str, Any]:
6770
if "text" in content:
6871
return {"text": content["text"], "type": "text"}
6972

70-
return {"text": json.dumps(content), "type": "text"}
73+
raise TypeError(f"content_type=<{next(iter(content))}> | unsupported type")
7174

7275
@staticmethod
7376
def format_request_message_tool_call(tool_use: ToolUse) -> dict[str, Any]:
@@ -163,6 +166,10 @@ def format_request(
163166
164167
Returns:
165168
An OpenAI compatible chat streaming request.
169+
170+
Raises:
171+
TypeError: If a message contains a content block type that cannot be converted to an OpenAI-compatible
172+
format.
166173
"""
167174
return {
168175
"messages": self.format_request_messages(messages, system_prompt),

tests/strands/models/test_anthropic.py

Lines changed: 4 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import json
21
import unittest.mock
32

43
import anthropic
@@ -339,33 +338,16 @@ def test_format_request_with_tool_results(model, model_id, max_tokens):
339338
assert tru_request == exp_request
340339

341340

342-
def test_format_request_with_other(model, model_id, max_tokens):
341+
def test_format_request_with_unsupported_type(model):
343342
messages = [
344343
{
345344
"role": "user",
346-
"content": [{"other": {"a": 1}}],
345+
"content": [{"unsupported": {}}],
347346
},
348347
]
349348

350-
tru_request = model.format_request(messages)
351-
exp_request = {
352-
"max_tokens": max_tokens,
353-
"messages": [
354-
{
355-
"role": "user",
356-
"content": [
357-
{
358-
"text": json.dumps({"other": {"a": 1}}),
359-
"type": "text",
360-
},
361-
],
362-
},
363-
],
364-
"model": model_id,
365-
"tools": [],
366-
}
367-
368-
assert tru_request == exp_request
349+
with pytest.raises(TypeError, match="content_type=<unsupported> | unsupported type"):
350+
model.format_request(messages)
369351

370352

371353
def test_format_request_with_cache_point(model, model_id, max_tokens):

tests/strands/models/test_llamaapi.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -233,6 +233,18 @@ def test_format_request_with_empty_content(model, model_id):
233233
assert tru_request == exp_request
234234

235235

236+
def test_format_request_with_unsupported_type(model):
237+
messages = [
238+
{
239+
"role": "user",
240+
"content": [{"unsupported": {}}],
241+
},
242+
]
243+
244+
with pytest.raises(TypeError, match="content_type=<unsupported> | unsupported type"):
245+
model.format_request(messages)
246+
247+
236248
def test_format_chunk_message_start(model):
237249
event = {"chunk_type": "message_start"}
238250

tests/strands/models/test_ollama.py

Lines changed: 5 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -280,29 +280,16 @@ def test_format_request_with_tool_result_other(model, model_id):
280280
assert tru_request == exp_request
281281

282282

283-
def test_format_request_with_other(model, model_id):
283+
def test_format_request_with_unsupported_type(model):
284284
messages = [
285285
{
286286
"role": "user",
287-
"content": [{"other": {"a": 1}}],
288-
}
287+
"content": [{"unsupported": {}}],
288+
},
289289
]
290290

291-
tru_request = model.format_request(messages)
292-
exp_request = {
293-
"messages": [
294-
{
295-
"role": "user",
296-
"content": json.dumps({"other": {"a": 1}}),
297-
}
298-
],
299-
"model": model_id,
300-
"options": {},
301-
"stream": True,
302-
"tools": [],
303-
}
304-
305-
assert tru_request == exp_request
291+
with pytest.raises(TypeError, match="content_type=<unsupported> | unsupported type"):
292+
model.format_request(messages)
306293

307294

308295
def test_format_request_with_tool_specs(model, messages, model_id):

tests/strands/types/models/test_openai.py

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -101,21 +101,20 @@ def system_prompt():
101101
{"text": "hello"},
102102
{"type": "text", "text": "hello"},
103103
),
104-
# Other
105-
(
106-
{"other": {"a": 1}},
107-
{
108-
"text": json.dumps({"other": {"a": 1}}),
109-
"type": "text",
110-
},
111-
),
112104
],
113105
)
114106
def test_format_request_message_content(content, exp_result):
115107
tru_result = SAOpenAIModel.format_request_message_content(content)
116108
assert tru_result == exp_result
117109

118110

111+
def test_format_request_message_content_unsupported_type():
112+
content = {"unsupported": {}}
113+
114+
with pytest.raises(TypeError, match="content_type=<unsupported> | unsupported type"):
115+
SAOpenAIModel.format_request_message_content(content)
116+
117+
119118
def test_format_request_message_tool_call():
120119
tool_use = {
121120
"input": {"expression": "2+2"},

0 commit comments

Comments
 (0)