Skip to content

Commit 3ead6ef

Browse files
committed
fix the issue
1 parent 4e87914 commit 3ead6ef

File tree

4 files changed

+204
-7
lines changed

4 files changed

+204
-7
lines changed

src/agents/extensions/models/litellm_model.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -266,6 +266,8 @@ async def _fetch_response(
266266
"role": "system",
267267
},
268268
)
269+
converted_messages = _to_dump_compatible(converted_messages)
270+
269271
if tracing.include_data():
270272
span.span_data.input = converted_messages
271273

@@ -284,16 +286,18 @@ async def _fetch_response(
284286
for handoff in handoffs:
285287
converted_tools.append(Converter.convert_handoff_tool(handoff))
286288

289+
converted_tools = _to_dump_compatible(converted_tools)
290+
287291
if _debug.DONT_LOG_MODEL_DATA:
288292
logger.debug("Calling LLM")
289293
else:
290294
messages_json = json.dumps(
291-
_to_dump_compatible(converted_messages),
295+
converted_messages,
292296
indent=2,
293297
ensure_ascii=False,
294298
)
295299
tools_json = json.dumps(
296-
_to_dump_compatible(converted_tools),
300+
converted_tools,
297301
indent=2,
298302
ensure_ascii=False,
299303
)

src/agents/models/openai_chatcompletions.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -238,6 +238,8 @@ async def _fetch_response(
238238
"role": "system",
239239
},
240240
)
241+
converted_messages = _to_dump_compatible(converted_messages)
242+
241243
if tracing.include_data():
242244
span.span_data.input = converted_messages
243245

@@ -256,16 +258,18 @@ async def _fetch_response(
256258
for handoff in handoffs:
257259
converted_tools.append(Converter.convert_handoff_tool(handoff))
258260

261+
converted_tools = _to_dump_compatible(converted_tools)
262+
259263
if _debug.DONT_LOG_MODEL_DATA:
260264
logger.debug("Calling LLM")
261265
else:
262266
messages_json = json.dumps(
263-
_to_dump_compatible(converted_messages),
267+
converted_messages,
264268
indent=2,
265269
ensure_ascii=False,
266270
)
267271
tools_json = json.dumps(
268-
_to_dump_compatible(converted_tools),
272+
converted_tools,
269273
indent=2,
270274
ensure_ascii=False,
271275
)

src/agents/models/openai_responses.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -241,6 +241,7 @@ async def _fetch_response(
241241
prompt: ResponsePromptParam | None = None,
242242
) -> Response | AsyncStream[ResponseStreamEvent]:
243243
list_input = ItemHelpers.input_to_new_input_list(input)
244+
list_input = _to_dump_compatible(list_input)
244245

245246
parallel_tool_calls = (
246247
True
@@ -252,6 +253,7 @@ async def _fetch_response(
252253

253254
tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
254255
converted_tools = Converter.convert_tools(tools, handoffs)
256+
converted_tools_payload = _to_dump_compatible(converted_tools.tools)
255257
response_format = Converter.get_response_format(output_schema)
256258

257259
include_set: set[str] = set(converted_tools.includes)
@@ -265,12 +267,12 @@ async def _fetch_response(
265267
logger.debug("Calling LLM")
266268
else:
267269
input_json = json.dumps(
268-
_to_dump_compatible(list_input),
270+
list_input,
269271
indent=2,
270272
ensure_ascii=False,
271273
)
272274
tools_json = json.dumps(
273-
_to_dump_compatible(converted_tools.tools),
275+
converted_tools_payload,
274276
indent=2,
275277
ensure_ascii=False,
276278
)
@@ -301,7 +303,7 @@ async def _fetch_response(
301303
model=self.model,
302304
input=list_input,
303305
include=include,
304-
tools=converted_tools.tools,
306+
tools=converted_tools_payload,
305307
prompt=self._non_null_or_not_given(prompt),
306308
temperature=self._non_null_or_not_given(model_settings.temperature),
307309
top_p=self._non_null_or_not_given(model_settings.top_p),
Lines changed: 187 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,187 @@
1+
from __future__ import annotations
2+
3+
from collections.abc import Iterable, Iterator
4+
from typing import Any, cast
5+
6+
import httpx
7+
import pytest
8+
from openai import NOT_GIVEN
9+
from openai.types.chat.chat_completion import ChatCompletion
10+
from openai.types.responses import ToolParam
11+
12+
from agents import (
13+
ModelSettings,
14+
ModelTracing,
15+
OpenAIChatCompletionsModel,
16+
OpenAIResponsesModel,
17+
generation_span,
18+
)
19+
from agents.models import (
20+
openai_chatcompletions as chat_module,
21+
openai_responses as responses_module,
22+
)
23+
24+
25+
class _SingleUseIterable:
26+
"""Helper iterable that raises if iterated more than once."""
27+
28+
def __init__(self, values: list[object]) -> None:
29+
self._values = list(values)
30+
self.iterations = 0
31+
32+
def __iter__(self) -> Iterator[object]:
33+
if self.iterations:
34+
raise RuntimeError("Iterable should have been materialized exactly once.")
35+
self.iterations += 1
36+
yield from self._values
37+
38+
39+
def _force_materialization(value: object) -> None:
40+
if isinstance(value, dict):
41+
for nested in value.values():
42+
_force_materialization(nested)
43+
elif isinstance(value, list):
44+
for nested in value:
45+
_force_materialization(nested)
46+
elif isinstance(value, Iterable) and not isinstance(value, (str, bytes, bytearray)):
47+
list(value)
48+
49+
50+
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_chat_completions_materializes_iterator_payload(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Chat Completions path must materialize one-shot iterators exactly once.

    NOTE(review): per the accompanying diff, converted messages/tools were
    previously dump-converted only for debug logging, which consumed embedded
    iterators before the real request — this pins the fixed behavior.
    """
    # One-shot iterables planted inside the converted payload; they raise if
    # anything iterates them a second time.
    message_iter = _SingleUseIterable([{"type": "text", "text": "hi"}])
    tool_iter = _SingleUseIterable([{"type": "string"}])

    chat_converter = cast(Any, chat_module).Converter

    # Patch the converters so the payload handed to the model embeds the
    # one-shot iterables above.
    monkeypatch.setattr(
        chat_converter,
        "items_to_messages",
        classmethod(lambda _cls, _input: [{"role": "user", "content": message_iter}]),
    )
    monkeypatch.setattr(
        chat_converter,
        "tool_to_openai",
        classmethod(
            lambda _cls, _tool: {
                "type": "function",
                "function": {
                    "name": "dummy",
                    "parameters": {"properties": tool_iter},
                },
            }
        ),
    )

    captured_kwargs: dict[str, Any] = {}

    class DummyCompletions:
        # Stand-in for client.chat.completions: records kwargs, then walks the
        # payload like a serializer would, exhausting any iterators inside it.
        async def create(self, **kwargs):
            captured_kwargs.update(kwargs)
            _force_materialization(kwargs["messages"])
            if kwargs["tools"] is not NOT_GIVEN:
                _force_materialization(kwargs["tools"])
            return ChatCompletion(
                id="dummy-id",
                created=0,
                model="gpt-4",
                object="chat.completion",
                choices=[],
                usage=None,
            )

    class DummyClient:
        # Minimal shape the model reads: .chat.completions and .base_url.
        def __init__(self) -> None:
            self.chat = type("_Chat", (), {"completions": DummyCompletions()})()
            self.base_url = httpx.URL("http://example.test")

    model = OpenAIChatCompletionsModel(model="gpt-4", openai_client=DummyClient())  # type: ignore[arg-type]

    with generation_span(disabled=True) as span:
        await cast(Any, model)._fetch_response(
            system_instructions=None,
            input="ignored",
            model_settings=ModelSettings(),
            tools=[object()],
            output_schema=None,
            handoffs=[],
            span=span,
            tracing=ModelTracing.DISABLED,
            stream=False,
        )

    # Each iterator was consumed exactly once, and the payload that reached
    # the client holds concrete lists rather than lazy iterators.
    assert message_iter.iterations == 1
    assert tool_iter.iterations == 1
    assert isinstance(captured_kwargs["messages"][0]["content"], list)
    assert isinstance(captured_kwargs["tools"][0]["function"]["parameters"]["properties"], list)
120+
121+
122+
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_responses_materializes_iterator_payload(monkeypatch: pytest.MonkeyPatch) -> None:
    """Responses API path must materialize one-shot iterators exactly once.

    Mirrors the Chat Completions test for OpenAIResponsesModel: list_input and
    converted tools must be dump-compatible (fully materialized) before being
    logged and sent, so embedded iterators are consumed a single time.
    """
    # One-shot iterables planted inside the input and the converted tools.
    input_iter = _SingleUseIterable([{"type": "input_text", "text": "hello"}])
    tool_iter = _SingleUseIterable([{"type": "string"}])

    responses_item_helpers = cast(Any, responses_module).ItemHelpers
    responses_converter = cast(Any, responses_module).Converter

    monkeypatch.setattr(
        responses_item_helpers,
        "input_to_new_input_list",
        classmethod(lambda _cls, _input: [{"role": "user", "content": input_iter}]),
    )

    # Prebuilt ConvertedTools whose parameters embed the one-shot iterable.
    converted_tools = responses_module.ConvertedTools(
        tools=cast(
            list[ToolParam],
            [
                {
                    "type": "function",
                    "name": "dummy",
                    "parameters": {"properties": tool_iter},
                }
            ],
        ),
        includes=[],
    )
    monkeypatch.setattr(
        responses_converter,
        "convert_tools",
        classmethod(lambda _cls, _tools, _handoffs: converted_tools),
    )

    captured_kwargs: dict[str, Any] = {}

    class DummyResponses:
        # Stand-in for client.responses: records kwargs, then exhausts any
        # iterators in the payload the way a serializer would.
        async def create(self, **kwargs):
            captured_kwargs.update(kwargs)
            _force_materialization(kwargs["input"])
            _force_materialization(kwargs["tools"])
            return object()

    class DummyClient:
        # Minimal shape the model reads: just .responses.
        def __init__(self) -> None:
            self.responses = DummyResponses()

    model = OpenAIResponsesModel(model="gpt-4.1", openai_client=DummyClient())  # type: ignore[arg-type]

    await cast(Any, model)._fetch_response(
        system_instructions=None,
        input="ignored",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        previous_response_id=None,
        conversation_id=None,
        stream=False,
        prompt=None,
    )

    # Each iterator was consumed exactly once, and the payload that reached
    # the client holds concrete lists rather than lazy iterators.
    assert input_iter.iterations == 1
    assert tool_iter.iterations == 1
    assert isinstance(captured_kwargs["input"][0]["content"], list)
    assert isinstance(captured_kwargs["tools"][0]["parameters"]["properties"], list)

0 commit comments

Comments
 (0)