
Commit cb7bec5

add tests for run hooks
1 parent cdcfa4e commit cb7bec5

File tree

1 file changed: +169 -0


tests/test_run_hooks.py

Lines changed: 169 additions & 0 deletions
@@ -0,0 +1,169 @@
from collections import defaultdict
from typing import Any, Optional

import pytest

from agents.agent import Agent
from agents.items import ItemHelpers, ModelResponse, TResponseInputItem
from agents.lifecycle import RunHooks
from agents.run import Runner
from agents.run_context import RunContextWrapper, TContext
from agents.tool import Tool
from tests.test_agent_llm_hooks import AgentHooksForTests

from .fake_model import FakeModel
from .test_responses import (
    get_function_tool,
    get_text_message,
)


class RunHooksForTests(RunHooks):
    """Counts every run-level lifecycle callback it receives."""

    def __init__(self):
        self.events: dict[str, int] = defaultdict(int)

    def reset(self):
        self.events.clear()

    async def on_agent_start(
        self, context: RunContextWrapper[TContext], agent: Agent[TContext]
    ) -> None:
        self.events["on_agent_start"] += 1

    async def on_agent_end(
        self, context: RunContextWrapper[TContext], agent: Agent[TContext], output: Any
    ) -> None:
        self.events["on_agent_end"] += 1

    async def on_handoff(
        self,
        context: RunContextWrapper[TContext],
        from_agent: Agent[TContext],
        to_agent: Agent[TContext],
    ) -> None:
        self.events["on_handoff"] += 1

    async def on_tool_start(
        self, context: RunContextWrapper[TContext], agent: Agent[TContext], tool: Tool
    ) -> None:
        self.events["on_tool_start"] += 1

    async def on_tool_end(
        self,
        context: RunContextWrapper[TContext],
        agent: Agent[TContext],
        tool: Tool,
        result: str,
    ) -> None:
        self.events["on_tool_end"] += 1

    async def on_llm_start(
        self,
        context: RunContextWrapper[TContext],
        agent: Agent[TContext],
        system_prompt: Optional[str],
        input_items: list[TResponseInputItem],
    ) -> None:
        self.events["on_llm_start"] += 1

    async def on_llm_end(
        self,
        context: RunContextWrapper[TContext],
        agent: Agent[TContext],
        response: ModelResponse,
    ) -> None:
        self.events["on_llm_end"] += 1


# Example tests using the above hooks
@pytest.mark.asyncio
async def test_async_run_hooks_with_llm():
    hooks = RunHooksForTests()
    model = FakeModel()

    agent = Agent(name="A", model=model, tools=[get_function_tool("f", "res")], handoffs=[])
    # Simulate a single LLM call producing an output:
    model.set_next_output([get_text_message("hello")])
    await Runner.run(agent, input="hello", hooks=hooks)
    # Expect one on_agent_start, one on_llm_start, one on_llm_end, and one on_agent_end
    assert hooks.events == {
        "on_agent_start": 1,
        "on_llm_start": 1,
        "on_llm_end": 1,
        "on_agent_end": 1,
    }


def test_sync_run_hooks_with_llm():
    hooks = RunHooksForTests()
    model = FakeModel()
    agent = Agent(name="A", model=model, tools=[get_function_tool("f", "res")], handoffs=[])
    # Simulate a single LLM call producing an output:
    model.set_next_output([get_text_message("hello")])
    Runner.run_sync(agent, input="hello", hooks=hooks)
    # Expect one on_agent_start, one on_llm_start, one on_llm_end, and one on_agent_end
    assert hooks.events == {
        "on_agent_start": 1,
        "on_llm_start": 1,
        "on_llm_end": 1,
        "on_agent_end": 1,
    }


@pytest.mark.asyncio
async def test_streamed_run_hooks_with_llm():
    hooks = RunHooksForTests()
    model = FakeModel()
    agent = Agent(name="A", model=model, tools=[get_function_tool("f", "res")], handoffs=[])
    # Simulate a single LLM call producing an output:
    model.set_next_output([get_text_message("hello")])
    stream = Runner.run_streamed(agent, input="hello", hooks=hooks)

    # Drain the stream so the run completes before asserting on the hooks.
    async for event in stream.stream_events():
        if event.type == "raw_response_event":
            continue
        if event.type == "agent_updated_stream_event":
            print(f"[EVENT] agent_updated → {event.new_agent.name}")
        elif event.type == "run_item_stream_event":
            item = event.item
            if item.type == "tool_call_item":
                print("[EVENT] tool_call_item")
            elif item.type == "tool_call_output_item":
                print(f"[EVENT] tool_call_output_item → {item.output}")
            elif item.type == "message_output_item":
                text = ItemHelpers.text_message_output(item)
                print(f"[EVENT] message_output_item → {text}")

    # Expect one on_agent_start, one on_llm_start, one on_llm_end, and one on_agent_end
    assert hooks.events == {
        "on_agent_start": 1,
        "on_llm_start": 1,
        "on_llm_end": 1,
        "on_agent_end": 1,
    }


@pytest.mark.asyncio
async def test_async_run_hooks_with_agent_hooks_with_llm():
    hooks = RunHooksForTests()
    agent_hooks = AgentHooksForTests()
    model = FakeModel()

    agent = Agent(
        name="A", model=model, tools=[get_function_tool("f", "res")], handoffs=[], hooks=agent_hooks
    )
    # Simulate a single LLM call producing an output:
    model.set_next_output([get_text_message("hello")])
    await Runner.run(agent, input="hello", hooks=hooks)
    # Expect one on_agent_start, one on_llm_start, one on_llm_end, and one on_agent_end
    assert hooks.events == {
        "on_agent_start": 1,
        "on_llm_start": 1,
        "on_llm_end": 1,
        "on_agent_end": 1,
    }
    # Expect one on_start, one on_llm_start, one on_llm_end, and one on_end
    assert agent_hooks.events == {"on_start": 1, "on_llm_start": 1, "on_llm_end": 1, "on_end": 1}
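
One gap worth noting: RunHooksForTests also counts on_tool_start, on_tool_end, and on_handoff, but none of the tests in this commit ever triggers them. A minimal sketch of a follow-up test for the tool hooks, assuming this suite's helpers also provide get_function_tool_call (in tests/test_responses.py) and FakeModel.add_multiple_turn_outputs for queueing multi-turn fake outputs; both names are assumptions, so adjust to the suite's actual helpers if they differ:

# Sketch only (not part of this commit). Assumes get_function_tool_call and
# FakeModel.add_multiple_turn_outputs exist in this test suite; both are
# assumed helpers, not verified here.
@pytest.mark.asyncio
async def test_async_run_hooks_with_tool_call():
    hooks = RunHooksForTests()
    model = FakeModel()
    agent = Agent(name="A", model=model, tools=[get_function_tool("f", "res")], handoffs=[])
    # Turn 1: the model requests tool "f"; turn 2: it emits the final message.
    model.add_multiple_turn_outputs(
        [
            [get_function_tool_call("f", "")],
            [get_text_message("done")],
        ]
    )
    await Runner.run(agent, input="hello", hooks=hooks)
    # Two LLM turns plus one tool round-trip within a single agent run.
    assert hooks.events == {
        "on_agent_start": 1,
        "on_llm_start": 2,
        "on_llm_end": 2,
        "on_tool_start": 1,
        "on_tool_end": 1,
        "on_agent_end": 1,
    }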
