from collections import defaultdict
from typing import Any, Optional

import pytest

from agents.agent import Agent
from agents.items import ItemHelpers, ModelResponse, TResponseInputItem
from agents.lifecycle import RunHooks
from agents.run import Runner
from agents.run_context import RunContextWrapper, TContext
from agents.tool import Tool
from tests.test_agent_llm_hooks import AgentHooksForTests

from .fake_model import FakeModel
from .test_responses import (
    get_function_tool,
    get_text_message,
)
19+
class RunHooksForTests(RunHooks):
    """Run-level lifecycle hooks that count how often each callback fires.

    Every callback simply increments a counter keyed by the callback's name,
    so tests can assert on the exact multiset of lifecycle events emitted
    during a run.
    """

    def __init__(self):
        # Keep the base class in the initialization chain in case RunHooks
        # ever gains state of its own.
        super().__init__()
        # defaultdict(int): missing keys read as 0, so each callback can
        # increment unconditionally without pre-registering event names.
        self.events: dict[str, int] = defaultdict(int)

    def reset(self):
        """Clear all recorded event counts (for reuse across test phases)."""
        self.events.clear()

    async def on_agent_start(
        self, context: RunContextWrapper[TContext], agent: Agent[TContext]
    ) -> None:
        self.events["on_agent_start"] += 1

    async def on_agent_end(
        self, context: RunContextWrapper[TContext], agent: Agent[TContext], output: Any
    ) -> None:
        self.events["on_agent_end"] += 1

    async def on_handoff(
        self,
        context: RunContextWrapper[TContext],
        from_agent: Agent[TContext],
        to_agent: Agent[TContext],
    ) -> None:
        self.events["on_handoff"] += 1

    async def on_tool_start(
        self, context: RunContextWrapper[TContext], agent: Agent[TContext], tool: Tool
    ) -> None:
        self.events["on_tool_start"] += 1

    async def on_tool_end(
        self,
        context: RunContextWrapper[TContext],
        agent: Agent[TContext],
        tool: Tool,
        result: str,
    ) -> None:
        self.events["on_tool_end"] += 1

    async def on_llm_start(
        self,
        context: RunContextWrapper[TContext],
        agent: Agent[TContext],
        system_prompt: Optional[str],
        input_items: list[TResponseInputItem],
    ) -> None:
        self.events["on_llm_start"] += 1

    async def on_llm_end(
        self,
        context: RunContextWrapper[TContext],
        agent: Agent[TContext],
        response: ModelResponse,
    ) -> None:
        self.events["on_llm_end"] += 1
70+
71+
72+ # Example test using the above hooks
@pytest.mark.asyncio
async def test_async_run_hooks_with_llm():
    """Async run: exactly one start/llm_start/llm_end/end event is recorded."""
    run_hooks = RunHooksForTests()
    fake_model = FakeModel()

    agent = Agent(
        name="A",
        model=fake_model,
        tools=[get_function_tool("f", "res")],
        handoffs=[],
    )
    # One simulated model turn that immediately yields a final text output,
    # so the run finishes without any tool calls or handoffs.
    fake_model.set_next_output([get_text_message("hello")])

    await Runner.run(agent, input="hello", hooks=run_hooks)

    expected = {
        "on_agent_start": 1,
        "on_llm_start": 1,
        "on_llm_end": 1,
        "on_agent_end": 1,
    }
    assert run_hooks.events == expected
86+
def test_sync_run_hook_with_llm():
    """Sync run (run_sync): same single-run event counts as the async path."""
    run_hooks = RunHooksForTests()
    fake_model = FakeModel()

    agent = Agent(
        name="A",
        model=fake_model,
        tools=[get_function_tool("f", "res")],
        handoffs=[],
    )
    # One simulated model turn producing a final text message.
    fake_model.set_next_output([get_text_message("hello")])

    Runner.run_sync(agent, input="hello", hooks=run_hooks)

    assert run_hooks.events == {
        "on_agent_start": 1,
        "on_llm_start": 1,
        "on_llm_end": 1,
        "on_agent_end": 1,
    }
99+
@pytest.mark.asyncio
async def test_streamed_run_hooks_with_llm():
    """Streamed run: draining the event stream still fires one of each hook."""
    run_hooks = RunHooksForTests()
    fake_model = FakeModel()

    agent = Agent(
        name="A",
        model=fake_model,
        tools=[get_function_tool("f", "res")],
        handoffs=[],
    )
    # One simulated model turn producing a final text message.
    fake_model.set_next_output([get_text_message("hello")])

    result = Runner.run_streamed(agent, input="hello", hooks=run_hooks)

    # The run only completes (and hooks only finish firing) once every event
    # has been consumed, so drain the stream fully before asserting.
    async for event in result.stream_events():
        if event.type == "raw_response_event":
            continue
        if event.type == "agent_updated_stream_event":
            print(f"[EVENT] agent_updated → {event.new_agent.name}")
        elif event.type == "run_item_stream_event":
            item = event.item
            if item.type == "tool_call_item":
                print("[EVENT] tool_call_item")
            elif item.type == "tool_call_output_item":
                print(f"[EVENT] tool_call_output_item → {item.output}")
            elif item.type == "message_output_item":
                text = ItemHelpers.text_message_output(item)
                print(f"[EVENT] message_output_item → {text}")

    assert run_hooks.events == {
        "on_agent_start": 1,
        "on_llm_start": 1,
        "on_llm_end": 1,
        "on_agent_end": 1,
    }
129+
@pytest.mark.asyncio
async def test_async_run_hooks_with_agent_hooks_with_llm():
    """Run-level and agent-level hooks both see one full start→LLM→end cycle."""
    run_hooks = RunHooksForTests()
    agent_hooks = AgentHooksForTests()
    fake_model = FakeModel()

    agent = Agent(
        name="A",
        model=fake_model,
        tools=[get_function_tool("f", "res")],
        handoffs=[],
        hooks=agent_hooks,
    )
    # One simulated model turn producing a final text message.
    fake_model.set_next_output([get_text_message("hello")])

    await Runner.run(agent, input="hello", hooks=run_hooks)

    # Run-level hooks: one agent start/end pair around one LLM call.
    assert run_hooks.events == {
        "on_agent_start": 1,
        "on_llm_start": 1,
        "on_llm_end": 1,
        "on_agent_end": 1,
    }
    # Agent-level hooks observe the same lifecycle under their own event names.
    assert agent_hooks.events == {
        "on_start": 1,
        "on_llm_start": 1,
        "on_llm_end": 1,
        "on_end": 1,
    }