
 from dotenv import load_dotenv
 from openai import OpenAI
-from openai.types.chat.chat_completion import ChatCompletion
 from pydantic import BaseModel
 from restack_ai.function import FunctionFailure, function, log

@@ -27,7 +26,7 @@ def raise_exception(message: str) -> None:


 @function.defn()
-async def llm_chat(agent_input: LlmChatInput) -> ChatCompletion:
+async def llm_chat(agent_input: LlmChatInput) -> dict[str, str]:
     try:
         log.info("llm_chat function started", agent_input=agent_input)

@@ -44,14 +43,23 @@ async def llm_chat(agent_input: LlmChatInput) -> ChatCompletion:
             {"role": "system", "content": agent_input.system_content}
         )

-        response = client.chat.completions.create(
+        assistant_raw_response = client.chat.completions.create(
             model=agent_input.model or "gpt-4o-mini",
             messages=agent_input.messages,
         )
     except Exception as e:
         log.error("llm_chat function failed", error=e)
         raise
     else:
-        log.info("llm_chat function completed", response=response)
+        log.info(
+            "llm_chat function completed", assistant_raw_response=assistant_raw_response
+        )
+
+        assistant_response = {
+            "role": assistant_raw_response.choices[0].message.role,
+            "content": assistant_raw_response.choices[0].message.content,
+        }
+
+        log.info("assistant_response", assistant_response=assistant_response)

-        return response
+        return assistant_response
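
With this change, llm_chat returns a plain role/content dict instead of the full ChatCompletion object, so the reply is JSON-serializable and can be appended directly to the conversation history. The following is a minimal caller-side sketch under assumptions: the import path, the run_turn helper, and the direct await of llm_chat are illustrative only and not part of this diff; in a Restack workflow or agent the function would normally be invoked as a step rather than called directly.

# Hypothetical caller-side sketch; module path and helper name are assumptions.
from src.functions.llm_chat import LlmChatInput, llm_chat  # assumed import path


async def run_turn(
    history: list[dict[str, str]], user_text: str
) -> list[dict[str, str]]:
    """Append a user turn, call llm_chat, and append the assistant reply."""
    history.append({"role": "user", "content": user_text})

    # llm_chat now returns {"role": ..., "content": ...} rather than a
    # ChatCompletion, so the reply can go straight into the message history.
    assistant_response = await llm_chat(LlmChatInput(messages=history))
    history.append(assistant_response)
    return history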