@@ -1,7 +1,7 @@
 from datetime import timedelta
 
 from pydantic import BaseModel
-from restack_ai.agent import agent, import_functions, log
+from restack_ai.agent import NonRetryableError, agent, import_functions, log
 
 from src.workflows.todo_execute import TodoExecute, TodoExecuteParams
 
@@ -47,107 +47,128 @@ async def messages(self, messages_event: MessagesEvent) -> list[Message]:
                     description="Execute a todo; it needs to be created first and needs user confirmation before executing.",
                 ),
             ]
-
-            completion = await agent.step(
-                function=llm_chat,
-                function_input=LlmChatInput(messages=self.messages, tools=tools),
-                start_to_close_timeout=timedelta(seconds=120),
-            )
-
-            log.info(f"completion: {completion}")
-
-            tool_calls = completion.choices[0].message.tool_calls
-            self.messages.append(
-                Message(
-                    role="assistant",
-                    content=completion.choices[0].message.content or "",
-                    tool_calls=tool_calls,
+            try:
+                completion = await agent.step(
+                    function=llm_chat,
+                    function_input=LlmChatInput(messages=self.messages, tools=tools),
+                    start_to_close_timeout=timedelta(seconds=120),
                 )
-            )
-
-            log.info(f"tool_calls: {tool_calls}")
-
-            if tool_calls:
-                for tool_call in tool_calls:
-                    log.info(f"tool_call: {tool_call}")
-
-                    name = tool_call.function.name
-
-                    match name:
-                        case todo_create.__name__:
-                            args = TodoCreateParams.model_validate_json(
-                                tool_call.function.arguments
-                            )
-
-                            result = await agent.step(
-                                function=todo_create,
-                                function_input=args,
-                            )
-                            self.messages.append(
-                                Message(
-                                    role="tool",
-                                    tool_call_id=tool_call.id,
-                                    content=str(result),
-                                )
-                            )
-
-                            completion_with_tool_call = await agent.step(
-                                function=llm_chat,
-                                function_input=LlmChatInput(
-                                    messages=self.messages, tools=tools
-                                ),
-                                start_to_close_timeout=timedelta(seconds=120),
-                            )
-                            self.messages.append(
-                                Message(
-                                    role="assistant",
-                                    content=completion_with_tool_call.choices[
-                                        0
-                                    ].message.content
-                                    or "",
-                                )
-                            )
-                        case TodoExecute.__name__:
-                            args = TodoExecuteParams.model_validate_json(
-                                tool_call.function.arguments
-                            )
-
-                            result = await agent.child_execute(
-                                workflow=TodoExecute,
-                                workflow_id=tool_call.id,
-                                workflow_input=args,
-                            )
-                            self.messages.append(
-                                Message(
-                                    role="tool",
-                                    tool_call_id=tool_call.id,
-                                    content=str(result),
-                                )
-                            )
-
-                            completion_with_tool_call = await agent.step(
-                                function=llm_chat,
-                                function_input=LlmChatInput(
-                                    messages=self.messages, tools=tools
-                                ),
-                                start_to_close_timeout=timedelta(seconds=120),
-                            )
-                            self.messages.append(
-                                Message(
-                                    role="assistant",
-                                    content=completion_with_tool_call.choices[
-                                        0
-                                    ].message.content
-                                    or "",
-                                )
-                            )
+            except Exception as e:
+                error_message = f"Error during llm_chat: {e}"
+                raise NonRetryableError(error_message) from e
             else:
+                log.info(f"completion: {completion}")
+                tool_calls = completion.choices[0].message.tool_calls
                 self.messages.append(
                     Message(
                         role="assistant",
                         content=completion.choices[0].message.content or "",
+                        tool_calls=tool_calls,
                     )
                 )
+
+                log.info(f"tool_calls: {tool_calls}")
+
+                if tool_calls:
+                    for tool_call in tool_calls:
+                        log.info(f"tool_call: {tool_call}")
+
+                        name = tool_call.function.name
+
+                        match name:
+                            case todo_create.__name__:
+                                args = TodoCreateParams.model_validate_json(
+                                    tool_call.function.arguments
+                                )
+
+                                try:
+                                    result = await agent.step(
+                                        function=todo_create,
+                                        function_input=args,
+                                    )
+                                except Exception as e:
+                                    error_message = f"Error during todo_create: {e}"
+                                    raise NonRetryableError(error_message) from e
+                                else:
+                                    self.messages.append(
+                                        Message(
+                                            role="tool",
+                                            tool_call_id=tool_call.id,
+                                            content=str(result),
+                                        )
+                                    )
+                                try:
+                                    completion_with_tool_call = await agent.step(
+                                        function=llm_chat,
+                                        function_input=LlmChatInput(
+                                            messages=self.messages, tools=tools
+                                        ),
+                                        start_to_close_timeout=timedelta(seconds=120),
+                                    )
+                                except Exception as e:
+                                    error_message = f"Error during llm_chat: {e}"
+                                    raise NonRetryableError(error_message) from e
+                                else:
+                                    self.messages.append(
+                                        Message(
+                                            role="assistant",
+                                            content=completion_with_tool_call.choices[
+                                                0
+                                            ].message.content
+                                            or "",
+                                        )
+                                    )
+                            case TodoExecute.__name__:
+                                args = TodoExecuteParams.model_validate_json(
+                                    tool_call.function.arguments
+                                )
+
+                                try:
+                                    result = await agent.child_execute(
+                                        workflow=TodoExecute,
+                                        workflow_id=tool_call.id,
+                                        workflow_input=args,
+                                    )
+                                except Exception as e:
+                                    error_message = f"Error during TodoExecute: {e}"
+                                    raise NonRetryableError(error_message) from e
+                                else:
+                                    self.messages.append(
+                                        Message(
+                                            role="tool",
+                                            tool_call_id=tool_call.id,
+                                            content=str(result),
+                                        )
+                                    )
+
+                                try:
+                                    completion_with_tool_call = await agent.step(
+                                        function=llm_chat,
+                                        function_input=LlmChatInput(
+                                            messages=self.messages, tools=tools
+                                        ),
+                                        start_to_close_timeout=timedelta(seconds=120),
+                                    )
+                                except Exception as e:
+                                    error_message = f"Error during llm_chat: {e}"
+                                    raise NonRetryableError(error_message) from e
+                                else:
+                                    self.messages.append(
+                                        Message(
+                                            role="assistant",
+                                            content=completion_with_tool_call.choices[
+                                                0
+                                            ].message.content
+                                            or "",
+                                        )
+                                    )
+                else:
+                    self.messages.append(
+                        Message(
+                            role="assistant",
+                            content=completion.choices[0].message.content or "",
+                        )
+                    )
         except Exception as e:
             log.error(f"Error during message event: {e}")
             raise
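
The change applies one pattern throughout: each agent.step or agent.child_execute call is wrapped in try/except, any failure is re-raised as NonRetryableError so the engine fails fast instead of retrying a call that will keep failing the same way, and the success path moves into the else branch so only the step call itself is guarded. A minimal sketch of that shape, reduced to a single LLM step (run_llm_step is a hypothetical helper name for illustration; llm_chat and LlmChatInput are the project's own function and input model imported in the surrounding file):

from datetime import timedelta

from restack_ai.agent import NonRetryableError, agent


async def run_llm_step(messages, tools):
    # Guard only the step call; everything that runs on success goes in `else`.
    try:
        completion = await agent.step(
            function=llm_chat,  # project-defined LLM function, not shown here
            function_input=LlmChatInput(messages=messages, tools=tools),
            start_to_close_timeout=timedelta(seconds=120),
        )
    except Exception as e:
        # Re-raise as non-retryable so the engine does not retry a doomed call.
        raise NonRetryableError(f"Error during llm_chat: {e}") from e
    else:
        return completion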