Skip to content

Commit 92281a6

Browse files
fix: Implement Ollama sequential tool calling fix
Modified conversation format for Ollama to handle tool results properly:
- Assistant messages no longer include the 'tool_calls' field for Ollama
- Tool results are passed as user messages with a natural-language format
- Prevents an infinite loop of repeated tool calls

This allows Ollama to properly execute sequential tool calls as intended.

Fixes #854

Co-authored-by: Mervin Praison <MervinPraison@users.noreply.github.com>
1 parent c971fa8 commit 92281a6

File tree

1 file changed

+58
-20
lines changed
  • src/praisonai-agents/praisonaiagents/llm/llm.py

1 file changed

+58
-20
lines changed

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 58 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -812,11 +812,20 @@ def get_response(
812812
if tool_calls and execute_tool_fn:
813813
# Convert tool_calls to a serializable format for all providers
814814
serializable_tool_calls = self._serialize_tool_calls(tool_calls)
815-
messages.append({
816-
"role": "assistant",
817-
"content": response_text,
818-
"tool_calls": serializable_tool_calls
819-
})
815+
# Check if this is Ollama provider
816+
if self._is_ollama_provider():
817+
# For Ollama, only include role and content
818+
messages.append({
819+
"role": "assistant",
820+
"content": response_text
821+
})
822+
else:
823+
# For other providers, include tool_calls
824+
messages.append({
825+
"role": "assistant",
826+
"content": response_text,
827+
"tool_calls": serializable_tool_calls
828+
})
820829

821830
should_continue = False
822831
tool_results = [] # Store all tool results
@@ -842,11 +851,21 @@ def get_response(
842851
logging.debug(f"[TOOL_EXEC_DEBUG] About to display tool call with message: {display_message}")
843852
display_tool_call(display_message, console=console)
844853

845-
messages.append({
846-
"role": "tool",
847-
"tool_call_id": tool_call_id,
848-
"content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
849-
})
854+
# Check if this is Ollama provider
855+
if self._is_ollama_provider():
856+
# For Ollama, use user role and format as natural language
857+
tool_result_content = json.dumps(tool_result) if tool_result is not None else "an empty output"
858+
messages.append({
859+
"role": "user",
860+
"content": f"The {function_name} function returned: {tool_result_content}"
861+
})
862+
else:
863+
# For other providers, use tool role with tool_call_id
864+
messages.append({
865+
"role": "tool",
866+
"tool_call_id": tool_call_id,
867+
"content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
868+
})
850869

851870
# Check if we should continue (for tools like sequential thinking)
852871
# This mimics the logic from agent.py lines 1004-1007
@@ -1303,11 +1322,20 @@ async def get_response_async(
13031322
if tools and execute_tool_fn and tool_calls:
13041323
# Convert tool_calls to a serializable format for all providers
13051324
serializable_tool_calls = self._serialize_tool_calls(tool_calls)
1306-
messages.append({
1307-
"role": "assistant",
1308-
"content": response_text,
1309-
"tool_calls": serializable_tool_calls
1310-
})
1325+
# Check if it's Ollama provider
1326+
if self._is_ollama_provider():
1327+
# For Ollama, only include role and content
1328+
messages.append({
1329+
"role": "assistant",
1330+
"content": response_text
1331+
})
1332+
else:
1333+
# For other providers, include tool_calls
1334+
messages.append({
1335+
"role": "assistant",
1336+
"content": response_text,
1337+
"tool_calls": serializable_tool_calls
1338+
})
13111339

13121340
tool_results = [] # Store all tool results
13131341
for tool_call in tool_calls:
@@ -1325,11 +1353,21 @@ async def get_response_async(
13251353
else:
13261354
display_message += "Function returned no output"
13271355
display_tool_call(display_message, console=console)
1328-
messages.append({
1329-
"role": "tool",
1330-
"tool_call_id": tool_call_id,
1331-
"content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
1332-
})
1356+
# Check if it's Ollama provider
1357+
if self._is_ollama_provider():
1358+
# For Ollama, use user role and natural language format
1359+
content = f"The {function_name} function returned: {json.dumps(tool_result) if tool_result is not None else 'an empty output'}"
1360+
messages.append({
1361+
"role": "user",
1362+
"content": content
1363+
})
1364+
else:
1365+
# For other providers, use tool role with tool_call_id
1366+
messages.append({
1367+
"role": "tool",
1368+
"tool_call_id": tool_call_id,
1369+
"content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
1370+
})
13331371

13341372
# Get response after tool calls
13351373
response_text = ""

0 commit comments

Comments
 (0)