Skip to content

Commit 78e0547

Browse files
Merge pull request #557 from MervinPraison/claude/issue-471-20250530_151237
fix(ollama): resolve tool-call issues with local Ollama models
2 parents 745ad50 + cc4e783 commit 78e0547

File tree

3 files changed

+214
-52
lines changed

3 files changed

+214
-52
lines changed

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -864,7 +864,11 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
864864
if self._using_custom_llm:
865865
try:
866866
# Special handling for MCP tools when using provider/model format
867-
tool_param = self.tools if tools is None else tools
867+
# Fix: Handle empty tools list properly - use self.tools if tools is None or empty
868+
if tools is None or (isinstance(tools, list) and len(tools) == 0):
869+
tool_param = self.tools
870+
else:
871+
tool_param = tools
868872

869873
# Convert MCP tool objects to OpenAI format if needed
870874
if tool_param is not None:

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 77 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -205,6 +205,58 @@ def __init__(
205205
}
206206
logging.debug(f"LLM instance initialized with: {json.dumps(debug_info, indent=2, default=str)}")
207207

208+
def _is_ollama_provider(self) -> bool:
209+
"""Detect if this is an Ollama provider regardless of naming convention"""
210+
if not self.model:
211+
return False
212+
213+
# Direct ollama/ prefix
214+
if self.model.startswith("ollama/"):
215+
return True
216+
217+
# Check environment variables for Ollama base URL
218+
base_url = os.getenv("OPENAI_BASE_URL", "")
219+
api_base = os.getenv("OPENAI_API_BASE", "")
220+
221+
# Common Ollama endpoints
222+
ollama_endpoints = ["localhost:11434", "127.0.0.1:11434", ":11434"]
223+
224+
return any(endpoint in base_url or endpoint in api_base for endpoint in ollama_endpoints)
225+
226+
def _parse_tool_call_arguments(self, tool_call: Dict, is_ollama: bool = False) -> tuple:
227+
"""
228+
Safely parse tool call arguments with proper error handling
229+
230+
Returns:
231+
tuple: (function_name, arguments, tool_call_id)
232+
"""
233+
try:
234+
if is_ollama:
235+
# Special handling for Ollama provider which may have different structure
236+
if "function" in tool_call and isinstance(tool_call["function"], dict):
237+
function_name = tool_call["function"]["name"]
238+
arguments = json.loads(tool_call["function"]["arguments"])
239+
else:
240+
# Try alternative format that Ollama might return
241+
function_name = tool_call.get("name", "unknown_function")
242+
arguments_str = tool_call.get("arguments", "{}")
243+
arguments = json.loads(arguments_str) if arguments_str else {}
244+
tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
245+
else:
246+
# Standard format for other providers with error handling
247+
function_name = tool_call["function"]["name"]
248+
arguments_str = tool_call["function"]["arguments"]
249+
arguments = json.loads(arguments_str) if arguments_str else {}
250+
tool_call_id = tool_call["id"]
251+
252+
except (KeyError, json.JSONDecodeError, TypeError) as e:
253+
logging.error(f"Error parsing tool call arguments: {e}")
254+
function_name = tool_call.get("name", "unknown_function")
255+
arguments = {}
256+
tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
257+
258+
return function_name, arguments, tool_call_id
259+
208260
def _needs_system_message_skip(self) -> bool:
209261
"""Check if this model requires skipping system messages"""
210262
if not self.model:
@@ -486,32 +538,19 @@ def get_response(
486538
for tool_call in tool_calls:
487539
# Handle both object and dict access patterns
488540
if isinstance(tool_call, dict):
489-
# Special handling for Ollama provider which may have a different structure
490-
if self.model and self.model.startswith("ollama/"):
491-
try:
492-
# Try standard format first
493-
if "function" in tool_call and isinstance(tool_call["function"], dict):
494-
function_name = tool_call["function"]["name"]
495-
arguments = json.loads(tool_call["function"]["arguments"])
496-
else:
497-
# Try alternative format that Ollama might return
498-
function_name = tool_call.get("name", "unknown_function")
499-
arguments = json.loads(tool_call.get("arguments", "{}"))
500-
tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
501-
except Exception as e:
502-
logging.error(f"Error processing Ollama tool call: {e}")
503-
function_name = "unknown_function"
504-
arguments = {}
505-
tool_call_id = f"tool_{id(tool_call)}"
506-
else:
507-
# Standard format for other providers
508-
function_name = tool_call["function"]["name"]
509-
arguments = json.loads(tool_call["function"]["arguments"])
510-
tool_call_id = tool_call["id"]
541+
is_ollama = self._is_ollama_provider()
542+
function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
511543
else:
512-
function_name = tool_call.function.name
513-
arguments = json.loads(tool_call.function.arguments)
514-
tool_call_id = tool_call.id
544+
# Handle object-style tool calls
545+
try:
546+
function_name = tool_call.function.name
547+
arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
548+
tool_call_id = tool_call.id
549+
except (json.JSONDecodeError, AttributeError) as e:
550+
logging.error(f"Error parsing object-style tool call: {e}")
551+
function_name = "unknown_function"
552+
arguments = {}
553+
tool_call_id = f"tool_{id(tool_call)}"
515554

516555
logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
517556
tool_result = execute_tool_fn(function_name, arguments)
@@ -1083,32 +1122,19 @@ async def get_response_async(
10831122
for tool_call in tool_calls:
10841123
# Handle both object and dict access patterns
10851124
if isinstance(tool_call, dict):
1086-
# Special handling for Ollama provider which may have a different structure
1087-
if self.model and self.model.startswith("ollama/"):
1088-
try:
1089-
# Try standard format first
1090-
if "function" in tool_call and isinstance(tool_call["function"], dict):
1091-
function_name = tool_call["function"]["name"]
1092-
arguments = json.loads(tool_call["function"]["arguments"])
1093-
else:
1094-
# Try alternative format that Ollama might return
1095-
function_name = tool_call.get("name", "unknown_function")
1096-
arguments = json.loads(tool_call.get("arguments", "{}"))
1097-
tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
1098-
except Exception as e:
1099-
logging.error(f"Error processing Ollama tool call: {e}")
1100-
function_name = "unknown_function"
1101-
arguments = {}
1102-
tool_call_id = f"tool_{id(tool_call)}"
1103-
else:
1104-
# Standard format for other providers
1105-
function_name = tool_call["function"]["name"]
1106-
arguments = json.loads(tool_call["function"]["arguments"])
1107-
tool_call_id = tool_call["id"]
1125+
is_ollama = self._is_ollama_provider()
1126+
function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
11081127
else:
1109-
function_name = tool_call.function.name
1110-
arguments = json.loads(tool_call.function.arguments)
1111-
tool_call_id = tool_call.id
1128+
# Handle object-style tool calls
1129+
try:
1130+
function_name = tool_call.function.name
1131+
arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
1132+
tool_call_id = tool_call.id
1133+
except (json.JSONDecodeError, AttributeError) as e:
1134+
logging.error(f"Error parsing object-style tool call: {e}")
1135+
function_name = "unknown_function"
1136+
arguments = {}
1137+
tool_call_id = f"tool_{id(tool_call)}"
11121138

11131139
tool_result = await execute_tool_fn(function_name, arguments)
11141140

@@ -1129,7 +1155,7 @@ async def get_response_async(
11291155
response_text = ""
11301156

11311157
# Special handling for Ollama models that don't automatically process tool results
1132-
if self.model and self.model.startswith("ollama/") and tool_result:
1158+
if self._is_ollama_provider() and tool_result:
11331159
# For Ollama models, we need to explicitly ask the model to process the tool results
11341160
# First, check if the response is just a JSON tool call
11351161
try:
Lines changed: 132 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,132 @@
1+
#!/usr/bin/env python3
2+
"""
3+
Test script to verify Ollama tool-call fixes
4+
"""
5+
import sys
6+
import os
7+
8+
# Add the source directory to Python path
9+
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'praisonai-agents'))
10+
11+
from praisonaiagents.llm.llm import LLM
12+
from praisonaiagents.agent.agent import Agent
13+
14+
def test_ollama_provider_detection():
    """Test the new Ollama provider detection logic.

    Fix: snapshot and restore OPENAI_BASE_URL in a try/finally so the test
    never clobbers a pre-existing value and never leaks env state when an
    assertion fails part-way through.
    """
    print("Testing Ollama provider detection...")

    saved_base_url = os.environ.get("OPENAI_BASE_URL")
    try:
        # Test 1: Direct ollama/ prefix
        llm1 = LLM(model="ollama/qwen3:32b")
        assert llm1._is_ollama_provider() == True, "Should detect ollama/ prefix"
        print("✓ Direct ollama/ prefix detection works")

        # Test 2: Environment variable detection
        os.environ["OPENAI_BASE_URL"] = "http://localhost:11434/v1"
        llm2 = LLM(model="qwen3:32b")
        assert llm2._is_ollama_provider() == True, "Should detect via env var"
        print("✓ Environment variable detection works")

        # Test 3: Non-Ollama model
        os.environ.pop("OPENAI_BASE_URL", None)
        llm3 = LLM(model="gpt-4o-mini")
        assert llm3._is_ollama_provider() == False, "Should not detect non-Ollama"
        print("✓ Non-Ollama detection works")
    finally:
        # Restore whatever was set before the test ran.
        if saved_base_url is None:
            os.environ.pop("OPENAI_BASE_URL", None)
        else:
            os.environ["OPENAI_BASE_URL"] = saved_base_url
34+
35+
def test_tool_call_parsing():
    """Test the new tool call parsing logic.

    Fix: the unpacked ``id`` variable is renamed to ``call_id`` so the
    builtin ``id()`` is no longer shadowed inside the test body.
    """
    print("\nTesting tool call parsing...")

    llm = LLM(model="ollama/qwen3:32b")

    # Test 1: Standard format
    tool_call_std = {
        "id": "call_123",
        "function": {
            "name": "hello_world",
            "arguments": '{"name": "test"}'
        }
    }
    name, args, call_id = llm._parse_tool_call_arguments(tool_call_std, is_ollama=True)
    assert name == "hello_world", f"Expected 'hello_world', got '{name}'"
    assert args == {"name": "test"}, f"Expected {{'name': 'test'}}, got {args}"
    print("✓ Standard format parsing works")

    # Test 2: Ollama alternative format (flat name/arguments keys)
    tool_call_alt = {
        "name": "hello_world",
        "arguments": '{"name": "test"}'
    }
    name, args, call_id = llm._parse_tool_call_arguments(tool_call_alt, is_ollama=True)
    assert name == "hello_world", f"Expected 'hello_world', got '{name}'"
    assert args == {"name": "test"}, f"Expected {{'name': 'test'}}, got {args}"
    print("✓ Alternative format parsing works")

    # Test 3: Error handling - malformed JSON must degrade to empty args
    tool_call_bad = {
        "function": {
            "name": "hello_world",
            "arguments": 'invalid json'
        }
    }
    name, args, call_id = llm._parse_tool_call_arguments(tool_call_bad, is_ollama=False)
    assert name == "hello_world", f"Expected 'hello_world', got '{name}'"
    assert args == {}, f"Expected empty dict, got {args}"
    print("✓ Error handling works")
75+
76+
def test_agent_tool_parameter_logic():
    """Test the fixed tool parameter logic in Agent.

    Fix: the resolution rule was copy-pasted three times; it now lives in a
    single local helper that mirrors the fixed logic in Agent.chat().
    """
    print("\nTesting agent tool parameter logic...")

    def dummy_tool():
        """Dummy tool for testing"""
        return "test"

    agent = Agent(name="test", tools=[dummy_tool])

    def resolve_tool_param(tools):
        # Mirrors Agent.chat(): fall back to the agent's own tools when the
        # caller passes None or an empty list.
        if tools is None or (isinstance(tools, list) and len(tools) == 0):
            return agent.tools
        return tools

    # Test 1: None tools should use agent tools
    assert resolve_tool_param(None) == [dummy_tool], "Should use agent tools when tools=None"
    print("✓ None tools handling works")

    # Test 2: Empty list should use agent tools
    assert resolve_tool_param([]) == [dummy_tool], "Should use agent tools when tools=[]"
    print("✓ Empty list handling works")

    # Test 3: Non-empty list should use provided tools
    custom_tools = [lambda: "custom"]
    assert resolve_tool_param(custom_tools) == custom_tools, "Should use provided tools when not empty"
    print("✓ Non-empty list handling works")
116+
117+
if __name__ == "__main__":
    print("Running Ollama tool-call fix tests...\n")

    try:
        # Run each scenario in order; the first failing assertion aborts
        # the whole suite and is reported below.
        for test_fn in (
            test_ollama_provider_detection,
            test_tool_call_parsing,
            test_agent_tool_parameter_logic,
        ):
            test_fn()

        print("\n🎉 All tests passed! The Ollama tool-call fixes are working correctly.")
        sys.exit(0)

    except Exception as e:
        print(f"\n❌ Test failed: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)

0 commit comments

Comments
 (0)