@@ -205,6 +205,58 @@ def __init__(
         }
         logging.debug(f"LLM instance initialized with: {json.dumps(debug_info, indent=2, default=str)}")

+    def _is_ollama_provider(self) -> bool:
+        """Detect if this is an Ollama provider regardless of naming convention"""
+        if not self.model:
+            return False
+
+        # Direct ollama/ prefix
+        if self.model.startswith("ollama/"):
+            return True
+
+        # Check environment variables for Ollama base URL
+        base_url = os.getenv("OPENAI_BASE_URL", "")
+        api_base = os.getenv("OPENAI_API_BASE", "")
+
+        # Common Ollama endpoints
+        ollama_endpoints = ["localhost:11434", "127.0.0.1:11434", ":11434"]
+
+        return any(endpoint in base_url or endpoint in api_base for endpoint in ollama_endpoints)
+
+    def _parse_tool_call_arguments(self, tool_call: Dict, is_ollama: bool = False) -> tuple:
+        """
+        Safely parse tool call arguments with proper error handling
+
+        Returns:
+            tuple: (function_name, arguments, tool_call_id)
+        """
+        try:
+            if is_ollama:
+                # Special handling for the Ollama provider, which may have a different structure
+                if "function" in tool_call and isinstance(tool_call["function"], dict):
+                    function_name = tool_call["function"]["name"]
+                    arguments = json.loads(tool_call["function"]["arguments"])
+                else:
+                    # Try alternative format that Ollama might return
+                    function_name = tool_call.get("name", "unknown_function")
+                    arguments_str = tool_call.get("arguments", "{}")
+                    arguments = json.loads(arguments_str) if arguments_str else {}
+                tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
+            else:
+                # Standard format for other providers, with error handling
+                function_name = tool_call["function"]["name"]
+                arguments_str = tool_call["function"]["arguments"]
+                arguments = json.loads(arguments_str) if arguments_str else {}
+                tool_call_id = tool_call["id"]
+
+        except (KeyError, json.JSONDecodeError, TypeError) as e:
+            logging.error(f"Error parsing tool call arguments: {e}")
+            function_name = tool_call.get("name", "unknown_function")
+            arguments = {}
+            tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
+
+        return function_name, arguments, tool_call_id
+
     def _needs_system_message_skip(self) -> bool:
         """Check if this model requires skipping system messages"""
         if not self.model:
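A minimal usage sketch of the new detection helper (the constructor call, model names, and endpoint value below are illustrative assumptions, not taken from this commit):

    import os

    llm_a = LLM(model="ollama/llama3")
    print(llm_a._is_ollama_provider())   # True, matched by the "ollama/" prefix

    os.environ["OPENAI_BASE_URL"] = "http://localhost:11434/v1"
    llm_b = LLM(model="mistral")
    print(llm_b._is_ollama_provider())   # True, because ":11434" appears in OPENAI_BASE_URL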
@@ -486,32 +538,19 @@ def get_response(
             for tool_call in tool_calls:
                 # Handle both object and dict access patterns
                 if isinstance(tool_call, dict):
-                    # Special handling for Ollama provider which may have a different structure
-                    if self.model and self.model.startswith("ollama/"):
-                        try:
-                            # Try standard format first
-                            if "function" in tool_call and isinstance(tool_call["function"], dict):
-                                function_name = tool_call["function"]["name"]
-                                arguments = json.loads(tool_call["function"]["arguments"])
-                            else:
-                                # Try alternative format that Ollama might return
-                                function_name = tool_call.get("name", "unknown_function")
-                                arguments = json.loads(tool_call.get("arguments", "{}"))
-                            tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
-                        except Exception as e:
-                            logging.error(f"Error processing Ollama tool call: {e}")
-                            function_name = "unknown_function"
-                            arguments = {}
-                            tool_call_id = f"tool_{id(tool_call)}"
-                    else:
-                        # Standard format for other providers
-                        function_name = tool_call["function"]["name"]
-                        arguments = json.loads(tool_call["function"]["arguments"])
-                        tool_call_id = tool_call["id"]
+                    is_ollama = self._is_ollama_provider()
+                    function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
                 else:
-                    function_name = tool_call.function.name
-                    arguments = json.loads(tool_call.function.arguments)
-                    tool_call_id = tool_call.id
+                    # Handle object-style tool calls
+                    try:
+                        function_name = tool_call.function.name
+                        arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
+                        tool_call_id = tool_call.id
+                    except (json.JSONDecodeError, AttributeError) as e:
+                        logging.error(f"Error parsing object-style tool call: {e}")
+                        function_name = "unknown_function"
+                        arguments = {}
+                        tool_call_id = f"tool_{id(tool_call)}"

                 logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
                 tool_result = execute_tool_fn(function_name, arguments)
@@ -1083,32 +1122,19 @@ async def get_response_async(
             for tool_call in tool_calls:
                 # Handle both object and dict access patterns
                 if isinstance(tool_call, dict):
-                    # Special handling for Ollama provider which may have a different structure
-                    if self.model and self.model.startswith("ollama/"):
-                        try:
-                            # Try standard format first
-                            if "function" in tool_call and isinstance(tool_call["function"], dict):
-                                function_name = tool_call["function"]["name"]
-                                arguments = json.loads(tool_call["function"]["arguments"])
-                            else:
-                                # Try alternative format that Ollama might return
-                                function_name = tool_call.get("name", "unknown_function")
-                                arguments = json.loads(tool_call.get("arguments", "{}"))
-                            tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
-                        except Exception as e:
-                            logging.error(f"Error processing Ollama tool call: {e}")
-                            function_name = "unknown_function"
-                            arguments = {}
-                            tool_call_id = f"tool_{id(tool_call)}"
-                    else:
-                        # Standard format for other providers
-                        function_name = tool_call["function"]["name"]
-                        arguments = json.loads(tool_call["function"]["arguments"])
-                        tool_call_id = tool_call["id"]
+                    is_ollama = self._is_ollama_provider()
+                    function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
                 else:
-                    function_name = tool_call.function.name
-                    arguments = json.loads(tool_call.function.arguments)
-                    tool_call_id = tool_call.id
+                    # Handle object-style tool calls
+                    try:
+                        function_name = tool_call.function.name
+                        arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
+                        tool_call_id = tool_call.id
+                    except (json.JSONDecodeError, AttributeError) as e:
+                        logging.error(f"Error parsing object-style tool call: {e}")
+                        function_name = "unknown_function"
+                        arguments = {}
+                        tool_call_id = f"tool_{id(tool_call)}"

                 tool_result = await execute_tool_fn(function_name, arguments)

@@ -1129,7 +1155,7 @@ async def get_response_async(
                 response_text = ""

                 # Special handling for Ollama models that don't automatically process tool results
-                if self.model and self.model.startswith("ollama/") and tool_result:
+                if self._is_ollama_provider() and tool_result:
                     # For Ollama models, we need to explicitly ask the model to process the tool results
                     # First, check if the response is just a JSON tool call
                     try:
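For reference, a rough sketch of the parser's contract on dict-style tool calls (the payloads are made-up examples, and `llm` is assumed to be an instance of this class):

    well_formed = {
        "id": "call_123",
        "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
    }
    print(llm._parse_tool_call_arguments(well_formed, is_ollama=True))
    # ('get_weather', {'city': 'Paris'}, 'call_123')

    # Malformed argument JSON takes the error branch instead of raising.
    malformed = {"id": "call_456", "function": {"name": "get_weather", "arguments": "not json"}}
    print(llm._parse_tool_call_arguments(malformed, is_ollama=False))
    # ('unknown_function', {}, 'call_456')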