Skip to content

Commit cb5130c

Browse files
authored
Merge pull request #46 from BrainDriveAI/bugfix/providers
PR: Harden Ollama Provider Config Parsing, Add Robust Logging & Diagnostics
2 parents 5f68e85 + 2bdfe96 commit cb5130c

File tree

9 files changed

+1415
-60
lines changed

9 files changed

+1415
-60
lines changed

backend/app/ai_providers/ollama.py

Lines changed: 57 additions & 13 deletions
Original file line number · Diff line number · Diff line change
@@ -14,9 +14,23 @@ def provider_name(self) -> str:
1414
return "ollama"
1515

1616
async def initialize(self, config: Dict[str, Any]) -> bool:
17+
import logging
18+
logger = logging.getLogger(__name__)
19+
20+
# Debug logging for server URL resolution
21+
logger.info(f"🔧 Ollama provider initializing with config: {config}")
22+
1723
self.server_url = config.get("server_url", "http://localhost:11434")
1824
self.api_key = config.get("api_key", "")
1925
self.server_name = config.get("server_name", "Default Ollama Server")
26+
27+
# Log what URL we're actually using
28+
if self.server_url == "http://localhost:11434" and "server_url" not in config:
29+
logger.warning(f"⚠️ Ollama provider defaulting to localhost! Config was: {config}")
30+
else:
31+
logger.info(f"✅ Ollama provider using server_url: {self.server_url}")
32+
33+
logger.info(f"🎯 Ollama provider initialized - server_name: {self.server_name}, server_url: {self.server_url}")
2034
return True
2135

2236
async def get_models(self) -> List[Dict[str, Any]]:
@@ -41,13 +55,12 @@ async def generate_stream(self, prompt: str, model: str, params: Dict[str, Any])
4155
yield chunk
4256

4357
async def chat_completion(self, messages: List[Dict[str, Any]], model: str, params: Dict[str, Any]) -> Dict[str, Any]:
44-
print(f"Ollama chat_completion received {len(messages)} messages")
45-
print(f"DETAILED MESSAGE INSPECTION:")
46-
for i, msg in enumerate(messages):
47-
print(f" Message {i+1}: role={msg.get('role', 'unknown')}, content={msg.get('content', '')}")
48-
58+
print(f"🤖 OLLAMA CHAT_COMPLETION CALLED")
59+
print(f"📊 Server URL: {self.server_url}")
60+
print(f"📊 Server Name: {self.server_name}")
61+
print(f"📊 Model: {model}")
62+
print(f"📊 Messages: {len(messages)} messages")
4963
prompt = self._format_chat_messages(messages)
50-
print(f"Formatted prompt (full):\n{prompt}")
5164

5265
result = await self._call_ollama_api(prompt, model, params, is_streaming=False)
5366
if "error" not in result:
@@ -61,13 +74,7 @@ async def chat_completion(self, messages: List[Dict[str, Any]], model: str, para
6174
return result
6275

6376
async def chat_completion_stream(self, messages: List[Dict[str, Any]], model: str, params: Dict[str, Any]) -> AsyncGenerator[Dict[str, Any], None]:
64-
print(f"Ollama chat_completion_stream received {len(messages)} messages")
65-
print(f"DETAILED MESSAGE INSPECTION (STREAMING):")
66-
for i, msg in enumerate(messages):
67-
print(f" Message {i+1}: role={msg.get('role', 'unknown')}, content={msg.get('content', '')}")
68-
6977
prompt = self._format_chat_messages(messages)
70-
print(f"Formatted prompt (full):\n{prompt}")
7178

7279
async for chunk in self._stream_ollama_api(prompt, model, params):
7380
if "error" not in chunk:
@@ -81,16 +88,23 @@ async def chat_completion_stream(self, messages: List[Dict[str, Any]], model: st
8188
yield chunk
8289

8390
async def _call_ollama_api(self, prompt: str, model: str, params: Dict[str, Any], is_streaming: bool = False) -> Dict[str, Any]:
91+
import logging
92+
logger = logging.getLogger(__name__)
93+
8494
payload_params = params.copy() if params else {}
8595
payload_params["stream"] = False
8696
payload = {"model": model, "prompt": prompt, **payload_params}
8797
headers = {'Content-Type': 'application/json'}
8898
if self.api_key:
8999
headers['Authorization'] = f'Bearer {self.api_key}'
90100

101+
# Log the actual URL being called
102+
api_url = f"{self.server_url}/api/generate"
103+
logger.debug(f"Making Ollama API call to: {api_url}")
104+
91105
try:
92106
async with httpx.AsyncClient(timeout=60.0) as client:
93-
response = await client.post(f"{self.server_url}/api/generate", json=payload, headers=headers)
107+
response = await client.post(api_url, json=payload, headers=headers)
94108
response.raise_for_status()
95109
result = response.json()
96110
return {
@@ -100,6 +114,36 @@ async def _call_ollama_api(self, prompt: str, model: str, params: Dict[str, Any]
100114
"metadata": result,
101115
"finish_reason": result.get("done") and "stop" or None
102116
}
117+
except httpx.ConnectError as e:
118+
logger.error(f"❌ Cannot connect to Ollama server at {api_url}")
119+
return {
120+
"error": f"Cannot connect to Ollama server at {self.server_url}. "
121+
f"Please check if the server is running and accessible.",
122+
"provider": "ollama",
123+
"model": model,
124+
"server_name": self.server_name,
125+
"server_url": self.server_url
126+
}
127+
except httpx.HTTPStatusError as e:
128+
if e.response.status_code == 404:
129+
logger.error(f"❌ Model '{model}' not found on server {self.server_name}")
130+
return {
131+
"error": f"Model '{model}' not found on Ollama server '{self.server_name}'. "
132+
f"Please check if the model is installed or use a different model.",
133+
"provider": "ollama",
134+
"model": model,
135+
"server_name": self.server_name,
136+
"server_url": self.server_url
137+
}
138+
else:
139+
logger.error(f"❌ HTTP error {e.response.status_code} from server {self.server_name}")
140+
return {
141+
"error": f"HTTP {e.response.status_code} error from Ollama server '{self.server_name}': {e.response.text}",
142+
"provider": "ollama",
143+
"model": model,
144+
"server_name": self.server_name,
145+
"server_url": self.server_url
146+
}
103147
except Exception as e:
104148
return self._format_error(e, model)
105149

backend/app/ai_providers/registry.py

Lines changed: 6 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -35,6 +35,10 @@ def register_provider(self, name: str, provider_class: Type[AIProvider]) -> None
3535

3636
async def get_provider(self, name: str, instance_id: str, config: Dict[str, Any]) -> AIProvider:
3737
"""Get or create a provider instance."""
38+
import logging
39+
logger = logging.getLogger(__name__)
40+
logger.debug(f"Provider registry: get_provider called - {name}, instance: {instance_id}")
41+
3842
if name not in self._providers:
3943
raise ValueError(f"Provider '{name}' not registered")
4044

@@ -43,9 +47,11 @@ async def get_provider(self, name: str, instance_id: str, config: Dict[str, Any]
4347

4448
# Check if instance exists
4549
if instance_key in self._instances[name]:
50+
logger.debug(f"Using existing provider instance: {instance_key}")
4651
return self._instances[name][instance_key]
4752

4853
# Create new instance
54+
logger.debug(f"Creating new provider instance: {instance_key}")
4955
provider = self._providers[name]()
5056
await provider.initialize(config)
5157
self._instances[name][instance_key] = provider

backend/app/api/v1/endpoints/ai_providers.py

Lines changed: 94 additions & 47 deletions
Original file line number · Diff line number · Diff line change
@@ -18,6 +18,7 @@
1818
from app.models.user import User
1919
from app.ai_providers.registry import provider_registry
2020
from app.ai_providers.ollama import OllamaProvider
21+
from app.utils.json_parsing import safe_encrypted_json_parse, validate_ollama_settings_format, create_default_ollama_settings
2122
from app.schemas.ai_providers import (
2223
TextGenerationRequest,
2324
ChatCompletionRequest,
@@ -41,6 +42,12 @@ async def get_provider_instance_from_request(request, db):
4142
user_id = user_id.replace("-", "")
4243

4344
logger = logging.getLogger(__name__)
45+
print(f"🚀 PROVIDER REQUEST RECEIVED")
46+
print(f"📊 Provider: {request.provider}")
47+
print(f"📊 Settings ID: {request.settings_id}")
48+
print(f"📊 Server ID: {request.server_id}")
49+
print(f"📊 Model: {getattr(request, 'model', 'N/A')}")
50+
print(f"📊 User ID: {user_id}")
4451
logger.info(f"Getting provider instance for: settings_id={request.settings_id}, user_id={user_id}")
4552
logger.info(f"Original user_id from request: {request.user_id}")
4653

@@ -94,43 +101,63 @@ async def get_provider_instance_from_request(request, db):
94101

95102
# Use the first setting found
96103
setting = settings[0]
97-
logger.info(f"Using setting with ID: {setting['id'] if isinstance(setting, dict) else setting.id}")
104+
logger.debug(f"Using setting with ID: {setting['id'] if isinstance(setting, dict) else setting.id}")
98105

99-
# Extract configuration from settings value
100-
# Parse the JSON string if value is a string
106+
# Extract configuration from settings value using robust parsing
101107
setting_value = setting['value'] if isinstance(setting, dict) else setting.value
108+
setting_id = setting['id'] if isinstance(setting, dict) else setting.id
102109

103-
if isinstance(setting_value, str):
104-
try:
105-
# First parse
106-
logger.info("Parsing settings value as JSON string")
107-
value_dict = json.loads(setting_value)
108-
logger.debug(f"Parsed JSON string into: {value_dict}")
109-
110-
# Check if the result is still a string (double-encoded JSON)
111-
if isinstance(value_dict, str):
112-
logger.info("Value is double-encoded JSON, parsing again")
113-
value_dict = json.loads(value_dict)
114-
logger.debug(f"Parsed double-encoded JSON into: {value_dict}")
115-
116-
# Now value_dict should be a dictionary
117-
if not isinstance(value_dict, dict):
118-
logger.error(f"Error: value_dict is not a dictionary: {type(value_dict)}")
110+
# Use our robust JSON parsing utility that handles encryption issues
111+
try:
112+
value_dict = safe_encrypted_json_parse(
113+
setting_value,
114+
context=f"settings_id={request.settings_id}, user_id={user_id}",
115+
setting_id=setting_id,
116+
definition_id=request.settings_id
117+
)
118+
119+
# Ensure we have a dictionary
120+
if not isinstance(value_dict, dict):
121+
logger.error(f"Parsed value is not a dictionary: {type(value_dict)}")
122+
# For Ollama settings, provide a default structure
123+
if 'ollama' in request.settings_id.lower():
124+
logger.warning("Creating default Ollama settings structure")
125+
value_dict = create_default_ollama_settings()
126+
else:
119127
raise HTTPException(
120128
status_code=500,
121-
detail=f"Error parsing settings value: expected dict, got {type(value_dict)}. "
122-
f"Please check the format of your settings."
129+
detail=f"Settings value must be a dictionary, got {type(value_dict)}. "
130+
f"Setting ID: {setting_id}"
123131
)
124-
except json.JSONDecodeError as e:
125-
logger.error(f"Error parsing JSON string: {e}")
132+
133+
logger.debug(f"Successfully parsed settings value for {request.settings_id}")
134+
135+
except ValueError as e:
136+
logger.error(f"Failed to parse encrypted settings: {e}")
137+
# For Ollama settings, provide helpful error message and fallback
138+
if 'ollama' in request.settings_id.lower():
139+
logger.info("Ollama settings parsing failed, using fallback configuration")
140+
value_dict = create_default_ollama_settings()
141+
else:
126142
raise HTTPException(
127143
status_code=500,
128-
detail=f"Error parsing settings value: {str(e)}. "
129-
f"Please ensure the settings value is valid JSON."
144+
detail=str(e)
130145
)
131-
else:
132-
logger.info("Settings value is already a dictionary")
133-
value_dict = setting_value
146+
except Exception as e:
147+
logger.error(f"Unexpected error parsing settings: {e}")
148+
raise HTTPException(
149+
status_code=500,
150+
detail=f"Unexpected error parsing settings: {str(e)}. Setting ID: {setting_id}"
151+
)
152+
153+
# Add specific validation for Ollama settings
154+
if 'ollama' in request.settings_id.lower():
155+
logger.info("Validating Ollama settings format")
156+
if not validate_ollama_settings_format(value_dict):
157+
logger.warning("Ollama settings format validation failed, using default structure")
158+
value_dict = create_default_ollama_settings()
159+
else:
160+
logger.info("Ollama settings format validation passed")
134161

135162
# Handle different provider configurations
136163
if request.provider == "openai":
@@ -207,32 +234,45 @@ async def get_provider_instance_from_request(request, db):
207234
logger.info(f"Created Groq config with API key")
208235
else:
209236
# Other providers (like Ollama) use servers array
210-
logger.info("Processing server-based provider configuration")
237+
logger.debug("Processing server-based provider configuration")
211238
servers = value_dict.get("servers", [])
212-
logger.info(f"Found {len(servers)} servers in settings")
239+
logger.debug(f"Found {len(servers)} servers in settings")
240+
241+
logger.debug("Processing server-based provider configuration")
242+
servers = value_dict.get("servers", [])
243+
logger.debug(f"Found {len(servers)} servers in settings")
213244

214245
# Find the specific server by ID
215-
logger.info(f"Looking for server with ID: {request.server_id}")
246+
logger.debug(f"Looking for server with ID: '{request.server_id}'")
216247
server = next((s for s in servers if s.get("id") == request.server_id), None)
217-
if not server and servers:
218-
# If the requested server ID is not found but there are servers available,
219-
# use the first server as a fallback
220-
logger.warning(f"Server with ID {request.server_id} not found, using first available server as fallback")
221-
server = servers[0]
222-
logger.info(f"Using fallback server: {server.get('serverName')} ({server.get('id')})")
223248

224249
if not server:
225-
logger.error(f"No server found with ID: {request.server_id} and no fallback available")
226-
raise HTTPException(
227-
status_code=404,
228-
detail=f"Server not found with ID: {request.server_id}. "
229-
f"Please check your server configuration or use a different server ID."
230-
)
250+
# Provide detailed error message about available servers
251+
if servers:
252+
available_servers = [f"{s.get('serverName', 'Unknown')} (ID: {s.get('id', 'Unknown')})" for s in servers]
253+
available_list = ", ".join(available_servers)
254+
logger.error(f"❌ Server with ID '{request.server_id}' not found")
255+
logger.error(f"📋 Available servers: {available_list}")
256+
raise HTTPException(
257+
status_code=404,
258+
detail=f"Ollama server '{request.server_id}' not found. "
259+
f"Available servers: {available_list}. "
260+
f"Please select a valid server from your Ollama settings."
261+
)
262+
else:
263+
logger.error(f"❌ No Ollama servers configured")
264+
raise HTTPException(
265+
status_code=404,
266+
detail="No Ollama servers are configured. "
267+
"Please add at least one Ollama server in your settings before using this provider."
268+
)
231269

232-
logger.info(f"Found server: {server.get('serverName')}")
270+
logger.debug(f"Found server: {server.get('serverName')} (ID: {server.get('id')})")
233271

234272
# Create provider configuration from server details
235273
server_url = server.get("serverAddress")
274+
logger.debug(f"Server URL from settings: '{server_url}'")
275+
236276
if not server_url:
237277
logger.error(f"Server URL is missing for server: {server.get('id')}")
238278
raise HTTPException(
@@ -246,11 +286,11 @@ async def get_provider_instance_from_request(request, db):
246286
"api_key": server.get("apiKey", ""),
247287
"server_name": server.get("serverName", "Unknown Server")
248288
}
249-
250-
logger.info(f"Created config with server_url: {config.get('server_url', 'N/A')}")
289+
290+
logger.debug(f"Created server config: {config.get('server_name')} -> {config.get('server_url')}")
251291

252292
# Get provider instance
253-
logger.info(f"Getting provider instance for: {request.provider}, {request.server_id}")
293+
logger.debug(f"Getting provider instance for: {request.provider}, {request.server_id}")
254294
provider_instance = await provider_registry.get_provider(
255295
request.provider,
256296
request.server_id,
@@ -716,6 +756,13 @@ async def chat_completion(request: ChatCompletionRequest, db: AsyncSession = Dep
716756
"""
717757
logger = logging.getLogger(__name__)
718758
try:
759+
print(f"🎯 CHAT COMPLETION ENDPOINT CALLED")
760+
print(f"📊 Provider: {request.provider}")
761+
print(f"📊 Settings ID: {request.settings_id}")
762+
print(f"📊 Server ID: {request.server_id}")
763+
print(f"📊 Model: {request.model}")
764+
print(f"📊 User ID: {request.user_id}")
765+
print(f"📊 Stream: {request.stream}")
719766
logger.info(f"Production chat endpoint called with: provider={request.provider}, settings_id={request.settings_id}, server_id={request.server_id}, model={request.model}")
720767
logger.debug(f"Messages: {request.messages}")
721768
logger.debug(f"Params: {request.params}")

0 commit comments

Comments (0)