
Commit c050cfd

fix: correct problems with ollama and aider
It has a pair of problems:
- always need to check that the model contains some value
- only send chunks that contain data, and do not send double newlines, as they get treated as a separate chunk

Closes: #586
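For context on the second problem, a minimal sketch (hypothetical client code, not aider's actual parser) of why the double newline matters: a consumer that splits the response body on newlines sees the empty string between "\n\n" as a chunk of its own.

# Hypothetical illustration, not taken from this repository: splitting the old
# and new output formats on "\n" shows where the spurious empty chunks came from.
old_body = '{"content": "Hel"}\n\n{"content": "lo"}\n\n'   # previous output format
new_body = '{"content": "Hel"}\n{"content": "lo"}\n'       # format after this commit

print(old_body.split("\n"))  # ['{"content": "Hel"}', '', '{"content": "lo"}', '', '']
print(new_body.split("\n"))  # ['{"content": "Hel"}', '{"content": "lo"}', '']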
1 parent 93a5600 commit c050cfd

File tree

2 files changed: +17 -10 lines


src/codegate/providers/ollama/completion_handler.py

Lines changed: 14 additions & 10 deletions
@@ -1,3 +1,4 @@
+import json
 from typing import AsyncIterator, Optional, Union
 
 import structlog
@@ -11,18 +12,21 @@
 
 
 async def ollama_stream_generator(
-    stream: AsyncIterator[ChatResponse],
+    stream: AsyncIterator[ChatResponse]
 ) -> AsyncIterator[str]:
     """OpenAI-style SSE format"""
     try:
         async for chunk in stream:
-            print(chunk)
             try:
-                yield f"{chunk.model_dump_json()}\n\n"
+                content = chunk.model_dump_json()
+                if content:
+                    yield f"{chunk.model_dump_json()}\n"
             except Exception as e:
-                yield f"{str(e)}\n\n"
+                if str(e):
+                    yield f"{str(e)}\n"
     except Exception as e:
-        yield f"{str(e)}\n\n"
+        if str(e):
+            yield f"{str(e)}\n"
 
 
 class OllamaShim(BaseCompletionHandler):
@@ -39,17 +43,17 @@ async def execute_completion(
     ) -> Union[ChatResponse, GenerateResponse]:
         """Stream response directly from Ollama API."""
         if is_fim_request:
-            prompt = request["messages"][0]["content"]
+            prompt = request["messages"][0].get("content", "")
             response = await self.client.generate(
-                model=request["model"], prompt=prompt, stream=stream, options=request["options"]
+                model=request["model"], prompt=prompt, stream=stream, options=request["options"]  # type: ignore
            )
         else:
             response = await self.client.chat(
                 model=request["model"],
                 messages=request["messages"],
-                stream=stream,
-                options=request["options"],
-            )
+                stream=stream,  # type: ignore
+                options=request["options"],  # type: ignore
+            )  # type: ignore
         return response
 
     def _create_streaming_response(self, stream: AsyncIterator[ChatResponse]) -> StreamingResponse:
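To see the new streaming behaviour in isolation, here is a self-contained sketch; Chunk is a stand-in for ollama's ChatResponse, on the assumption that model_dump_json() returns the chunk serialized as a JSON string (as it does for pydantic models).

import asyncio
import json
from typing import AsyncIterator


class Chunk:
    """Stand-in for ollama.ChatResponse in this sketch."""

    def __init__(self, text: str) -> None:
        self.text = text

    def model_dump_json(self) -> str:
        return json.dumps({"message": {"content": self.text}})


async def stream_generator(stream: AsyncIterator[Chunk]) -> AsyncIterator[str]:
    # Mirrors the patched ollama_stream_generator: forward only non-empty
    # serialized chunks, each terminated by a single newline.
    async for chunk in stream:
        content = chunk.model_dump_json()
        if content:
            yield f"{content}\n"


async def main() -> None:
    async def fake_stream() -> AsyncIterator[Chunk]:
        for text in ["Hel", "lo"]:
            yield Chunk(text)

    async for line in stream_generator(fake_stream()):
        print(repr(line))  # one JSON object per line, no blank separators


asyncio.run(main())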

src/codegate/providers/ollama/provider.py

Lines changed: 3 additions & 0 deletions
@@ -58,6 +58,9 @@ async def show_model(request: Request):
             https://github.com/ollama/ollama/blob/main/docs/api.md#show-model-information
             """
             body = await request.body()
+            body_json = json.loads(body)
+            if "name" not in body_json:
+                raise HTTPException(status_code=400, detail="model is required in the request body")
             async with httpx.AsyncClient() as client:
                 response = await client.post(
                     f"{self.base_url}/api/show",
