This repository was archived by the owner on Jun 5, 2025. It is now read-only.

Commit 499bb24

change base_url to endpoints

This commit moves the `base_url` injection out of each provider's `process_request` and into the `create_completion` endpoint handlers: the same comment-plus-assignment block is deleted from `process_request` and re-added in the endpoint for the Ollama, OpenAI, and vLLM providers, so the payload is normalized where the request enters the provider.

1 parent 4229631 · commit 499bb24

File tree: 3 files changed, +15 −15 lines


src/codegate/providers/ollama/provider.py

Lines changed: 4 additions & 4 deletions
```diff
@@ -41,10 +41,6 @@ def models(self):
         return [model["name"] for model in jsonresp.get("models", [])]
 
     async def process_request(self, data: dict, api_key: str, request_url_path: str):
-        # `base_url` is used in the providers pipeline to do the packages lookup.
-        # Force it to be the one that comes in the configuration.
-        data["base_url"] = self.base_url
-
         is_fim_request = self._is_fim_request(request_url_path, data)
         try:
             stream = await self.complete(data, api_key=None, is_fim_request=is_fim_request)
@@ -112,4 +108,8 @@ async def create_completion(request: Request):
         body = await request.body()
         data = json.loads(body)
 
+        # `base_url` is used in the providers pipeline to do the packages lookup.
+        # Force it to be the one that comes in the configuration.
+        data["base_url"] = self.base_url
+
         return await self.process_request(data, None, request.url.path)
```
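For context, here is a minimal sketch of the shape the Ollama provider takes after this change. Only the handler body and the `base_url`/`process_request` names come from the diff; the route path, the router wiring, and the class name are assumptions made to keep the example self-contained:

```python
# Sketch only: "/api/chat" and the APIRouter wiring are hypothetical;
# the handler body mirrors the lines added by this commit.
import json

from fastapi import APIRouter, Request


class OllamaProviderSketch:
    def __init__(self, base_url: str):
        self.base_url = base_url
        self.router = APIRouter()

        @self.router.post("/api/chat")  # hypothetical path
        async def create_completion(request: Request):
            body = await request.body()
            data = json.loads(body)

            # `base_url` is used in the providers pipeline to do the packages lookup.
            # Force it to be the one that comes in the configuration.
            data["base_url"] = self.base_url

            return await self.process_request(data, None, request.url.path)

    async def process_request(self, data: dict, api_key: str, request_url_path: str):
        # After this commit, the shared pipeline entry point no longer mutates
        # base_url; it receives a payload the endpoint already normalized.
        raise NotImplementedError
```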

src/codegate/providers/openai/provider.py

Lines changed: 4 additions & 4 deletions
```diff
@@ -43,10 +43,6 @@ def models(self) -> List[str]:
         return [model["id"] for model in jsonresp.get("data", [])]
 
     async def process_request(self, data: dict, api_key: str, request_url_path: str):
-        # if model starts with lm_studio, propagate it
-        if data.get("model", "").startswith("lm_studio"):
-            data["base_url"] = self.lm_studio_url + "/v1/"
-
         is_fim_request = self._is_fim_request(request_url_path, data)
 
         try:
@@ -90,4 +86,8 @@ async def create_completion(
         body = await request.body()
         data = json.loads(body)
 
+        # if model starts with lm_studio, propagate it
+        if data.get("model", "").startswith("lm_studio"):
+            data["base_url"] = self.lm_studio_url + "/v1/"
+
         return await self.process_request(data, api_key, request.url.path)
```
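The rule being relocated here is a plain prefix check. A self-contained sketch of just that logic, with `resolve_base_url` as a hypothetical free-function name (the provider in the diff does this inline on `self`), and illustrative model names and URL:

```python
def resolve_base_url(data: dict, lm_studio_url: str) -> dict:
    # if model starts with lm_studio, propagate it: route the request to the
    # LM Studio server's OpenAI-compatible /v1/ API instead of upstream OpenAI.
    if data.get("model", "").startswith("lm_studio"):
        data["base_url"] = lm_studio_url + "/v1/"
    return data


# An LM Studio model is rewritten to the local server...
print(resolve_base_url({"model": "lm_studio/llama3"}, "http://localhost:1234"))
# ...while any other model passes through unchanged.
print(resolve_base_url({"model": "gpt-4o"}, "http://localhost:1234"))
```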

src/codegate/providers/vllm/provider.py

Lines changed: 7 additions & 7 deletions
```diff
@@ -38,13 +38,6 @@ def models(self):
         return [model["id"] for model in jsonresp.get("data", [])]
 
     async def process_request(self, data: dict, api_key: str, request_url_path: str):
-        # Add the vLLM base URL to the request
-        config = Config.get_config()
-        if config:
-            data["base_url"] = config.provider_urls.get("vllm")
-        else:
-            data["base_url"] = ""
-
         is_fim_request = self._is_fim_request(request_url_path, data)
 
         try:
@@ -100,4 +93,11 @@ async def create_completion(
         body = await request.body()
         data = json.loads(body)
 
+        # Add the vLLM base URL to the request
+        config = Config.get_config()
+        if config:
+            data["base_url"] = config.provider_urls.get("vllm")
+        else:
+            data["base_url"] = ""
+
         return await self.process_request(data, api_key, request.url.path)
```
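The vLLM variant resolves the URL from global configuration and falls back to an empty string. A runnable sketch of that lookup, where this `Config` dataclass is a stand-in for codegate's real config singleton (only `get_config()` and `provider_urls` appear in the hunk; everything else is assumed):

```python
from dataclasses import dataclass, field
from typing import ClassVar, Dict, Optional


@dataclass
class Config:
    # Stand-in for codegate's Config; only get_config() and
    # provider_urls are referenced by the diff.
    provider_urls: Dict[str, str] = field(default_factory=dict)
    _instance: ClassVar[Optional["Config"]] = None

    @classmethod
    def get_config(cls) -> Optional["Config"]:
        return cls._instance


def add_vllm_base_url(data: dict) -> dict:
    # Add the vLLM base URL to the request; fall back to an empty
    # string when no configuration has been loaded, as in the diff.
    config = Config.get_config()
    if config:
        data["base_url"] = config.provider_urls.get("vllm")
    else:
        data["base_url"] = ""
    return data


# Illustrative usage with a hypothetical local vLLM server:
Config._instance = Config(provider_urls={"vllm": "http://localhost:8000"})
print(add_vllm_base_url({"model": "mistral"}))
# {'model': 'mistral', 'base_url': 'http://localhost:8000'}
```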
