This repository was archived by the owner on Jun 5, 2025. It is now read-only.

Fix to also prepend openrouter/ when muxing #983

Merged: 1 commit, merged on Feb 10, 2025
src/codegate/muxing/adapter.py: 5 changes (4 additions, 1 deletion)
@@ -58,7 +58,10 @@ def _from_anthropic_to_openai(self, anthropic_body: dict) -> dict:
 
     def _get_provider_formatted_url(self, model_route: rulematcher.ModelRoute) -> str:
         """Get the provider formatted URL to use in base_url. Note this value comes from DB"""
-        if model_route.endpoint.provider_type == db_models.ProviderType.openai:
+        if model_route.endpoint.provider_type in [
+            db_models.ProviderType.openai,
+            db_models.ProviderType.openrouter,
+        ]:
             return f"{model_route.endpoint.endpoint}/v1"
         return model_route.endpoint.endpoint

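For context, a minimal runnable sketch of the URL rule this hunk encodes. The ProviderType enum below is a stand-in for db_models.ProviderType, and the endpoint string is an assumed example (the real value comes from the DB, per the docstring): OpenRouter exposes an OpenAI-compatible API, so it now gets the same "/v1" suffix the OpenAI provider already received.

    from enum import Enum

    class ProviderType(str, Enum):  # stand-in for db_models.ProviderType
        openai = "openai"
        openrouter = "openrouter"
        anthropic = "anthropic"

    def provider_formatted_url(provider_type: ProviderType, endpoint: str) -> str:
        # OpenAI-compatible providers expect requests under /v1.
        if provider_type in [ProviderType.openai, ProviderType.openrouter]:
            return f"{endpoint}/v1"
        return endpoint

    # Assumed example endpoints; the stored values may differ.
    assert provider_formatted_url(ProviderType.openrouter, "https://openrouter.ai/api") == "https://openrouter.ai/api/v1"
    assert provider_formatted_url(ProviderType.anthropic, "https://api.anthropic.com") == "https://api.anthropic.com"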
src/codegate/providers/openrouter/provider.py: 24 changes (17 additions, 7 deletions)
@@ -4,6 +4,7 @@
 from fastapi import Header, HTTPException, Request
 from litellm.types.llms.openai import ChatCompletionRequest
 
+from codegate.clients.clients import ClientType
 from codegate.clients.detector import DetectClient
 from codegate.pipeline.factory import PipelineFactory
 from codegate.providers.fim_analyzer import FIMAnalyzer
@@ -34,6 +35,21 @@ def __init__(self, pipeline_factory: PipelineFactory):
     def provider_route_name(self) -> str:
         return "openrouter"
 
+    async def process_request(
+        self,
+        data: dict,
+        api_key: str,
+        is_fim_request: bool,
+        client_type: ClientType,
+    ):
+        # litellm workaround - add openrouter/ prefix to model name to make it openai-compatible
+        # once we get rid of litellm, this can simply be removed
+        original_model = data.get("model", "")
+        if not original_model.startswith("openrouter/"):
+            data["model"] = f"openrouter/{original_model}"
+
+        return await super().process_request(data, api_key, is_fim_request, client_type)
+
     def _setup_routes(self):
         @self.router.post(f"/{self.provider_route_name}/api/v1/chat/completions")
         @self.router.post(f"/{self.provider_route_name}/chat/completions")
@@ -52,14 +68,8 @@ async def create_completion(
 
             base_url = self._get_base_url()
             data["base_url"] = base_url
-
-            # litellm workaround - add openrouter/ prefix to model name to make it openai-compatible
-            # once we get rid of litellm, this can simply be removed
-            original_model = data.get("model", "")
-            if not original_model.startswith("openrouter/"):
-                data["model"] = f"openrouter/{original_model}"
-
             is_fim_request = FIMAnalyzer.is_fim_request(request.url.path, data)
+
             return await self.process_request(
                 data,
                 api_key,
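Moving the litellm workaround out of the create_completion route handler and into a process_request override means the prefix is applied on every path into the provider, including the muxing path that motivated this PR. A standalone sketch of the prefixing rule (the function name is illustrative):

    def ensure_openrouter_prefix(model: str) -> str:
        # litellm routes OpenRouter models by the "openrouter/" prefix;
        # checking first keeps the operation idempotent.
        if not model.startswith("openrouter/"):
            return f"openrouter/{model}"
        return model

    assert ensure_openrouter_prefix("gpt-4") == "openrouter/gpt-4"
    assert ensure_openrouter_prefix("openrouter/gpt-4") == "openrouter/gpt-4"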
tests/providers/openrouter/test_openrouter_provider.py: 8 changes (4 additions, 4 deletions)
@@ -1,5 +1,5 @@
 import json
-from unittest.mock import AsyncMock, MagicMock
+from unittest.mock import AsyncMock, MagicMock, patch
 
 import pytest
 from fastapi import HTTPException
@@ -26,11 +26,11 @@ def test_get_base_url(provider):
 
 
 @pytest.mark.asyncio
-async def test_model_prefix_added():
+@patch("codegate.providers.openai.OpenAIProvider.process_request")
+async def test_model_prefix_added(mocked_parent_process_request):
     """Test that model name gets prefixed with openrouter/ when not already present"""
     mock_factory = MagicMock(spec=PipelineFactory)
     provider = OpenRouterProvider(mock_factory)
-    provider.process_request = AsyncMock()
 
     # Mock request
     mock_request = MagicMock(spec=Request)
@@ -47,7 +47,7 @@ async def test_model_prefix_added():
     await create_completion(request=mock_request, authorization="Bearer test-token")
 
     # Verify process_request was called with prefixed model
-    call_args = provider.process_request.call_args[0]
+    call_args = mocked_parent_process_request.call_args[0]
     assert call_args[0]["model"] == "openrouter/gpt-4"


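Note the patch target: the test now patches the parent class's process_request (OpenAIProvider) instead of replacing the provider's own method, so the OpenRouterProvider override under test actually runs. A self-contained sketch of that pattern, with stand-in classes:

    import asyncio
    from unittest.mock import AsyncMock, patch

    class Parent:  # stand-in for OpenAIProvider
        async def process_request(self, data: dict):
            raise NotImplementedError  # replaced by the patch below

    class Child(Parent):  # stand-in for OpenRouterProvider
        async def process_request(self, data: dict):
            data["model"] = f"openrouter/{data['model']}"
            return await super().process_request(data)

    async def demo():
        # Patching the parent method lets the child's override run while
        # capturing what it forwards upward.
        with patch.object(Parent, "process_request", new=AsyncMock()) as mocked:
            await Child().process_request({"model": "gpt-4"})
            assert mocked.call_args[0][0]["model"] == "openrouter/gpt-4"

    asyncio.run(demo())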