Commit a9165f4

feat: multi-provider capability added to http integrations
Parent: 4c4defb

10 files changed: +328 additions, −12 deletions


docs/contributing/http-integration.md

Lines changed: 42 additions & 5 deletions
@@ -36,6 +36,8 @@ tests/transports-integrations/tests/integrations/
 
 HTTP integrations provide API-compatible endpoints that translate between external service formats (OpenAI, Anthropic, etc.) and Bifrost's unified request/response format. Each integration follows a standardized pattern using Bifrost's `GenericRouter` architecture.
 
+**Key Feature**: All integrations should support **multi-provider model syntax** using `ParseModelString`, allowing users to access any provider through any SDK (e.g., `"anthropic/claude-3-sonnet"` via OpenAI SDK).
+
 ### **Integration Architecture Flow**
 
 ```mermaid
@@ -150,6 +152,7 @@ package your_integration
 
 import (
 	"github.com/maximhq/bifrost/core/schemas"
+	"github.com/maximhq/bifrost/transports/bifrost-http/integrations"
 )
 
 // YourChatRequest represents the incoming request format
@@ -179,6 +182,11 @@ type YourChatResponse struct {
 
 // ConvertToBifrostRequest converts your service format to Bifrost format
 func (r *YourChatRequest) ConvertToBifrostRequest() *schemas.BifrostRequest {
+	// Enable multi-provider support with ParseModelString.
+	// This allows users to specify "provider/model" (e.g., "anthropic/claude-3-sonnet")
+	// or just "model" (uses your integration's default provider).
+	provider, modelName := integrations.ParseModelString(r.Model, schemas.YourDefaultProvider)
+
 	// Convert messages
 	bifrostMessages := make([]schemas.ModelChatMessage, len(r.Messages))
 	for i, msg := range r.Messages {
@@ -195,7 +203,8 @@ func (r *YourChatRequest) ConvertToBifrostRequest() *schemas.BifrostRequest {
 	}
 
 	return &schemas.BifrostRequest{
-		Model:       r.Model,
+		Model:       modelName, // Clean model name without provider prefix
+		Provider:    provider,  // Extracted or default provider
 		MaxTokens:   &r.MaxTokens,
 		Temperature: r.Temperature,
 		Input: schemas.BifrostInput{
@@ -532,6 +541,7 @@ import (
 - [ ] **Type Definitions** - Implemented `types.go` with request/response types
 - [ ] **Request Conversion** - Properly converts service format to Bifrost format
 - [ ] **Response Conversion** - Properly converts Bifrost format to service format
+- [ ] **Multi-Provider Support** - Uses `ParseModelString` to enable "provider/model" syntax
 - [ ] **Error Handling** - Handles all error cases gracefully
 - [ ] **Tool Support** - Supports function/tool calling if applicable
 - [ ] **Multi-Modal Support** - Supports images/vision if applicable
@@ -566,20 +576,47 @@ import (
 
 ## 🔧 **Common Patterns**
 
-### **Model Provider Detection**
+### **Multi-Provider Model Support** (as in the `types.go` example above)
 
-Use Bifrost's built-in provider detection:
+Enable users to access multiple providers through your integration using `ParseModelString`:
 
 ```go
 import "github.com/maximhq/bifrost/transports/bifrost-http/integrations"
 
-// In request converter
+// In request converter - enables "provider/model" syntax
+func (r *YourChatRequest) ConvertToBifrostRequest() *schemas.BifrostRequest {
+	// ParseModelString handles both "provider/model" and "model" formats:
+	// - "anthropic/claude-3-sonnet" → (schemas.Anthropic, "claude-3-sonnet")
+	// - "claude-3-sonnet" → (schemas.YourDefaultProvider, "claude-3-sonnet")
+	provider, modelName := integrations.ParseModelString(r.Model, schemas.YourDefaultProvider)
+
+	return &schemas.BifrostRequest{
+		Model:    modelName, // Clean model name without provider prefix
+		Provider: provider,  // Extracted or default provider
+		// ... rest of conversion
+	}
+}
+```
+
+**Benefits for Users:**
+
+- **OpenAI SDK**: `model: "anthropic/claude-3-sonnet"` routes to Anthropic
+- **Anthropic SDK**: `model: "openai/gpt-4o"` routes to OpenAI
+- **Your SDK**: `model: "vertex/gemini-pro"` routes to Google Vertex
+- **Backward Compatible**: `model: "claude-3-sonnet"` uses your default provider
+
+### **Alternative: Pattern-Based Detection**
+
+For automatic provider detection without prefixes:
+
+```go
+// Legacy approach - still supported but less flexible
 func (r *YourChatRequest) ConvertToBifrostRequest() *schemas.BifrostRequest {
 	provider := integrations.GetProviderFromModel(r.Model)
 
 	return &schemas.BifrostRequest{
 		Model: r.Model,
-		Provider: &provider,
+		Provider: provider,
 		// ... rest of conversion
 	}
 }
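
The implementation of `ParseModelString` itself is not part of this diff. As a rough mental model, a minimal sketch of its likely behavior, assuming a split on the first `/` checked against the `validProviders` map described below; the real code in `transports/bifrost-http/integrations/utils.go` may differ:

```go
// Sketch only - not the actual implementation from this commit.
package integrations

import (
	"strings"

	"github.com/maximhq/bifrost/core/schemas"
)

// validProviders mirrors the map described in provider.md (abbreviated here).
var validProviders = map[schemas.ModelProvider]bool{
	schemas.Anthropic: true,
	// ... remaining providers
}

// ParseModelString returns (provider, model) for "provider/model" inputs and
// (defaultProvider, model) when no recognized provider prefix is present.
func ParseModelString(model string, defaultProvider schemas.ModelProvider) (schemas.ModelProvider, string) {
	if prefix, rest, found := strings.Cut(model, "/"); found {
		candidate := schemas.ModelProvider(strings.ToLower(prefix))
		if validProviders[candidate] {
			return candidate, rest
		}
	}
	return defaultProvider, model
}
```

Because only the first `/` is significant, a model like `"ollama/llama3.1:8b"` would still split cleanly into `("ollama", "llama3.1:8b")`.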

docs/contributing/provider.md

Lines changed: 45 additions & 0 deletions
@@ -595,6 +595,51 @@ Before submitting your provider implementation:
 - [ ] **Key Handling** - Proper API key requirement configuration
 - [ ] **Configuration** - Standard provider configuration support
 
+### **HTTP Transport Integration**
+
+- [ ] **Provider Recognition** - Added to `validProviders` map in `transports/bifrost-http/integrations/utils.go`
+- [ ] **Model Patterns** - Added patterns to appropriate `is*Model()` functions in utils.go
+- [ ] **Transport Tests** - All tests pass in `tests/transports-integrations/` directory
+- [ ] **Multi-Provider Support** - Verified `ParseModelString` correctly handles your provider prefix
+
+**Required Updates in `utils.go`:**
+
+```go
+// 1. Add to validProviders map
+var validProviders = map[schemas.ModelProvider]bool{
+	// ... existing providers
+	schemas.YourProvider: true, // Add this line
+}
+
+// 2. Add model patterns to appropriate function
+func isYourProviderModel(model string) bool {
+	yourProviderPatterns := []string{
+		"your-provider-pattern", "your-model-prefix", "yourprovider/",
+	}
+	return matchesAnyPattern(model, yourProviderPatterns)
+}
+
+// 3. Add pattern check to GetProviderFromModel
+func GetProviderFromModel(model string) schemas.ModelProvider {
+	// ... existing checks
+
+	// Your Provider Models
+	if isYourProviderModel(modelLower) {
+		return schemas.YourProvider
+	}
+
+	// ... rest of function
+}
+```
+
+**Test Your Integration:**
+
+```bash
+# Run HTTP transport integration tests
+cd tests/transports-integrations
+python -m pytest tests/integrations/ -v
+```
+
 ---
 
 ## 🚀 **Advanced Features**
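
The snippet above calls `matchesAnyPattern` without defining it. A minimal sketch of the assumed behavior, treating each pattern as a case-insensitive substring; the actual helper in `utils.go` may differ:

```go
// Sketch only - assumes a simple case-insensitive substring check.
package integrations

import "strings"

// matchesAnyPattern reports whether model contains any of the given patterns.
func matchesAnyPattern(model string, patterns []string) bool {
	modelLower := strings.ToLower(model)
	for _, pattern := range patterns {
		if strings.Contains(modelLower, pattern) {
			return true
		}
	}
	return false
}
```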

docs/usage/http-transport/integrations/README.md

Lines changed: 94 additions & 1 deletion
@@ -64,7 +64,7 @@ client = openai.OpenAI(
 
 Your existing code gets these features automatically:
 
-- **Multi-provider fallbacks** - Automatic failover between providers
+- **Multi-provider fallbacks** - Automatic failover between multiple providers, regardless of the SDK you use
 - **Load balancing** - Distribute requests across multiple API keys
 - **Rate limiting** - Built-in request throttling and queuing
 - **Tool integration** - MCP tools available in all requests
@@ -161,6 +161,99 @@ export ANTHROPIC_BASE_URL="http://bifrost:8080/anthropic"
 
 ---
 
+## 🌐 Multi-Provider Usage
+
+### **Provider-Prefixed Models**
+
+Use multiple providers seamlessly by prefixing model names with the provider:
+
+```python
+import openai
+
+# Single client, multiple providers
+client = openai.OpenAI(
+    base_url="http://localhost:8080/openai",
+    api_key="dummy"  # API keys configured in Bifrost
+)
+
+# OpenAI models
+response1 = client.chat.completions.create(
+    model="gpt-4o-mini",  # defaults to OpenAI, since this is the OpenAI SDK
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+
+# Anthropic models using OpenAI SDK format
+response2 = client.chat.completions.create(
+    model="anthropic/claude-3-sonnet-20240229",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+
+# Google Vertex models
+response3 = client.chat.completions.create(
+    model="vertex/gemini-pro",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+
+# Azure OpenAI models
+response4 = client.chat.completions.create(
+    model="azure/gpt-4o",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+
+# Local Ollama models
+response5 = client.chat.completions.create(
+    model="ollama/llama3.1:8b",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+```
+
+### **Provider-Specific Optimization**
+
+```python
+import openai
+
+client = openai.OpenAI(
+    base_url="http://localhost:8080/openai",
+    api_key="dummy"
+)
+
+def choose_optimal_model(task_type: str, content: str):
+    """Choose the best model based on task requirements"""
+
+    if task_type == "code":
+        # OpenAI excels at code generation
+        return "openai/gpt-4o-mini"
+
+    elif task_type == "creative":
+        # Anthropic is great for creative writing
+        return "anthropic/claude-3-sonnet-20240229"
+
+    elif task_type == "analysis" and len(content) > 10000:
+        # Anthropic has larger context windows
+        return "anthropic/claude-3-sonnet-20240229"
+
+    elif task_type == "multilingual":
+        # Google models excel at multilingual tasks
+        return "vertex/gemini-pro"
+
+    else:
+        # Default to fastest/cheapest
+        return "openai/gpt-4o-mini"
+
+# Usage examples
+code_response = client.chat.completions.create(
+    model=choose_optimal_model("code", ""),
+    messages=[{"role": "user", "content": "Write a Python web scraper"}]
+)
+
+creative_response = client.chat.completions.create(
+    model=choose_optimal_model("creative", ""),
+    messages=[{"role": "user", "content": "Write a short story about AI"}]
+)
+```
+
+---
+
 ## 🚀 Deployment Scenarios
 
 ### **Microservices Architecture**
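
Since the provider prefix travels in the ordinary `model` field, no SDK is strictly required. A minimal raw-HTTP sketch in Go; the exact path (`/openai/chat/completions`) is an assumption derived from the `base_url` shown above:

```go
// Sketch only - posts a provider-prefixed model straight to Bifrost's
// OpenAI-compatible endpoint, without any SDK.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	body, _ := json.Marshal(map[string]any{
		"model": "anthropic/claude-3-sonnet-20240229", // prefix in the plain model field
		"messages": []map[string]string{
			{"role": "user", "content": "Hello!"},
		},
	})

	resp, err := http.Post("http://localhost:8080/openai/chat/completions",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```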

docs/usage/http-transport/integrations/anthropic-compatible.md

Lines changed: 36 additions & 0 deletions
@@ -542,6 +542,42 @@ test_tool_use()
 
 ---
 
+## 🌐 Multi-Provider Support
+
+Use multiple providers with Anthropic SDK format by prefixing model names:
+
+```python
+import anthropic
+
+client = anthropic.Anthropic(
+    base_url="http://localhost:8080/anthropic",
+    api_key="dummy"  # API keys configured in Bifrost
+)
+
+# Anthropic models (default)
+response1 = client.messages.create(
+    model="claude-3-sonnet-20240229",
+    max_tokens=100,
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+
+# OpenAI models via Anthropic SDK
+response2 = client.messages.create(
+    model="openai/gpt-4o-mini",
+    max_tokens=100,
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+
+# Vertex models via Anthropic SDK
+response3 = client.messages.create(
+    model="vertex/gemini-pro",
+    max_tokens=100,
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+```
+
+---
+
 ## 🔧 Configuration
 
 ### **Bifrost Config for Anthropic**

docs/usage/http-transport/integrations/genai-compatible.md

Lines changed: 27 additions & 0 deletions
@@ -493,6 +493,33 @@ test_function_calling()
 
 ---
 
+## 🌐 Multi-Provider Support
+
+Use multiple providers with Google GenAI SDK format by prefixing model names:
+
+```python
+import google.generativeai as genai
+
+genai.configure(
+    api_key="dummy",  # API keys configured in Bifrost
+    client_options={"api_endpoint": "http://localhost:8080/genai"}
+)
+
+# Google models (default)
+model1 = genai.GenerativeModel('gemini-pro')
+response1 = model1.generate_content("Hello!")
+
+# OpenAI models via GenAI SDK
+model2 = genai.GenerativeModel('openai/gpt-4o-mini')
+response2 = model2.generate_content("Hello!")
+
+# Anthropic models via GenAI SDK
+model3 = genai.GenerativeModel('anthropic/claude-3-sonnet-20240229')
+response3 = model3.generate_content("Hello!")
+```
+
+---
+
 ## 🔧 Configuration
 
 ### **Bifrost Config for Google GenAI**

docs/usage/http-transport/integrations/openai-compatible.md

Lines changed: 33 additions & 0 deletions
@@ -467,6 +467,39 @@ benchmark_response_time(openai_client, "Direct OpenAI")
 
 ---
 
+## 🌐 Multi-Provider Support
+
+Use multiple providers with OpenAI SDK format by prefixing model names:
+
+```python
+import openai
+
+client = openai.OpenAI(
+    base_url="http://localhost:8080/openai",
+    api_key="dummy"  # API keys configured in Bifrost
+)
+
+# OpenAI models (default)
+response1 = client.chat.completions.create(
+    model="gpt-4o-mini",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+
+# Anthropic models via OpenAI SDK
+response2 = client.chat.completions.create(
+    model="anthropic/claude-3-sonnet-20240229",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+
+# Vertex models via OpenAI SDK
+response3 = client.chat.completions.create(
+    model="vertex/gemini-pro",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+```
+
+---
+
 ## 🔧 Configuration
 
 ### **Bifrost Config for OpenAI**

transports/bifrost-http/integrations/anthropic/types.go

Lines changed: 5 additions & 2 deletions
@@ -6,6 +6,7 @@ import (
 
 	bifrost "github.com/maximhq/bifrost/core"
 	"github.com/maximhq/bifrost/core/schemas"
+	"github.com/maximhq/bifrost/transports/bifrost-http/integrations"
 )
 
 var fnTypePtr = bifrost.Ptr(string(schemas.ToolChoiceTypeFunction))
@@ -133,9 +134,11 @@ func (mc *AnthropicContent) UnmarshalJSON(data []byte) error {
 
 // ConvertToBifrostRequest converts an Anthropic messages request to Bifrost format
 func (r *AnthropicMessageRequest) ConvertToBifrostRequest() *schemas.BifrostRequest {
+	provider, model := integrations.ParseModelString(r.Model, schemas.Anthropic)
+
 	bifrostReq := &schemas.BifrostRequest{
-		Provider: schemas.Anthropic,
-		Model:    r.Model,
+		Provider: provider,
+		Model:    model,
 	}
 
 	messages := []schemas.BifrostMessage{}
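
For intuition, here is how the converter above would behave for prefixed and bare model strings. This is a hypothetical illustration, not part of this commit; `schemas.OpenAI` is assumed to be the OpenAI provider constant:

```go
// Hypothetical illustration, assuming the types shown in this diff.
func ExampleConvertToBifrostRequest() {
	prefixed := &AnthropicMessageRequest{Model: "openai/gpt-4o"}
	req := prefixed.ConvertToBifrostRequest()
	// req.Provider == schemas.OpenAI (assumed constant name), req.Model == "gpt-4o"

	bare := &AnthropicMessageRequest{Model: "claude-3-sonnet-20240229"}
	req = bare.ConvertToBifrostRequest()
	// No prefix: falls back to schemas.Anthropic, model string unchanged
	_ = req
}
```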
