Commit 28a26c1

Merge pull request #4 from BossyT/feat/openai-default
feat: switch default AI provider to OpenAI (Issue A)
2 parents 9aabd38 + a6f2a1f commit 28a26c1

File tree

4 files changed: +230 -0 lines changed

README.md

Lines changed: 13 additions & 0 deletions
@@ -38,3 +38,16 @@ Open [http://localhost:3000](http://localhost:3000)
## License

MIT

## AI Provider configuration

# Default provider (OpenAI)
AI_PROVIDER=openai
OPENAI_API_KEY=your_openai_api_key
OPENAI_MODEL=gpt-4o-mini
OPENAI_BASE_URL=https://api.openai.com/v1

# Alternative providers (optional)
ANTHROPIC_API_KEY=your_anthropic_api_key
GEMINI_API_KEY=your_gemini_api_key
GROQ_API_KEY=your_groq_api_key
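
Note: with AI_PROVIDER defaulting to openai, only OPENAI_API_KEY is strictly required; OPENAI_MODEL and OPENAI_BASE_URL fall back to gpt-4o-mini and https://api.openai.com/v1. For illustration only (not part of this commit), a minimal startup check along these lines could guard against a missing key; the helper name is hypothetical:

// Hypothetical startup check — not part of this commit.
// Ensures the default provider (OpenAI) has the variables it needs.
export function assertAiEnv(): void {
  const provider = process.env.AI_PROVIDER ?? 'openai';
  if (provider === 'openai' && !process.env.OPENAI_API_KEY) {
    throw new Error('AI_PROVIDER is "openai" but OPENAI_API_KEY is not set');
  }
  // OPENAI_MODEL and OPENAI_BASE_URL are optional; the client falls back to
  // gpt-4o-mini and https://api.openai.com/v1 respectively.
}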

app/api/ai/chat/route.ts

Lines changed: 98 additions & 0 deletions
@@ -0,0 +1,98 @@
import { NextRequest, NextResponse } from 'next/server';
import { streamChat, ChatMessage } from '@/lib/ai/openai';

// Opt into the Edge runtime. This allows streaming responses with low
// latency and keeps dependencies out of the Node.js layer.
export const runtime = 'edge';

/**
 * POST /api/ai/chat
 *
 * Accepts a JSON body containing a list of chat messages and optional model
 * configuration. Invokes the OpenAI chat completion API and streams the
 * assistant's response back as raw text. If another AI provider is
 * configured via AI_PROVIDER, a 400 will be returned.
 */
export async function POST(req: NextRequest) {
  try {
    const { messages, model, temperature } = await req.json();

    // Basic validation
    if (!Array.isArray(messages)) {
      return NextResponse.json({ success: false, error: 'messages must be an array' }, { status: 400 });
    }

    // Only support openai provider for now
    const provider = process.env.AI_PROVIDER || 'openai';
    if (provider !== 'openai') {
      return NextResponse.json({ success: false, error: `Unsupported AI provider: ${provider}` }, { status: 400 });
    }

    // Call OpenAI and forward the response
    const response = await streamChat({
      messages: messages as ChatMessage[],
      model,
      temperature,
    });

    if (!response.ok || !response.body) {
      let errorMessage: string;
      try {
        const data = await response.json();
        errorMessage = data?.error?.message || response.statusText;
      } catch {
        errorMessage = response.statusText;
      }
      return NextResponse.json({ success: false, error: errorMessage }, { status: response.status });
    }

    // Transform OpenAI's SSE stream into raw text
    const encoder = new TextEncoder();
    const openaiStream = response.body;
    const stream = new ReadableStream<Uint8Array>({
      async start(controller) {
        const reader = openaiStream!.getReader();
        const decoder = new TextDecoder('utf-8');
        let buffer = '';
        const push = (text: string) => {
          controller.enqueue(encoder.encode(text));
        };
        while (true) {
          const { value, done } = await reader.read();
          if (done) break;
          buffer += decoder.decode(value, { stream: true });
          const lines = buffer.split('\n');
          buffer = lines.pop() ?? '';
          for (const line of lines) {
            const trimmed = line.trim();
            if (!trimmed.startsWith('data:')) continue;
            const payload = trimmed.replace(/^data:\s*/, '');
            if (payload === '[DONE]') {
              controller.close();
              return;
            }
            try {
              const parsed = JSON.parse(payload);
              const delta: string = parsed.choices?.[0]?.delta?.content ?? '';
              if (delta) {
                push(delta);
              }
            } catch {
              // Skip malformed lines
            }
          }
        }
        controller.close();
      },
    });

    return new Response(stream, {
      headers: {
        'Content-Type': 'text/plain; charset=utf-8',
      },
    });
  } catch (err) {
    console.error('[api/ai/chat] Error:', err);
    return NextResponse.json({ success: false, error: (err as Error)?.message || 'Internal error' }, { status: 500 });
  }
}
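
Because the route re-emits the SSE deltas as plain text, a browser client can read the body directly with a stream reader. A minimal consumer sketch (hypothetical helper, not part of this commit; it only assumes the route path and error shape shown above):

// Hypothetical consumer sketch — not part of this commit.
// POSTs a conversation to /api/ai/chat and reports streamed text as it arrives.
import type { ChatMessage } from '@/lib/ai/openai';

export async function askAssistant(
  messages: ChatMessage[],
  onDelta: (text: string) => void,
): Promise<void> {
  const res = await fetch('/api/ai/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ messages }),
  });
  if (!res.ok || !res.body) {
    // Error responses are JSON: { success: false, error: string }
    const { error } = await res.json();
    throw new Error(error ?? res.statusText);
  }
  const reader = res.body.getReader();
  const decoder = new TextDecoder('utf-8');
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    onDelta(decoder.decode(value, { stream: true }));
  }
}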

app/api/health/route.ts

Lines changed: 36 additions & 0 deletions
@@ -0,0 +1,36 @@
import { NextResponse } from 'next/server';

export const runtime = 'edge';

export async function GET() {
  const services: Record<string, any> = {};
  const provider = process.env.AI_PROVIDER || 'openai';
  if (provider === 'openai') {
    const model = process.env.OPENAI_MODEL || 'gpt-4o-mini';
    const apiKey = process.env.OPENAI_API_KEY;
    if (!apiKey) {
      services.openai = { ok: false, model, error: 'Missing OPENAI_API_KEY' };
    } else {
      try {
        const baseUrl = (process.env.OPENAI_BASE_URL?.replace(/\/+$/, '') || 'https://api.openai.com/v1');
        const res = await fetch(`${baseUrl}/models`, { headers: { Authorization: `Bearer ${apiKey}` } });
        if (res.ok) {
          services.openai = { ok: true, model };
        } else {
          let errorMessage: string;
          try {
            const data = await res.json();
            errorMessage = data?.error?.message || res.statusText;
          } catch {
            errorMessage = res.statusText;
          }
          services.openai = { ok: false, model, error: errorMessage };
        }
      } catch (error) {
        services.openai = { ok: false, model, error: (error as Error).message };
      }
    }
  }
  const status = Object.values(services).every((svc: any) => svc?.ok) ? 'ok' : 'error';
  return NextResponse.json({ status, services });
}
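
The route returns { status: 'ok' | 'error', services } where each service entry carries { ok, model, error? }. One possible deployment check against it (a sketch only; the URL variable and Node 18+ fetch are assumptions, none of this is in the commit):

// Hypothetical deployment check — not part of this commit.
// Exits non-zero when /api/health reports any service as not ok.
const HEALTH_URL = process.env.HEALTH_URL ?? 'http://localhost:3000/api/health';

async function main(): Promise<void> {
  const res = await fetch(HEALTH_URL);
  const { status, services } = await res.json();
  if (status !== 'ok') {
    // Each entry looks like { ok: boolean, model: string, error?: string }
    console.error('Health check failed:', JSON.stringify(services, null, 2));
    process.exit(1);
  }
  console.log('All services healthy:', Object.keys(services).join(', '));
}

main();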

lib/ai/openai.ts

Lines changed: 83 additions & 0 deletions
@@ -0,0 +1,83 @@
/**
 * Minimal OpenAI client for StarStack.
 *
 * This module reads configuration from environment variables and exposes a
 * helper that performs chat completions with streaming support. It is
 * deliberately small and self-contained to avoid pulling heavy dependencies
 * into the Edge runtime.
 *
 * Expected environment variables:
 * - AI_PROVIDER: when set to "openai" this client will be used. Other
 *   values are ignored.
 * - OPENAI_API_KEY: your OpenAI API key (required).
 * - OPENAI_MODEL: optional override of the default model. If absent the
 *   fallback is "gpt-4o-mini" to align with the project default.
 * - OPENAI_BASE_URL: optional override for the API base URL. When unset
 *   the standard https://api.openai.com/v1 endpoint is used.
 */

/**
 * Chat message interface compatible with OpenAI's API.
 */
export interface ChatMessage {
  role: 'system' | 'user' | 'assistant' | string;
  content: string;
}

/**
 * Returns the currently configured OpenAI model. Falls back to
 * `gpt-4o-mini` if no override is provided.
 */
export function getDefaultModel(): string {
  return process.env.OPENAI_MODEL || 'gpt-4o-mini';
}

/**
 * Internal helper that constructs the full API URL. Allows overriding the
 * base via OPENAI_BASE_URL while falling back to the public OpenAI API.
 */
function buildUrl(path: string): string {
  const base = (process.env.OPENAI_BASE_URL?.replace(/\/+$/, '') ||
    'https://api.openai.com/v1');
  return `${base}${path.startsWith('/') ? '' : '/'}${path}`;
}

/**
 * Performs a chat completion request against the OpenAI API and returns the
 * streaming Response. The returned Response can be piped directly to a
 * Next.js API route or consumed manually.
 *
 * @param messages The chat history. Each message must include a `role`
 *   ("system" | "user" | "assistant") and `content` string.
 * @param model Optional model override. Defaults to getDefaultModel().
 * @param temperature Optional sampling temperature. Defaults to 0.5.
 */
export async function streamChat({
  messages,
  model,
  temperature,
}: {
  messages: ChatMessage[];
  model?: string;
  temperature?: number;
}): Promise<Response> {
  const apiKey = process.env.OPENAI_API_KEY;
  if (!apiKey) {
    throw new Error('Missing OPENAI_API_KEY');
  }
  const resolvedModel = model || getDefaultModel();

  return fetch(buildUrl('/chat/completions'), {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${apiKey}`,
    },
    body: JSON.stringify({
      model: resolvedModel,
      messages,
      temperature: typeof temperature === 'number' ? temperature : 0.5,
      stream: true,
    }),
  });
}
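
The docblock notes that the Response returned by streamChat can be piped directly to a Next.js API route. A minimal pass-through sketch of that usage (the route path, header choice, and everything else here are assumptions, not part of this commit):

// Hypothetical pass-through route — not part of this commit.
// Forwards OpenAI's raw SSE stream to the client instead of converting it to plain text.
import { streamChat, ChatMessage } from '@/lib/ai/openai';

export const runtime = 'edge';

export async function POST(req: Request) {
  const { messages } = (await req.json()) as { messages: ChatMessage[] };
  const upstream = await streamChat({ messages });
  return new Response(upstream.body, {
    status: upstream.status,
    headers: { 'Content-Type': 'text/event-stream; charset=utf-8' },
  });
}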
