import { NextRequest, NextResponse } from 'next/server';
import { streamChat, ChatMessage } from '@/lib/ai/openai';
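// NOTE: streamChat is assumed to call OpenAI's Chat Completions endpoint with
// `stream: true` and to return the raw fetch Response with its SSE body
// unconsumed, so it can be re-streamed below.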

// Opt into the Edge runtime.  This allows streaming responses with low
// latency and keeps dependencies out of the Node.js layer.
export const runtime = 'edge';

/**
 * POST /api/ai/chat
 *
 * Accepts a JSON body containing a list of chat messages and optional model
 * configuration.  Invokes the OpenAI Chat Completions API and streams the
 * assistant's response back as raw text.  If another AI provider is
 * configured via AI_PROVIDER, a 400 is returned.
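 *
 * An example request body (model and temperature are optional; accepted model
 * names depend on the streamChat helper and your OpenAI account):
 *
 *   {
 *     "messages": [{ "role": "user", "content": "Hello!" }],
 *     "temperature": 0.7
 *   }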
 */
export async function POST(req: NextRequest) {
  try {
    const { messages, model, temperature } = await req.json();

    // Basic validation
    if (!Array.isArray(messages)) {
      return NextResponse.json(
        { success: false, error: 'messages must be an array' },
        { status: 400 },
      );
    }

    // Only the OpenAI provider is supported for now
    const provider = process.env.AI_PROVIDER || 'openai';
    if (provider !== 'openai') {
      return NextResponse.json(
        { success: false, error: `Unsupported AI provider: ${provider}` },
        { status: 400 },
      );
    }

    // Call OpenAI and forward the response
    const response = await streamChat({
      messages: messages as ChatMessage[],
      model,
      temperature,
    });

    if (!response.ok || !response.body) {
      let errorMessage: string;
      try {
        const data = await response.json();
        errorMessage = data?.error?.message || response.statusText;
      } catch {
        errorMessage = response.statusText;
      }
      return NextResponse.json(
        { success: false, error: errorMessage },
        { status: response.status },
      );
    }

    // Transform OpenAI's SSE stream into raw text
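    // Each upstream event is a line of the form
    //   data: {"choices":[{"delta":{"content":"Hel"}}], ...}
    // followed eventually by a final "data: [DONE]" sentinel.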
    const encoder = new TextEncoder();
    const openaiStream = response.body;
    const stream = new ReadableStream<Uint8Array>({
      async start(controller) {
        const reader = openaiStream.getReader();
        const decoder = new TextDecoder('utf-8');
        let buffer = '';
        const push = (text: string) => {
          controller.enqueue(encoder.encode(text));
        };
        try {
          while (true) {
            const { value, done } = await reader.read();
            if (done) break;
            buffer += decoder.decode(value, { stream: true });
            const lines = buffer.split('\n');
            buffer = lines.pop() ?? '';
            for (const line of lines) {
              const trimmed = line.trim();
              if (!trimmed.startsWith('data:')) continue;
              const payload = trimmed.replace(/^data:\s*/, '');
              if (payload === '[DONE]') {
                controller.close();
                return;
              }
              try {
                const parsed = JSON.parse(payload);
                const delta: string = parsed.choices?.[0]?.delta?.content ?? '';
                if (delta) {
                  push(delta);
                }
              } catch {
                // Skip malformed lines
              }
            }
          }
          controller.close();
        } catch (err) {
          // Surface upstream read failures instead of leaving the client hanging
          controller.error(err);
        }
      },
    });

    return new Response(stream, {
      headers: {
        'Content-Type': 'text/plain; charset=utf-8',
      },
    });
  } catch (err) {
    console.error('[api/ai/chat] Error:', err);
    return NextResponse.json(
      { success: false, error: (err as Error)?.message || 'Internal error' },
      { status: 500 },
    );
  }
}
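
// For reference, a minimal client-side consumer of this endpoint might look
// like the sketch below (assumes the route is mounted at /api/ai/chat):
//
//   const res = await fetch('/api/ai/chat', {
//     method: 'POST',
//     headers: { 'Content-Type': 'application/json' },
//     body: JSON.stringify({ messages: [{ role: 'user', content: 'Hi' }] }),
//   });
//   const reader = res.body!.getReader();
//   const decoder = new TextDecoder();
//   while (true) {
//     const { value, done } = await reader.read();
//     if (done) break;
//     console.log(decoder.decode(value, { stream: true }));
//   }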