Skip to content

Commit

Permalink
fix perplexity stream parsing
Browse files Browse the repository at this point in the history
  • Loading branch information
mckaywrigley committed Jan 20, 2024
1 parent 09aca58 commit 994d761
Showing 1 changed file with 19 additions and 40 deletions.
59 changes: 19 additions & 40 deletions app/api/chat/perplexity/route.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
import { CHAT_SETTING_LIMITS } from "@/lib/chat-setting-limits"
import { checkApiKey, getServerProfile } from "@/lib/server/server-chat-helpers"
import { ChatSettings } from "@/types"
import { OpenAIStream, StreamingTextResponse } from "ai"
import OpenAI from "openai"

export const runtime = "edge"

Expand All @@ -13,51 +16,27 @@ export async function POST(request: Request) {
try {
const profile = await getServerProfile()

checkApiKey(profile.anthropic_api_key, "Anthropic")
checkApiKey(profile.perplexity_api_key, "Perplexity")

const response = await fetch("https://api.perplexity.ai/chat/completions", {
method: "POST",
headers: {
Accept: "application/json",
"Content-Type": "application/json",
Authorization: `Bearer ${profile.perplexity_api_key}`
},
body: JSON.stringify({
model: chatSettings.model,
messages: messages,
temperature: chatSettings.temperature,
stream: true
})
// Perplexity is compatible with the OpenAI SDK
const perplexity = new OpenAI({
apiKey: profile.perplexity_api_key || "",
baseURL: "https://api.perplexity.ai/"
})

const readableStream = new ReadableStream({
async start(controller) {
if (!response.body) {
throw new Error("No response body!")
}

const reader = response.body.getReader()
while (true) {
const { done, value } = await reader.read()
if (done) {
controller.close()
break
}
const chunk = new TextDecoder("utf-8").decode(value)

const data = chunk.split("data: ")[1]
if (data) {
const parsedData = JSON.parse(data)
const messageContent = parsedData.choices[0].delta.content
controller.enqueue(new TextEncoder().encode(messageContent))
}
}
}
const response = await perplexity.chat.completions.create({
model: chatSettings.model,
messages,
max_tokens:
CHAT_SETTING_LIMITS[chatSettings.model].MAX_TOKEN_OUTPUT_LENGTH,
stream: true
})

return new Response(readableStream, {
headers: { "Content-Type": "text/plain" }
})
// Convert the response into a friendly text-stream.
const stream = OpenAIStream(response)

// Respond with the stream
return new StreamingTextResponse(stream)
} catch (error: any) {
const errorMessage = error.error?.message || "An unexpected error occurred"
const errorCode = error.status || 500
Expand Down

0 comments on commit 994d761

Please sign in to comment.