Implement useChat (vercel#5)
* wip: add useChat hook

* Fix import

* Make import type only

* resolve conflicts

* prettier

* fix

* add stop and set methods

---------

Co-authored-by: Jared Palmer <jared@jaredpalmer.com>
shuding and jaredpalmer authored May 25, 2023
1 parent 0eb48f8 commit 0f50deb
Showing 10 changed files with 1,531 additions and 1,689 deletions.
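
For orientation, here is a minimal sketch of how the `useChat` hook introduced by this commit might be wired into a client component. The hook's exact signature isn't visible in this diff, so every name below — `useChat`, `messages`, `append`, the endpoint path, and the `stop` and `set` methods mentioned in the commit message — is an assumption, modeled on the `useCompletion` example in the README:

```tsx
"use client";

import { useState } from "react";
// Hypothetical import path, mirroring the README's `useCompletion` import.
import { useChat } from "@vercel/ai-utils/react";

export function Chat() {
  const [input, setInput] = useState("");
  // Assumed API: `stop` aborts the in-flight completion and `set`
  // replaces the message list ("add stop and set methods").
  const { messages, append, stop, set } = useChat("/api/chat");

  return (
    <div>
      {messages.map((m, i) => (
        <p key={i}>
          {m.role}: {m.content}
        </p>
      ))}
      <form
        onSubmit={(e) => {
          e.preventDefault();
          append({ role: "user", content: input });
          setInput("");
        }}
      >
        <input value={input} onChange={(e) => setInput(e.target.value)} />
        <button type="submit">Send</button>
      </form>
      <button onClick={() => stop()}>Stop</button>
      <button onClick={() => set([])}>Clear</button>
    </div>
  );
}
```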
README.md (48 changes: 24 additions & 24 deletions)
````diff
@@ -46,21 +46,21 @@ The goal of this library lies in its commitment to work directly with each AI/Mo

 ```tsx
 // app/api/generate/route.ts
-import { Configuration, OpenAIApi } from 'openai-edge';
-import { OpenAITextStream, StreamingTextResponse } from '@vercel/ai-utils';
+import { Configuration, OpenAIApi } from "openai-edge";
+import { OpenAITextStream, StreamingTextResponse } from "@vercel/ai-utils";

 const config = new Configuration({
   apiKey: process.env.OPENAI_API_KEY,
 });
 const openai = new OpenAIApi(config);

-export const runtime = 'edge';
+export const runtime = "edge";

 export async function POST() {
   const response = await openai.createChatCompletion({
-    model: 'gpt-4',
+    model: "gpt-4",
     stream: true,
-    messages: [{ role: 'user', content: 'What is love?' }],
+    messages: [{ role: "user", content: "What is love?" }],
   });
   const stream = OpenAITextStream(response);
   return new StreamingTextResponse(stream);
@@ -99,8 +99,8 @@ Create a Next.js Route Handler that uses the Edge Runtime that we'll use to gene

 ```tsx
 // ./app/api/generate/route.ts
-import { Configuration, OpenAIApi } from 'openai-edge';
-import { OpenAITextStream, StreamingTextResponse } from '@vercel/ai-utils';
+import { Configuration, OpenAIApi } from "openai-edge";
+import { OpenAITextStream, StreamingTextResponse } from "@vercel/ai-utils";

 // Create an OpenAI API client (that's edge friendly!)
 const config = new Configuration({
@@ -109,15 +109,15 @@ const config = new Configuration({
 const openai = new OpenAIApi(config);

 // IMPORTANT! Set the runtime to edge
-export const runtime = 'edge';
+export const runtime = "edge";

 export async function POST(req: Request) {
   // Extract the `prompt` from the body of the request
   const { prompt } = await req.json();

   // Ask OpenAI for a streaming chat completion given the prompt
   const response = await openai.createCompletion({
-    model: 'gpt-3.5-turbo',
+    model: "gpt-3.5-turbo",
     stream: true,
     prompt,
   });
@@ -136,21 +136,21 @@ Create a Client component with a form that we'll use to gather the prompt from t

 ```tsx
 // ./app/form.ts
-'use client';
+"use client";

-import { useState } from 'react';
-import { useCompletion } from '@vercel/ai-utils/react'; //@todo
+import { useState } from "react";
+import { useCompletion } from "@vercel/ai-utils/react"; //@todo

 export function Form() {
-  const [value, setValue] = useState('');
-  const { setPrompt, completion } = useCompletion('/api/generate');
+  const [value, setValue] = useState("");
+  const { setPrompt, completion } = useCompletion("/api/generate");
   return (
     <div>
       <form
         onSubmit={(e) => {
           e.preventDefault();
           setPrompt(value);
-          setValue('');
+          setValue("");
         }}
       >
         <textarea value={value} onChange={(e) => setValue(e.target.value)} />
@@ -208,16 +208,16 @@ A transform that will extract the text from _most_ chat and completion HuggingFa

 ```tsx
 // app/api/generate/route.ts
-import { HfInference } from '@huggingface/inference';
-import { HuggingFaceStream, StreamingTextResponse } from '@vercel/ai-utils';
+import { HfInference } from "@huggingface/inference";
+import { HuggingFaceStream, StreamingTextResponse } from "@vercel/ai-utils";

-export const runtime = 'edge';
+export const runtime = "edge";

 const Hf = new HfInference(process.env.HUGGINGFACE_API_KEY);

 export async function POST() {
   const response = await Hf.textGenerationStream({
-    model: 'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
+    model: "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
     inputs: `<|prompter|>What's the Earth total population?<|endoftext|><|assistant|>`,
     parameters: {
       max_new_tokens: 200,
@@ -239,19 +239,19 @@ This is a tiny wrapper around `Response` class that makes returning `ReadableStr

 ```tsx
 // app/api/generate/route.ts
-import { OpenAITextStream, StreamingTextResponse } from '@vercel/ai-utils';
+import { OpenAITextStream, StreamingTextResponse } from "@vercel/ai-utils";

-export const runtime = 'edge';
+export const runtime = "edge";

 export async function POST() {
   const response = await openai.createChatCompletion({
-    model: 'gpt-4',
+    model: "gpt-4",
     stream: true,
-    messages: { role: 'user', content: 'What is love?' },
+    messages: { role: "user", content: "What is love?" },
   });
   const stream = OpenAITextStream(response);
   return new StreamingTextResponse(stream, {
-    'X-RATE-LIMIT': 'lol',
+    "X-RATE-LIMIT": "lol",
   }); // => new Response(stream, { status: 200, headers: { 'Content-Type': 'text/plain; charset=utf-8', 'X-RATE-LIMIT': 'lol' }})
 }
 ```
````
package.json (2 changes: 1 addition & 1 deletion)
```diff
@@ -7,7 +7,7 @@
     "dev": "turbo dev --no-cache --continue",
     "lint": "turbo lint",
     "prepare": "husky install",
-    "prettier-check": "prettier --check .",
+    "prettier-check": "prettier --check \"**/*.{ts,tsx,md}\"",
     "type-check": "turbo type-check",
     "prettier-fix": "prettier --write \"**/*.{ts,tsx,md}\"",
     "publint": "turbo publint",
```
packages/core/package.json (3 changes: 2 additions & 1 deletion)
```diff
@@ -24,6 +24,7 @@
   },
   "dependencies": {
     "eventsource-parser": "1.0.0",
+    "nanoid": "^4.0.2",
     "swr": "2.1.5"
   },
   "devDependencies": {
@@ -42,7 +43,7 @@
     "typescript": "^4.5.3"
   },
   "peerDependencies": {
-    "react": "18.2.0"
+    "react": "^18.0.0"
   },
   "engines": {
     "node": ">=14.6"
```
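The new `nanoid` dependency most likely gives each chat message a unique ID; that is an assumption, since the hook's internals aren't shown on this page. The pattern would look roughly like:

```ts
import { nanoid } from "nanoid";

// Assumed message shape — not taken from this diff.
interface Message {
  id: string;
  role: "user" | "assistant";
  content: string;
}

function createMessage(role: Message["role"], content: string): Message {
  // nanoid() returns a short, URL-safe unique ID such as "V1StGXR8_Z5jdHi6B-myT".
  return { id: nanoid(), role, content };
}
```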
packages/core/src/anthropic-stream.ts (2 changes: 2 additions & 0 deletions)
```diff
@@ -41,3 +41,5 @@ export function AnthropicStream(
 ): ReadableStream {
   return AIStream(res, parseAnthropicStream, cb);
 }
+
+AnthropicStream.$$typeof = Symbol.for("AIStream.AnthropicStream");
```
packages/core/src/huggingface-stream.ts (2 changes: 2 additions & 0 deletions)
```diff
@@ -68,3 +68,5 @@ export function HuggingFaceStream(
   });
   return stream.pipeThrough(forkedStream);
 }
+
+HuggingFaceStream.$$typeof = Symbol.for("AIStream.HuggingFaceStream");
```
packages/core/src/index.tsx (8 changes: 4 additions & 4 deletions)
```diff
@@ -1,4 +1,4 @@
-export * from './ai-stream';
-export * from './openai-stream';
-export * from './streaming-text-response';
-export * from './useChat';
+export * from "./ai-stream";
+export * from "./openai-stream";
+export * from "./streaming-text-response";
+export * from "./use-chat";
```
packages/core/src/openai-stream.ts (2 changes: 2 additions & 0 deletions)
```diff
@@ -37,3 +37,5 @@ export function OpenAIStream(
 ): ReadableStream {
   return AIStream(res, parseOpenAIStream, cb);
 }
+
+OpenAIStream.$$typeof = Symbol.for("AIStream.OpenAIStream");
```
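
The `$$typeof` assignments added in the three stream modules above brand each helper with a key in the global symbol registry (`Symbol.for`), so code elsewhere can recognize which helper it was handed at runtime even after a minifier renames the functions. A sketch of the kind of check this enables — the `isOpenAIStream` guard is hypothetical, not part of this commit:

```ts
import { OpenAIStream } from "@vercel/ai-utils";

// Hypothetical guard built on the $$typeof brand added in this commit.
function isOpenAIStream(fn: unknown): boolean {
  return (
    typeof fn === "function" &&
    (fn as { $$typeof?: symbol }).$$typeof === Symbol.for("AIStream.OpenAIStream")
  );
}

console.log(isOpenAIStream(OpenAIStream)); // => true
```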