diff --git a/.eslintignore b/.eslintignore
deleted file mode 100644
index db4c6d9b679..00000000000
--- a/.eslintignore
+++ /dev/null
@@ -1,2 +0,0 @@
-dist
-node_modules
\ No newline at end of file
diff --git a/.eslintrc b/.eslintrc
deleted file mode 100644
index 364ee7925da..00000000000
--- a/.eslintrc
+++ /dev/null
@@ -1,55 +0,0 @@
-{
-  "parser": "@typescript-eslint/parser",
-  "parserOptions": {
-    "ecmaVersion": 8,
-    "sourceType": "module",
-    "ecmaFeatures": {
-      "impliedStrict": true,
-      "experimentalObjectRestSpread": true
-    },
-    "allowImportExportEverywhere": true,
-    "project": ["**/tsconfig.json"]
-  },
-  "plugins": ["@typescript-eslint", "react-hooks"],
-  "extends": [
-    "eslint:recommended",
-    "plugin:react/recommended",
-    "plugin:@typescript-eslint/recommended",
-    "prettier",
-    "plugin:jest-dom/recommended",
-    "plugin:testing-library/react",
-    "plugin:react/jsx-runtime"
-  ],
-  "settings": {
-    "react": {
-      "version": "detect"
-    }
-  },
-  "env": {
-    "es6": true,
-    "browser": true,
-    "node": true,
-    "jest": true
-  },
-  "rules": {
-    "func-names": [2, "as-needed"],
-    "no-shadow": 0,
-    "@typescript-eslint/no-shadow": 2,
-    "@typescript-eslint/explicit-function-return-type": 0,
-    "@typescript-eslint/no-unused-vars": [0, {"argsIgnorePattern": "^_"}],
-    "@typescript-eslint/no-use-before-define": 0,
-    "@typescript-eslint/ban-ts-ignore": 0,
-    "@typescript-eslint/no-empty-function": 0,
-    "@typescript-eslint/ban-ts-comment": 0,
-    "@typescript-eslint/no-var-requires": 0,
-    "@typescript-eslint/no-explicit-any": 0,
-    "@typescript-eslint/explicit-module-boundary-types": 0,
-    "@typescript-eslint/consistent-type-imports": [2, {"prefer": "type-imports"}],
-    "@typescript-eslint/ban-types": 0,
-    "react-hooks/rules-of-hooks": 2,
-    "react-hooks/exhaustive-deps": 1,
-    "react/prop-types": 0,
-    "testing-library/no-unnecessary-act": 0
-  },
-  "ignorePatterns": ["dist/", "node_modules", "scripts", "examples"]
-}
\ No newline at end of file
diff --git a/.eslintrc.js b/.eslintrc.js
new file mode 100644
index 00000000000..50775b06c36
--- /dev/null
+++ b/.eslintrc.js
@@ -0,0 +1,10 @@
+module.exports = {
+  root: true,
+  // This tells ESLint to load the config from the package `eslint-config-vercel-ai`
+  extends: ['vercel-ai'],
+  settings: {
+    next: {
+      rootDir: ['apps/*/'],
+    },
+  },
+};
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index a4eb6b63063..f70c02f421d 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -11,12 +11,13 @@ jobs:
   release:
     name: Release
     runs-on: ubuntu-latest
+    timeout-minutes: 10
     steps:
       - name: Checkout Repo
        uses: actions/checkout@v2

-      - name: Setup pnpm
-        uses: pnpm/action-setup@v2.2.2
+      - name: Setup pnpm 7
+        uses: pnpm/action-setup@v2
         with:
           version: 7

@@ -24,10 +25,9 @@ jobs:
         uses: actions/setup-node@v2
         with:
           node-version: 16.x
-          cache: 'pnpm'

       - name: Install Dependencies
-        run: pnpm install
+        run: pnpm i

       - name: Create Release Pull Request or Publish to npm
         id: changesets
@@ -35,7 +35,11 @@
         with:
           # This expects you to have a script called release which does a build for your packages and calls changeset publish
           publish: pnpm release
-          version: pnpm version-packages
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           NPM_TOKEN: ${{ secrets.NPM_TOKEN_ELEVATED }}
+
+      - name: Send a Slack notification if a publish happens
+        if: steps.changesets.outputs.published == 'true'
+        # You can do something when a publish happens.
+        run: my-slack-bot send-notification --message "A new version of ${GITHUB_REPOSITORY} was published!"
diff --git a/.gitignore b/.gitignore
index a309a3497fb..1554c1c53a1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,13 @@
-# build
-dist/
-
-# dependencies
-node_modules/
-
-# logs
-npm-debug.log
+.DS_Store
+node_modules
+.turbo
+*.log
+.next
+dist
+dist-ssr
+*.local
+.env
+.cache
+server/dist
+public/dist
+.turbo
diff --git a/.npmrc b/.npmrc
index 83584d4aa3b..ded82e2f63f 100644
--- a/.npmrc
+++ b/.npmrc
@@ -1,2 +1 @@
-save-exact = true
-strict-peer-dependencies=false
\ No newline at end of file
+auto-install-peers = true
diff --git a/.prettierignore b/.prettierignore
deleted file mode 100644
index c4cbfff6732..00000000000
--- a/.prettierignore
+++ /dev/null
@@ -1,3 +0,0 @@
-dist
-node_modules
-pnpm-lock.yaml
diff --git a/README.md b/README.md
index 1091ce2430f..c4ec20a084c 100644
--- a/README.md
+++ b/README.md
@@ -46,21 +46,21 @@ The goal of this library lies in its commitment to work directly with each AI/Mo

 ```tsx
 // app/api/generate/route.ts
-import { Configuration, OpenAIApi } from 'openai-edge';
-import { OpenAITextStream, StreamingTextResponse } from '@vercel/ai-utils';
+import { Configuration, OpenAIApi } from "openai-edge";
+import { OpenAITextStream, StreamingTextResponse } from "@vercel/ai-utils";

 const config = new Configuration({
   apiKey: process.env.OPENAI_API_KEY,
 });
 const openai = new OpenAIApi(config);

-export const runtime = 'edge';
+export const runtime = "edge";

 export async function POST() {
   const response = await openai.createChatCompletion({
-    model: 'gpt-4',
+    model: "gpt-4",
     stream: true,
-    messages: [{ role: 'user', content: 'What is love?' }],
+    messages: [{ role: "user", content: "What is love?" }],
   });
   const stream = OpenAITextStream(response);
   return new StreamingTextResponse(stream);
@@ -99,11 +99,8 @@ Create a Next.js Route Handler that uses the Edge Runtime that we'll use to gene

 ```tsx
 // ./app/api/generate/route.ts
-import { Configuration, OpenAIApi } from 'openai-edge';
-import {
-  OpenAITextStream,
-  StreamingTextResponse
-} from '@vercel/ai-utils';
+import { Configuration, OpenAIApi } from "openai-edge";
+import { OpenAITextStream, StreamingTextResponse } from "@vercel/ai-utils";

 // Create an OpenAI API client (that's edge friendly!)
 const config = new Configuration({
@@ -112,7 +109,7 @@ const config = new Configuration({
 const openai = new OpenAIApi(config);

 // IMPORTANT! Set the runtime to edge
-export const runtime = 'edge';
+export const runtime = "edge";

 export async function POST(req: Request) {
   // Extract the `prompt` from the body of the request
@@ -120,7 +117,7 @@ export async function POST(req: Request) {

   // Ask OpenAI for a streaming chat completion given the prompt
   const response = await openai.createCompletion({
-    model: 'gpt-3.5-turbo',
+    model: "gpt-3.5-turbo",
     stream: true,
     prompt,
   });
@@ -139,21 +136,21 @@ Create a Client component with a form that we'll use to gather the prompt from t

 ```tsx
 // ./app/form.ts
-'use client';
+"use client";

-import { useState } from 'react';
-import { useCompletion } from '@vercel/ai-utils/react'; //@todo
+import { useState } from "react";
+import { useCompletion } from "@vercel/ai-utils/react"; //@todo

 export function Form() {
-  const [value, setValue] = useState('');
-  const { setPrompt, completion } = useCompletion('/api/generate');
+  const [value, setValue] = useState("");
+  const { setPrompt, completion } = useCompletion("/api/generate");
   return (
     <form
       onSubmit={(e) => {
         e.preventDefault();
         setPrompt(value);
-        setValue('');
+        setValue("");
       }}
     >