Skip to content

Commit

Permalink
feat: Uniform Request and Response Parameters
Browse files Browse the repository at this point in the history
  • Loading branch information
xiangsx committed Jun 13, 2023
1 parent 749c9b1 commit b4c8b52
Show file tree
Hide file tree
Showing 11 changed files with 265 additions and 305 deletions.
56 changes: 35 additions & 21 deletions index.ts
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
import Koa, {Context, Next} from 'koa';
import Router from 'koa-router'
import bodyParser from 'koa-bodyparser';
import {ChatModelFactory, Model} from "./model";
import {ChatModelFactory, Site} from "./model";
import dotenv from 'dotenv';
import {ChatRequest, ChatResponse, ModelType, PromptToString} from "./model/base";
import {Event, EventStream} from "./utils";

dotenv.config();

Expand All @@ -21,44 +23,56 @@ app.use(errorHandler);
app.use(bodyParser());
const chatModel = new ChatModelFactory();

interface AskReq {
prompt: string;
model: Model;
interface AskReq extends ChatRequest {
site: Site;
}

interface AskRes extends ChatResponse {
}

router.get('/ask', async (ctx) => {
const {prompt, model = Model.Mcbbs, ...options} = ctx.query as unknown as AskReq;
const {prompt, model = ModelType.GPT3p5, site = Site.You} = ctx.query as unknown as AskReq;
if (!prompt) {
ctx.body = 'please input prompt';
ctx.body = {error: `need prompt in query`} as AskRes;
return;
}
const chat = chatModel.get(model);
const chat = chatModel.get(site);
if (!chat) {
ctx.body = 'Unsupported model';
ctx.body = {error: `not support site: ${site} `} as AskRes;
return;
}
const tokenLimit = chat.support(model);
if (!tokenLimit) {
ctx.body = {error: `${site} not support model ${model}`} as AskRes;
return;
}
const res = await chat.ask({prompt: prompt as string, options});
ctx.body = res.text;
ctx.body = await chat.ask({prompt: PromptToString(prompt, tokenLimit), model});
});

router.get('/ask/stream', async (ctx) => {
const {prompt, model = Model.Mcbbs, ...options} = ctx.query as unknown as AskReq;
const {prompt, model = ModelType.GPT3p5, site = Site.You} = ctx.query as unknown as AskReq;
ctx.set({
"Content-Type": "text/event-stream;charset=utf-8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
});
const es = new EventStream();
if (!prompt) {
ctx.body = 'please input prompt';
es.write(Event.error, {error: 'need prompt in query'})
return;
}
const chat = chatModel.get(model);
const chat = chatModel.get(site);
if (!chat) {
ctx.body = 'Unsupported model';
es.write(Event.error, {error: `not support site: ${site} `})
return;
}
ctx.set({
"Content-Type": "text/event-stream;charset=utf-8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
});
const res = await chat.askStream({prompt: prompt as string, options});
ctx.body = res?.text;
const tokenLimit = chat.support(model);
if (!tokenLimit) {
es.write(Event.error, {error: `${site} not support model ${model}`})
return;
}
await chat.askStream({prompt: PromptToString(prompt, tokenLimit), model}, es);
ctx.body = es.stream();
})

app.use(router.routes());
Expand Down
122 changes: 0 additions & 122 deletions model/aidream/index.ts

This file was deleted.

48 changes: 37 additions & 11 deletions model/base.ts
Original file line number Diff line number Diff line change
@@ -1,21 +1,45 @@
import {Stream} from "stream";
import {EventStream, getTokenSize} from "../utils";

export interface ChatOptions {
}

export interface Response {
text: string | null;
other?: any;
// Uniform response shape returned by every chat-site adapter.
// Exactly one of `content` / `error` is expected to be set per response.
export interface ChatResponse {
    // Model output text; absent when the request failed.
    content?: string;
    // Human-readable failure description; absent on success.
    error?: string;
}

export interface ResponseStream {
text: Stream;
other?: any;
// One turn of a multi-message chat prompt (consumed by PromptToString).
export type Message = {
    // Speaker tag rendered as the "role: " prefix — NOTE(review): not
    // constrained by the type; presumably "user"/"assistant"/"system",
    // confirm against callers.
    role: string;
    // Text of this turn; its token size counts toward the prompt limit.
    content: string;
}

export interface Request {
// Model families a site adapter may support; enum values are the
// wire identifiers accepted in the `model` query parameter.
export enum ModelType {
    GPT3p5 = 'gpt3.5',
    GPT4 = 'gpt4',
}

export interface ChatRequest {
prompt: string;
options?: any;
model: ModelType;
}

/**
 * Flattens a chat history into a single "role: content" transcript,
 * keeping only the most recent messages whose combined token size
 * fits within `limit`. A plain string prompt is returned unchanged.
 *
 * @param prompt raw prompt string, or ordered message history (oldest first)
 * @param limit  maximum token budget for the rendered transcript
 * @returns the prompt unchanged, or the truncated rendered transcript
 */
export function PromptToString(prompt: string | Message[], limit: number): string {
    if (typeof prompt === "string") {
        return prompt;
    }
    const kept: Message[] = [];
    let tokenSize = 0;
    // Walk newest-to-oldest so the most recent context survives truncation;
    // a message that would push the total past `limit` is dropped entirely,
    // along with everything older than it.
    for (let i = prompt.length - 1; i >= 0; i--) {
        const item = prompt[i];
        // NOTE(review): only message content is counted — the "role: " prefix
        // and joining newlines are outside the token budget; confirm intended.
        tokenSize += getTokenSize(item.content);
        if (tokenSize > limit) {
            break;
        }
        kept.push(item);
    }
    // Restore chronological order before rendering.
    return kept.reverse().map(({role, content}) => `${role}: ${content}`).join('\n');
}

export abstract class Chat {
Expand All @@ -25,7 +49,9 @@ export abstract class Chat {
this.options = options;
}

public abstract ask(req: Request): Promise<Response>
public abstract support(model: ModelType): number

public abstract ask(req: ChatRequest): Promise<ChatResponse>

public abstract askStream(req: Request): Promise<ResponseStream>
public abstract askStream(req: ChatRequest, stream: EventStream): Promise<void>
}
Loading

0 comments on commit b4c8b52

Please sign in to comment.