
Commit f664c92
feat: add site xun
xiangsx committed Jul 16, 2023
1 parent 5d449a0 commit f664c92
Showing 4 changed files with 137 additions and 21 deletions.
14 changes: 8 additions & 6 deletions index.ts
@@ -55,7 +55,8 @@ const AskHandle: Middleware = async (ctx) => {
         ctx.body = {error: `${site} not support model ${model}`} as AskRes;
         return;
     }
-    ctx.body = await chat.ask({prompt: PromptToString(prompt, tokenLimit), model});
+    const [content, messages] = PromptToString(prompt, tokenLimit);
+    ctx.body = await chat.ask({prompt: content, messages, model});
 }
 
 const AskStreamHandle: (ESType: new () => EventStream) => Middleware = (ESType) => async (ctx) => {
@@ -88,7 +89,8 @@ const AskStreamHandle: (ESType: new () => EventStream) => Middleware = (ESType)
         es.end();
         return;
     }
-    await chat.askStream({prompt: PromptToString(prompt, tokenLimit), model}, es);
+    const [content, messages] = PromptToString(prompt, tokenLimit);
+    await chat.askStream({prompt: content, messages, model}, es);
     ctx.body = es.stream();
 }

@@ -111,7 +113,7 @@ router.get('/supports', (ctx) => {
         const site = Site[key];
         //@ts-ignore
         const chat = chatModel.get(site);
-        const support: Support = {site:site, models: []}
+        const support: Support = {site: site, models: []}
         for (const mKey in ModelType) {
             //@ts-ignore
             const model = ModelType[mKey];
@@ -145,14 +147,14 @@ const openAIHandle: Middleware = async (ctx, next) => {
"index": 0,
"message": {
"role": "assistant",
"content": ctx.body.content,
"content": ctx.body.content || ctx.body.error,
},
"finish_reason": "stop"
}],
"usage": {
"prompt_tokens": 100,
"completion_tokens": getTokenSize(ctx.body.content),
"total_tokens": 100 + getTokenSize(ctx.body.content)
"completion_tokens": getTokenSize(ctx.body.content || ''),
"total_tokens": 100 + getTokenSize(ctx.body.content || '')
}
}
};
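Note: the openAIHandle hunk above makes two defensive changes: when a site returns no content, the upstream error text is surfaced as the assistant message, and getTokenSize is guarded against an undefined content. A minimal sketch of the resulting assembly; the standalone function, the pared-down body type, and the import path are illustrative, not part of the commit:

import {getTokenSize} from './utils';

// Illustrative only: the real handler builds this object inline from ctx.body.
function toOpenAIResponse(body: { content?: string; error?: string }) {
    return {
        choices: [{
            index: 0,
            message: {
                role: 'assistant',
                // Fall back to the error text so OpenAI-style clients see
                // why a request failed instead of receiving an empty reply.
                content: body.content || body.error,
            },
            finish_reason: 'stop',
        }],
        usage: {
            prompt_tokens: 100, // fixed placeholder, as in the handler
            completion_tokens: getTokenSize(body.content || ''),
            total_tokens: 100 + getTokenSize(body.content || ''),
        },
    };
}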
31 changes: 16 additions & 15 deletions model/base.ts
@@ -17,33 +17,34 @@ export enum ModelType {
     GPT3p5Turbo = 'gpt-3.5-turbo',
     GPT3p5_16k = 'gpt-3.5-turbo-16k',
     GPT4 = 'gpt-4',
     GPT4_32k = 'gpt-4-32k',
     Sage = 'sage',
     NetGpt3p5 = 'net-gpt3.5-turbo',
     ClaudeInstance = 'claude-instance',
     Claude = 'claude',
     Claude100k = 'claude-100k',
     Claude2_100k = 'claude-2-100k',
     Gpt4free = 'gpt4free',
     GooglePalm = 'google-palm',
 }
 
 export interface ChatRequest {
     prompt: string;
     model: ModelType;
+    messages: Message[];
 }
 
-export function PromptToString(prompt: string, limit: number): string {
+export function PromptToString(prompt: string, limit: number): [string, Message[]] {
     try {
         const messages: Message[] = JSON.parse(prompt);
-        let result: Message[] = [];
-        let tokenSize = 0;
-        for (let i = messages.length - 1; i >= 0; i--) {
-            const item = messages[i];
-            const {role, content} = item;
-            tokenSize += getTokenSize(content);
-            if (tokenSize > limit) {
-                break;
-            }
-            result.push(item);
+        const res = `${messages.map(item => `${item.role}: ${item.content}`).join('\n')}\nassistant: `;
+        console.log(prompt.length, limit, getTokenSize(res));
+        if (getTokenSize(res) >= limit && messages.length > 1) {
+            return PromptToString(JSON.stringify(messages.slice(1, messages.length)), limit);
         }
-        return `${result.reverse().map(item => `${item.role}: ${item.content}`).join('\n')}\nassistant: `;
+        return [res, messages];
     } catch (e) {
-        return prompt;
+        return [prompt, [{role: 'user', content: prompt}]];
     }
 }

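Note: the rewritten PromptToString returns a tuple instead of a bare string: the flattened "role: content" transcript plus the surviving Message[], recursively dropping the oldest message until the transcript fits the token budget (the budget comes from each site's support() return, e.g. 3000 for gpt-3.5-turbo on xun). A worked example; the history, limit, and import path below are illustrative:

import {Message, PromptToString} from './model/base';

const history: Message[] = [
    {role: 'system', content: 'You are a helpful assistant.'},
    {role: 'user', content: 'Summarize the plot of Hamlet.'},
];

// The HTTP layer passes the history in as a JSON string.
const [content, messages] = PromptToString(JSON.stringify(history), 3000);

// content === 'system: You are a helpful assistant.\nuser: Summarize the plot of Hamlet.\nassistant: '
// messages === the surviving Message[], handed as-is to sites with chat-style APIs.
// A prompt that is not valid JSON falls back to [prompt, [{role: 'user', content: prompt}]].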
3 changes: 3 additions & 0 deletions model/index.ts
@@ -6,6 +6,7 @@ import {Phind} from "./phind";
 import {Vita} from "./vita";
 import {FakeOpen} from "./fakeopen";
 import {Better} from "./better";
+import {Xun} from "./xun";
 
 export enum Site {
     // define new model here
@@ -16,6 +17,7 @@ export enum Site {
     Vita = 'vita',
     FakeOpen = 'fakeopen',
     Better = 'better',
+    Xun = 'xun',
 }
 
 export class ChatModelFactory {
@@ -37,6 +39,7 @@ export class ChatModelFactory {
         this.modelMap.set(Site.Vita, new Vita(this.options))
         this.modelMap.set(Site.FakeOpen, new FakeOpen(this.options))
         this.modelMap.set(Site.Better, new Better(this.options))
+        this.modelMap.set(Site.Xun, new Xun(this.options))
     }
 
     get(model: Site): Chat | undefined {
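Note: registering a site is the whole integration surface: import the class, add a Site enum entry, and map an instance in the factory. Callers then go through the factory rather than the class. A usage sketch; it assumes the factory can be constructed without arguments, which this diff does not show:

import {ChatModelFactory, Site} from './model';
import {ModelType} from './model/base';

async function demo() {
    const chat = new ChatModelFactory().get(Site.Xun);
    // support() returns the model's token budget, or 0 if unsupported.
    if (!chat || chat.support(ModelType.GPT3p5Turbo) === 0) {
        throw new Error('site xun unavailable or model unsupported');
    }
    const res = await chat.ask({
        prompt: 'user: hello\nassistant: ',
        messages: [{role: 'user', content: 'hello'}],
        model: ModelType.GPT3p5Turbo,
    });
    console.log(res.content ?? res.error);
}

demo().catch(console.error);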
110 changes: 110 additions & 0 deletions model/xun/index.ts
@@ -0,0 +1,110 @@
import {Chat, ChatOptions, ChatRequest, ChatResponse, Message, ModelType} from "../base";
import {AxiosInstance, AxiosRequestConfig, CreateAxiosDefaults} from "axios";
import {CreateAxiosProxy} from "../../utils/proxyAgent";
import es from "event-stream";
import {ErrorData, Event, EventStream, MessageData, parseJSON} from "../../utils";

// Request body expected by the upstream OpenAI-compatible endpoint.
interface RealReq {
    messages: Message[];
    model: string;
    temperature: number;
    presence_penalty: number;
    top_p: number;
    frequency_penalty: number;
    stream: boolean;
}

export class Xun extends Chat {
    private client: AxiosInstance;

    constructor(options?: ChatOptions) {
        super(options);
        this.client = CreateAxiosProxy({
            baseURL: 'https://gpt4.xunika.uk/api/openai',
            headers: {
                'Content-Type': 'application/json',
                "Cache-Control": "no-cache",
                "Proxy-Connection": "keep-alive"
            }
        } as CreateAxiosDefaults);
    }

    // Token budget per model; 0 means the model is not supported.
    support(model: ModelType): number {
        switch (model) {
            case ModelType.GPT3p5Turbo:
                return 3000;
            case ModelType.GPT3p5_16k:
                return 15000;
            default:
                return 0;
        }
    }

    public async ask(req: ChatRequest): Promise<ChatResponse> {
        const stream = new EventStream();
        await this.askStream(req, stream);
        const result: ChatResponse = {
            content: '',
        };
        // Aggregate the streamed chunks into a single response.
        return new Promise(resolve => {
            stream.read((event, data) => {
                switch (event) {
                    case Event.done:
                        break;
                    case Event.message:
                        result.content += (data as MessageData).content || '';
                        break;
                    case Event.error:
                        result.error = (data as ErrorData).error;
                        break;
                }
            }, () => {
                resolve(result);
            });
        });
    }

    public async askStream(req: ChatRequest, stream: EventStream) {
        const data: RealReq = {
            "messages": req.messages,
            "model": req.model,
            "temperature": 1,
            "presence_penalty": 0,
            "top_p": 1,
            "frequency_penalty": 0,
            "stream": true
        };
        try {
            const res = await this.client.post('/v1/chat/completions', data, {
                responseType: 'stream',
            } as AxiosRequestConfig);
            // The upstream replies as SSE: events are separated by blank
            // lines and each payload is prefixed with "data: ".
            res.data.pipe(es.split(/\r?\n\r?\n/)).pipe(es.map(async (chunk: any, cb: any) => {
                const dataStr = chunk.replace('data: ', '');
                if (!dataStr) {
                    return;
                }
                if (dataStr === '[DONE]') {
                    stream.write(Event.done, {content: ""});
                    stream.end();
                    return;
                }
                const data = parseJSON(dataStr, {} as any);
                if (!data?.choices) {
                    stream.write(Event.error, {error: 'not found data.choices'});
                    stream.end();
                    return;
                }
                const [{delta: {content = ""}, finish_reason}] = data.choices;
                if (finish_reason === 'stop') {
                    return;
                }
                stream.write(Event.message, {content});
            }));
        } catch (e: any) {
            console.error(e);
            stream.write(Event.error, {error: e.message});
            stream.end();
        }
    }
}
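Note: askStream relies on the upstream speaking the OpenAI SSE dialect: events separated by blank lines, each payload prefixed "data: ", with a literal [DONE] sentinel at the end. For callers who want tokens as they arrive rather than the aggregated ask() result, a consumption sketch follows; the factory construction and import paths are assumed as in the previous sketch:

import {ChatModelFactory, Site} from './model';
import {ModelType} from './model/base';
import {Event, EventStream, MessageData} from './utils';

async function streamDemo() {
    const chat = new ChatModelFactory().get(Site.Xun);
    if (!chat) return;
    const es = new EventStream();
    await chat.askStream({
        prompt: 'user: hi\nassistant: ',
        messages: [{role: 'user', content: 'hi'}],
        model: ModelType.GPT3p5_16k,
    }, es);
    es.read((event, data) => {
        // One Event.message per SSE chunk; print tokens as they arrive.
        if (event === Event.message) {
            process.stdout.write((data as MessageData).content || '');
        }
    }, () => {
        console.log('\n[stream closed]');
    });
}

streamDemo().catch(console.error);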
