import fs from 'fs/promises';
import os from 'os';
import path from 'path';
import { OpenAI } from 'openai';
import { Anthropic } from '@anthropic-ai/sdk';
import { encode } from 'gpt-tokenizer/esm/model/davinci-codex'; // tokenizer

// Map of model shortcodes to full model names
export const MODELS = {
  g: 'gpt-4-turbo-2024-04-09',
  G: 'gpt-4-32k-0314',
  c: 'claude-3-haiku-20240307',
  s: 'claude-3-sonnet-20240229',
  C: 'claude-3-opus-20240229',
};

// Utility function to read the OpenAI API token
async function getOpenAIToken() {
  const tokenPath = path.join(os.homedir(), '.config', 'openai.token');
  try {
    return (await fs.readFile(tokenPath, 'utf8')).trim();
  } catch (err) {
    console.error('Error reading openai.token file:', err.message);
    process.exit(1);
  }
}

// Utility function to read the Anthropic API token
async function getAnthropicToken() {
  const tokenPath = path.join(os.homedir(), '.config', 'anthropic.token');
  try {
    return (await fs.readFile(tokenPath, 'utf8')).trim();
  } catch (err) {
    console.error('Error reading anthropic.token file:', err.message);
    process.exit(1);
  }
}
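
// Both helpers expect plain-text key files under ~/.config. An illustrative
// setup (not part of the original module) might look like:
//
//   $ echo "<openai-api-key>"    > ~/.config/openai.token
//   $ echo "<anthropic-api-key>" > ~/.config/anthropic.token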

// Factory function to create a stateful asker
export function asker() {
  const messages = [];

  // Asker function that maintains conversation state
  async function ask(userMessage, { model, temperature = 0.0, max_tokens = 4096 }) {
    model = MODELS[model] || model;
    const isGPT = model.startsWith('gpt');

    const client = isGPT ?
      new OpenAI({ apiKey: await getOpenAIToken() }) :
      new Anthropic({ apiKey: await getAnthropicToken() });

    messages.push({ role: 'user', content: userMessage });

    const params = {
      model,
      temperature,
      max_tokens,
      stream: true,
    };

    let result = '';

    if (isGPT) {
      params.messages = messages;

      const stream = await client.chat.completions.create(params);

      for await (const chunk of stream) {
        const text = chunk.choices[0]?.delta?.content || '';
        process.stdout.write(text);
        result += text;
      }
    } else {
      const stream = client.messages.stream({
        ...params,
        messages
      }).on('text', (text) => {
        process.stdout.write(text);
        result += text;
      });
      await stream.finalMessage();
    }

    messages.push({ role: 'assistant', content: result });

    return result;
  }

  return ask;
}
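
// Example usage (an illustrative sketch, not part of the original module; the
// prompt strings and the 's' shortcode below are placeholders):
//
//   const ask = asker();
//   await ask('Hello! What can you do?', { model: 's' });      // streams the reply to stdout
//   await ask('Summarize that in one line.', { model: 's' });  // follow-up reuses the same history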

export function token_count(inputText) {
  // Encode the input string and return the number of tokens
  return encode(inputText).length;
}
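
// Example (illustrative, not part of the original module): token_count encodes
// text with the imported codex tokenizer, so the result is an approximation for
// models that use a different vocabulary (e.g. GPT-4 or the Claude models).
//
//   console.log(token_count('Hello! What can you do?'));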