Skip to content

Commit

Permalink
switch back from gpt 4 to 3.5 turbo because of the limitations
Browse files Browse the repository at this point in the history
  • Loading branch information
Leask committed Aug 1, 2023
1 parent 85d4e83 commit 226c9ca
Showing 1 changed file with 8 additions and 2 deletions.
10 changes: 8 additions & 2 deletions lib/hal.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,10 @@ const li = (id, text, url) => `\n${id}. ` + (url ? link(text, url) : text);
// Matches one reference-card line of the form `[n]: <url> "title"`,
// capturing the URL (group 1) and the title (group 2).
// NOTE(review): the `g` flag makes `.test`/`.exec` stateful via `lastIndex`;
// reset it (or use `matchAll`) before reusing this regex — verify call sites.
const cardReg = /^\[\d*\]:\ ([^\ ]*)\ "(.*)"$/ig;
// Rough token estimate per OpenAI's rule of thumb (~100 tokens per 75 words):
// https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
const countTokens = (txt) => {
    // Split on any non-alphanumeric character to approximate a word count.
    const wordCount = txt.split(/[^a-z0-9]/i).length;
    return Math.ceil((wordCount * 100) / 75);
};
const MAX_CONTEXT_TOKENS = 8192;
// Context-window budget for the active model (gpt-3.5-turbo).
// Restore the larger value below when switching back to GPT-4:
// const MAX_CONTEXT_TOKENS = 8192;
const MAX_CONTEXT_TOKENS = 4096;
// Budget 60% of the context window for the prompt…
const MAX_PROMPT_TOKENS = Math.floor(MAX_CONTEXT_TOKENS * 0.6);
// …and reserve the remainder for the model's response.
const MAX_RESPONSE_TOKENS = MAX_CONTEXT_TOKENS - MAX_PROMPT_TOKENS;

Expand All @@ -37,7 +40,10 @@ const init = async options => {
keepNecessaryMessagesOnly: true,
maxContextTokens: MAX_CONTEXT_TOKENS,
modelOptions: {
model: options?.model || 'gpt-4',
// Keep this for GPT4 {
// model: options?.model || 'gpt-4',
// }
model: options?.model || 'gpt-3.5-turbo',
max_tokens: MAX_RESPONSE_TOKENS,
...options?.clientOptions?.modelOptions || {}
}, ...options?.clientOptions || {},
Expand Down

0 comments on commit 226c9ca

Please sign in to comment.