Skip to content

Commit 8c9a1a8

Browse files
committed
improvements
1 parent eedbf72 commit 8c9a1a8

File tree

5 files changed

+192
-53
lines changed

5 files changed

+192
-53
lines changed

Ask.mjs

Lines changed: 59 additions & 19 deletions
Original file line number · Diff line number · Diff line change
@@ -3,15 +3,18 @@ import os from 'os';
33
import path from 'path';
44
import { OpenAI } from "openai";
55
import { Anthropic } from '@anthropic-ai/sdk';
6+
import { Groq } from "groq-sdk";
67
import { encode } from "gpt-tokenizer/esm/model/davinci-codex"; // tokenizer
78

89
// Map of model shortcodes to full model names
910
export const MODELS = {
1011
g: 'gpt-4-turbo-2024-04-09',
1112
G: 'gpt-4-32k-0314',
12-
c: 'claude-3-haiku-20240307',
13+
h: 'claude-3-haiku-20240307',
1314
s: 'claude-3-sonnet-20240229',
14-
C: 'claude-3-opus-20240229',
15+
o: 'claude-3-opus-20240229',
16+
l: 'llama3-8b-8192',
17+
L: 'llama3-70b-8192'
1518
};
1619

1720
// Utility function to read the OpenAI API token
@@ -36,49 +39,87 @@ async function getAnthropicToken() {
3639
}
3740
}
3841

42+
// Utility function to read the Groq API token
43+
async function getGroqToken() {
44+
const tokenPath = path.join(os.homedir(), '.config', 'groq.token');
45+
try {
46+
return (await fs.readFile(tokenPath, 'utf8')).trim();
47+
} catch (err) {
48+
console.error('Error reading groq.token file:', err.message);
49+
process.exit(1);
50+
}
51+
}
52+
3953
// Factory function to create a stateful asker
4054
export function asker() {
4155
const messages = [];
4256

4357
// Asker function that maintains conversation state
44-
async function ask(userMessage, { model, temperature = 0.0, max_tokens = 4096 }) {
58+
async function ask(userMessage, { system, model, temperature = 0.0, max_tokens = 4096 }) {
4559
model = MODELS[model] || model;
46-
const isGPT = model.startsWith('gpt');
47-
48-
const client = isGPT ?
49-
new OpenAI({ apiKey: await getOpenAIToken() }) :
50-
new Anthropic({ apiKey: await getAnthropicToken() });
51-
52-
messages.push({ role: 'user', content: userMessage });
60+
const isOpenAI = model.startsWith('gpt');
61+
const isAnthropic = model.startsWith('claude');
62+
const isGroq = model.startsWith('llama');
63+
64+
let client;
65+
if (isOpenAI) {
66+
client = new OpenAI({ apiKey: await getOpenAIToken() });
67+
} else if (isAnthropic) {
68+
client = new Anthropic({ apiKey: await getAnthropicToken() });
69+
} else if (isGroq) {
70+
client = new Groq({ apiKey: await getGroqToken() });
71+
} else {
72+
throw new Error(`Unsupported model: ${model}`);
73+
}
74+
75+
if (messages.length === 0 && !isAnthropic) {
76+
messages.push({ role: "system", content: system });
77+
}
78+
79+
messages.push({ role: "user", content: userMessage });
5380

5481
const params = {
82+
system: isAnthropic ? system : undefined,
5583
model,
56-
temperature,
84+
temperature,
5785
max_tokens,
5886
stream: true,
5987
};
60-
88+
6189
let result = "";
6290

63-
if (isGPT) {
91+
if (isOpenAI) {
6492
params.messages = messages;
65-
93+
6694
const stream = await client.chat.completions.create(params);
67-
95+
6896
for await (const chunk of stream) {
6997
const text = chunk.choices[0]?.delta?.content || "";
7098
process.stdout.write(text);
7199
result += text;
72100
}
73-
} else {
101+
} else if (isAnthropic) {
74102
const stream = client.messages.stream({
75103
...params,
76104
messages
77105
}).on('text', (text) => {
78106
process.stdout.write(text);
79-
result += text;
107+
result += text;
80108
});
81109
await stream.finalMessage();
110+
} else if (isGroq) {
111+
params.messages = messages.map(msg => ({
112+
role: msg.role,
113+
content: msg.content
114+
}));
115+
116+
const stream = await client.chat.completions.create(params);
117+
118+
for await (const chunk of stream) {
119+
const text = chunk.choices[0]?.delta?.content || "";
120+
process.stdout.write(text);
121+
result += text;
122+
}
82123
}
83124

84125
messages.push({ role: 'assistant', content: result });
@@ -93,10 +134,9 @@ export function token_count(inputText) {
93134
// Encode the input string into tokens
94135
const tokens = encode(inputText);
95136

96-
// Get the number of tokens
137+
// Get the number of tokens
97138
const numberOfTokens = tokens.length;
98139

99140
// Return the number of tokens
100141
return numberOfTokens;
101142
}
102-

chatsh.mjs

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -60,8 +60,8 @@ async function main() {
6060
const userMessage = await prompt('$ ');
6161

6262
try {
63-
const fullMessage = SYSTEM_PROMPT + "\n" + lastOutput + userMessage;
64-
const assistantMessage = await ask(fullMessage, { model: MODEL });
63+
const fullMessage = lastOutput + userMessage;
64+
const assistantMessage = await ask(fullMessage, { system: SYSTEM_PROMPT, model: MODEL });
6565
console.log();
6666

6767
const code = extractCode(assistantMessage);

holefill.mjs

Lines changed: 112 additions & 32 deletions
Original file line number · Diff line number · Diff line change
@@ -2,87 +2,167 @@
22
import { asker, MODELS, token_count } from './Ask.mjs';
33
import process from "process";
44
import fs from 'fs/promises';
5+
import os from 'os';
56
import path from 'path';
67

8+
const ask = asker();
9+
710
const system = `
811
You are a HOLE FILLER. You are provided with a file containing holes, formatted
9-
as '{{X}}'. Your TASK is to answer with a string to replace this hole with.
12+
as '{{HOLE_NAME}}'. Your TASK is to complete with a string to replace this hole
13+
with, inside a <COMPLETION/> XML tag, including context-aware indentation, if
14+
needed. All completions MUST be truthful, accurate, well-written and correct.
1015
11-
# EXAMPLE QUERY:
16+
## EXAMPLE QUERY:
1217
18+
<QUERY>
1319
function sum_evens(lim) {
1420
var sum = 0;
1521
for (var i = 0; i < lim; ++i) {
16-
{{X}}
22+
{{FILL_HERE}}
1723
}
1824
return sum;
1925
}
26+
</QUERY>
2027
21-
TASK: Fill the {{X}} hole.
28+
TASK: Fill the {{FILL_HERE}} hole.
2229
23-
# CORRECT ANSWER:
30+
## CORRECT COMPLETION
2431
25-
if (i % 2 === 0) {
32+
<COMPLETION>if (i % 2 === 0) {
2633
sum += i;
27-
}
34+
}</COMPLETION>
35+
36+
## EXAMPLE QUERY:
37+
38+
<QUERY>
39+
def sum_list(lst):
40+
total = 0
41+
for x in lst:
42+
{{FILL_HERE}}
43+
return total
44+
45+
print sum_list([1, 2, 3])
46+
</QUERY>
47+
48+
## CORRECT COMPLETION:
49+
50+
<COMPLETION> total += x</COMPLETION>
2851
29-
# NOTICE THE CONTEXT-AWARE INDENTATION:
52+
## EXAMPLE QUERY:
3053
31-
1. The first line is NOT indented, because there are already spaces before {{LOOP}}.
54+
<QUERY>
55+
// data Tree a = Node (Tree a) (Tree a) | Leaf a
56+
57+
// sum :: Tree Int -> Int
58+
// sum (Node lft rgt) = sum lft + sum rgt
59+
// sum (Leaf val) = val
60+
61+
// convert to TypeScript:
62+
{{FILL_HERE}}
63+
</QUERY>
64+
65+
## CORRECT COMPLETION:
66+
67+
<COMPLETION>type Tree<T>
68+
= {$:"Node", lft: Tree<T>, rgt: Tree<T>}
69+
| {$:"Leaf", val: T};
70+
71+
function sum(tree: Tree<number>): number {
72+
switch (tree.$) {
73+
case "Node":
74+
return sum(tree.lft) + sum(tree.rgt);
75+
case "Leaf":
76+
return tree.val;
77+
}
78+
}</COMPLETION>
3279
33-
2. The other lines ARE indented, to match the indentation of the context.
80+
## EXAMPLE QUERY:
3481
35-
# ANSWER ONLY WITH THE CORRECT SUBSTITUTION. NOTHING ELSE.
82+
The 4th {{FILL_HERE}} is Jupiter.
83+
84+
## CORRECT COMPLETION:
85+
86+
<COMPLETION>the 4th planet after Mars</COMPLETION>
87+
88+
## EXAMPLE QUERY:
89+
90+
function hypothenuse(a, b) {
91+
return Math.sqrt({{FILL_HERE}}b ** 2);
92+
}
93+
94+
## CORRECT COMPLETION:
95+
96+
<COMPLETION>a ** 2 + </COMPLETION>
3697
`;
3798

3899
var file = process.argv[2];
39-
var curr = process.argv[3];
40-
var model = process.argv[4] || "C";
100+
var mini = process.argv[3];
101+
var model = process.argv[4] || "g";
41102

42103
if (!file) {
43-
console.log("Usage: holefill <file> [<shortened_file>] [<model>]");
44-
console.log("Replaces {{HOLES}} in <file> using the specified model. Shortcuts:");
45-
for (var key in MODELS) {
46-
console.log("- " + key + " = " + MODELS[key]);
47-
}
48-
console.log("A shortened file can be used to provide relevant context.");
49-
process.exit(1);
104+
console.log("Usage: holefill <file> [<shortened_file>] [<model_name>]");
105+
console.log("");
106+
console.log("This will replace all {{HOLES}} in <file>, using GPT-4 / Claude-3.");
107+
console.log("A shortened file can be used to omit irrelevant parts.");
108+
process.exit();
50109
}
51110

52111
var file_code = await fs.readFile(file, 'utf-8');
53-
var curr_code = curr ? await fs.readFile(curr, 'utf-8') : file_code;
112+
var mini_code = mini ? await fs.readFile(mini, 'utf-8') : file_code;
54113

55114
// Imports context files when //./path_to_file// is present.
56115
var regex = /\/\/\.\/(.*?)\/\//g;
57116
var match;
58-
while ((match = regex.exec(curr_code)) !== null) {
117+
while ((match = regex.exec(mini_code)) !== null) {
59118
var import_path = path.resolve(path.dirname(file), match[1]);
60119
if (await fs.stat(import_path).then(() => true).catch(() => false)) {
61120
var import_text = await fs.readFile(import_path, 'utf-8');
62121
console.log("import_file:", match[0]);
63-
curr_code = curr_code.replace(match[0], '\n' + import_text);
122+
mini_code = mini_code.replace(match[0], '\n' + import_text);
64123
} else {
65124
console.log("import_file:", match[0], "ERROR");
66125
process.exit(1);
67126
}
68127
}
69128

70-
await fs.writeFile(curr, curr_code, 'utf-8');
129+
await fs.writeFile(mini, mini_code, 'utf-8');
71130

72-
var tokens = token_count(curr_code);
73-
var holes = curr_code.match(/{{\w+}}/g) || [];
131+
var tokens = token_count(mini_code);
132+
var holes = mini_code.match(/{{\w+}}/g) || [];
74133

75-
var ask = asker();
134+
if (holes.length === 0 && mini_code.indexOf("??") !== -1 && (mini_code.match(/\?\?/g) || []).length == 1) {
135+
holes = "??";
136+
}
76137

77138
console.log("holes_found:", holes);
78139
console.log("token_count:", tokens);
79140
console.log("model_label:", MODELS[model] || model);
80141

81-
for (let hole of holes) {
82-
console.log("next_filled: " + hole + "...");
83-
var prompt = curr_code + "\nTASK: Fill the {{" + hole + "}} hole. Answer only with the EXACT completion to replace {{" + hole + "}} with. INDENT IT BASED ON THE CONTEXT.";
84-
var answer = await ask(prompt, { system, model, temperature: 0, max_tokens: 4096 });
85-
file_code = file_code.replace(hole, answer);
142+
if (holes === "??") {
143+
console.log("next_filled: ??");
144+
var prompt = "<QUERY>\n" + mini_code.replace("??", "{{FILL_HERE}}") + "\n</QUERY>\nTASK: Fill the {{FILL_HERE}} hole. Answer only with the CORRECT completion, and NOTHING ELSE. Do it now.";
145+
var answer = await ask(prompt, {system, model});
146+
var match = answer.match(/<COMPLETION>([\s\S]*?)<\/COMPLETION>/);
147+
if (match) {
148+
file_code = file_code.replace("??", match[1]);
149+
} else {
150+
console.error("Error: Could not find <COMPLETION> tags in the AI's response.");
151+
process.exit(1);
152+
}
153+
} else {
154+
for (let hole of holes) {
155+
console.log("next_filled: " + hole + "...");
156+
var prompt = "<QUERY>\n" + mini_code + "\n</QUERY>\nTASK: Fill the {{"+hole+"}} hole. Answer only with the CORRECT completion, and NOTHING ELSE. Do it now.";
157+
var answer = await ask(prompt, {system, model});
158+
var match = answer.match(/<COMPLETION>([\s\S]*?)<\/COMPLETION>/);
159+
if (match) {
160+
file_code = file_code.replace(hole, match[1]);
161+
} else {
162+
console.error("Error: Could not find <COMPLETION> tags in the AI's response for hole: " + hole);
163+
process.exit(1);
164+
}
165+
}
86166
}
87167

88168
await fs.writeFile(file, file_code, 'utf-8');

package-lock.json

Lines changed: 18 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

package.json

Lines changed: 1 addition & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -19,6 +19,7 @@
1919
"dependencies": {
2020
"@anthropic-ai/sdk": "^0.19.1",
2121
"gpt-tokenizer": "^2.1.2",
22+
"groq-sdk": "^0.3.2",
2223
"openai": "^4.31.0",
2324
"punycode": "^2.3.1"
2425
}

0 commit comments

Comments (0)