Commit cd30630

Initial commit.
0 parents  commit cd30630

5 files changed, +495 -0 lines changed

GPT.mjs

Lines changed: 57 additions & 0 deletions
@@ -0,0 +1,57 @@
#!/usr/bin/env node

import process from "process";
import OpenAI from "openai";
import fs from "fs/promises";
import os from "os";
import path from "path";
import { encode } from "gpt-tokenizer/esm/model/davinci-codex"; // tokenizer

const openai = new OpenAI({apiKey: await get_token()});

export async function get_token() {
  const tokenPath = path.join(os.homedir(), ".config", "openai.token");
  try {
    const token = (await fs.readFile(tokenPath, "utf8")).trim();
    return token;
  } catch (err) {
    if (err.code === "ENOENT") {
      console.error("Error: openai.token file not found in `~/.config/openai.token`.");
      console.error("Please make sure the file exists and contains your OpenAI API token.");
    } else {
      console.error("Error reading openai.token file:", err.message);
    }
    process.exit(1);
  }
}

export async function ask({system, prompt, model, temperature}) {
  const stream = await openai.chat.completions.create({
    model: model || "gpt-4-0125-preview",
    messages: [
      {role: "system", content: system || "You're a helpful assistant." },
      {role: "user", content: prompt || "What time is it?" }
    ],
    stream: true,
    temperature: temperature || 0,
  });
  var result = "";
  for await (const chunk of stream) {
    var text = chunk.choices[0]?.delta?.content || "";
    process.stdout.write(text);
    result += text;
  }
  process.stdout.write("\n");
  return result;
}

export function token_count(inputText) {
  // Encode the input string into tokens
  const tokens = encode(inputText);

  // Get the number of tokens
  const numberOfTokens = tokens.length;

  // Return the number of tokens
  return numberOfTokens;
}
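
For orientation, here is a minimal, hypothetical usage sketch of the module above (not part of this commit). It assumes a valid API token in ~/.config/openai.token and only illustrates the call shapes of token_count and ask; the prompt text and file name are made up:

#!/usr/bin/env node
// use_gpt.mjs -- hypothetical example, not included in this commit.
import * as GPT from "./GPT.mjs";

var prompt = "Name the largest mammal.";
console.log("prompt_tokens:", GPT.token_count(prompt));

// ask() streams the completion to stdout and also returns the full text.
var answer = await GPT.ask({
  system: "You're a helpful assistant.",
  prompt: prompt,
  model: "gpt-4-0125-preview",
  temperature: 0,
});
console.log("answer_length:", answer.length);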

README.md

Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
Taelin AI Scripts
=================

Some AI scripts I use daily.

holefill.mjs

Lines changed: 105 additions & 0 deletions
@@ -0,0 +1,105 @@
#!/usr/bin/env node

import * as GPT from './GPT.mjs';
import process from "process";
import fs from 'fs/promises';
import os from 'os';
import path from 'path';

const system = `
You are a HOLE FILLER. You are provided with a file containing holes, formatted
as '{{HOLE}}'. Your TASK is to answer with a string to replace this hole with.

#################

## EXAMPLE QUERY:

function sum_evens(lim) {
  var sum = 0;
  for (var i = 0; i < lim; ++i) {
    {{LOOP}}
  }
  return sum;
}

TASK: Fill the {{LOOP}} hole.

## CORRECT ANSWER:

if (i % 2 === 0) {
      sum += i;
    }

## NOTICE THE INDENTATION.
## The first line isn't indented, because the hole already has spaces before it.
## The other lines are indented, to match the surrounding style.

#################

## EXAMPLE QUERY:

function factorial(N) {
  var fact = 1;
  for (var i = 1; i <= N; ++i) {
{{LOOP}}
  }
  return fact;
}

TASK: Fill the {{LOOP}} hole.

## CORRECT ANSWER:

    fact *= i;

## NOTICE THE INDENTATION.
## ALL lines are indented, because there are no spaces before the hole.

#################

## EXAMPLE QUERY:

Q: Which is the largest mammal?

A: {{ANSWER}}

TASK: Fill the {{ANSWER}} hole.

## CORRECT ANSWER:

The blue whale.

## NOTICE THE INDENTATION.
## There is no indentation, since this is an inline hole.
`;

var file = process.argv[2];
var fill = process.argv[3];
var fast = process.argv[4] === "--fast";

if (!file) {
  console.log("Usage: holefill <file> [<shortened_file>] [--fast]");
  console.log("");
  console.log("This will replace all {{HOLES}} in <file>, using GPT-4.");
  console.log("A shortened file can be used to omit irrelevant parts.");
  process.exit();
}

var file_code = await fs.readFile(file, 'utf-8');
var fill_code = fill ? await fs.readFile(fill, 'utf-8') : file_code;
var tokens = GPT.token_count(fill_code);
var holes = fill_code.match(/{{\w+}}/g) || [];
var model = fast ? "gpt-4-0125-preview" : "gpt-4-0314";

console.log("holes_found:", holes);
console.log("token_count:", tokens);
console.log("model_label:", model);

for (let hole of holes) {
  console.log("next_filled: " + hole + "...");
  // Note: `hole` already includes its {{ }} braces, so it isn't wrapped again here.
  var prompt = fill_code + "\nTASK: Fill the " + hole + " hole.";
  var answer = await GPT.ask({system, prompt, model});
  file_code = file_code.replace(hole, answer);
}

await fs.writeFile(file, file_code, 'utf-8');
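
As a hypothetical illustration (not from this commit), a holed input file for the script might look like this; the file name and hole name are made up:

// example.mjs -- hypothetical input file
function max(a, b) {
  {{BODY}}
}

Running the script on it (for example, ./holefill.mjs example.mjs) would print the holes found, the token count, and the chosen model, then ask GPT-4 to fill {{BODY}} and write the completed file back to example.mjs in place.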
