Quick n dirty chat feeling #82

Open: wants to merge 4 commits into main
Changes from all commits
7 changes: 7 additions & 0 deletions README.md
@@ -1,3 +1,10 @@
## FOR PLEO
To build and run (assuming the required dependencies are already installed), run:
```
chmod +x run.sh
./run.sh
```

<img src="https://github.com/ricklamers/gpt-code-ui/assets/1309307/9ad4061d-2e26-4407-9431-109b650fb022" alt="GPT-Code logo" width=240 />

An open source implementation of OpenAI's ChatGPT [Code interpreter](https://openai.com/blog/chatgpt-plugins#code-interpreter).
95 changes: 86 additions & 9 deletions frontend/src/App.tsx
@@ -37,16 +37,19 @@ function App() {
);

let [openAIKey, setOpenAIKey] = useLocalStorage<string>("OpenAIKey", "");

console.log("test")
console.error("test2")
console.info("test3")
console.debug("test4")
let [messages, setMessages] = useState<Array<MessageDict>>(
Array.from([
{
text: "Hello! I'm a GPT Code assistant. Ask me to do something for you! Pro tip: you can upload a file and I'll be able to use it.",
text: "Hello! I'm the Pleo GPT Code and Chat assistant. Ask me to do something for you! Pro tip: you can upload a file and I'll be able to use it.",
role: "generator",
type: "message",
},
{
text: "If I get stuck just type 'reset' and I'll restart the kernel.",
text: "If I get stuck just type 'reset' and I'll restart the kernel. 2",
role: "generator",
type: "message",
},
@@ -119,19 +122,24 @@ function App() {

const data = await response.json();
const code = data.code;


addMessage({ text: data.text, type: "message", role: "generator" });


if (response.status != 200) {
setWaitingForSystem(WaitingStates.Idle);
return;
}

if (!!code) {
await injectContext(`EXPERT: \n\n ${data.text} \n\n The code you asked for: \n\n ${data.code} \n\n I will now execute it and get back to you with a result and analysis.`)
submitCode(code);
setWaitingForSystem(WaitingStates.RunningCode);
addMessage({ text: data.text, type: "message", role: "generator" });
} else {
await injectContext(`EXPERT: \n\n ${data.text} \n\n `)
setWaitingForSystem(WaitingStates.Idle);
addMessage({ text: data.text, type: "message", role: "generator" });
}
} catch (error) {
console.error(
@@ -142,20 +150,75 @@
};

async function getApiData() {

if(document.hidden){
return;
}

console.log("starting the check")
let response = await fetch(`${Config.API_ADDRESS}/api`);
//console.log("response:", response)
let data = await response.json();
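// Text results ("message"/"message_raw") are relayed to /chat for a narrative answer; anything else is shown directly as a system message.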
data.results.forEach(function (result: {value: string, type: string}) {
for await (const result of data.results) {
if (result.value.trim().length === 0) {
continue;
}
if ((result.type === "message" || result.type === "message_raw") && result.value !== 'Kernel is ready.') {
console.error(`INJECTING DATA: ${result.value}`)
const chatResponse = await fetch(`${Config.WEB_ADDRESS}/chat`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
prompt: `Please answer my previous question(s) using the following, which is the result of the Python code you wrote. If the code was supposed to generate any visuals, make sure to write a description of them. Take into account what parts of the questions you have already answered. The results are coming in as the server completes the execution. Answer the part of the question that fits the results you are given now.
[Python code results]:
${result.value}`,
model: selectedModel,
openAIKey: openAIKey,
}),
});

console.error('Response: ', chatResponse)
const data = await chatResponse.json();

addMessage({ text: data.text, type: "message", role: "generator" });
setWaitingForSystem(WaitingStates.Idle);
} else {
addMessage({ text: result.value, type: result.type, role: "system" });
setWaitingForSystem(WaitingStates.Idle);
}
}
/* Previous forEach-based implementation removed; superseded by the sequential loop above. */
}

function completeUpload(message: string) {
@@ -176,6 +239,20 @@ function App() {
.catch((error) => console.error("Error:", error));
}

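// Pushes assistant-side context into the backend's shared message buffer so later prompts can see it.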
async function injectContext(context: string) {
await fetch(`${Config.WEB_ADDRESS}/inject-context`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
prompt: context,
}),
})
.then(() => {})
.catch((error) => console.error("Error:", error));
}

function startUpload(_: string) {
setWaitingForSystem(WaitingStates.UploadingFile);
}
2 changes: 1 addition & 1 deletion gpt_code_ui/main.py
@@ -52,7 +52,7 @@ def print_color(text, color="gray"):

def print_banner():

print("""
print(""" PLEO!!!
█▀▀ █▀█ ▀█▀ ▄▄ █▀▀ █▀█ █▀▄ █▀▀
█▄█ █▀▀ ░█░ ░░ █▄▄ █▄█ █▄▀ ██▄
""")
138 changes: 114 additions & 24 deletions gpt_code_ui/webapp/main.py
@@ -41,7 +41,7 @@


class LimitedLengthString:
def __init__(self, maxlen=2000):
def __init__(self, maxlen=20000):
self.data = deque()
self.len = 0
self.maxlen = maxlen
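The `append`/`get_string` bodies are elided in this diff; for context, a capped buffer like this typically drops its oldest chunks once the total length exceeds `maxlen`. A minimal sketch, assuming that standard trim-from-the-left behaviour (not necessarily the project's exact code):

```python
from collections import deque

class CappedTextBuffer:  # hypothetical stand-in for LimitedLengthString
    def __init__(self, maxlen=20000):
        self.data = deque()
        self.len = 0
        self.maxlen = maxlen

    def append(self, string):
        self.data.append(string)
        self.len += len(string)
        while self.len > self.maxlen:  # drop oldest chunks until under the cap
            self.len -= len(self.data.popleft())

    def get_string(self):
        return ''.join(self.data)[-self.maxlen:]  # never hand back more than maxlen chars
```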
@@ -91,37 +91,48 @@ def inspect_file(filename: str) -> str:
except Exception:
return '' # file reading failed. - Don't want to know why.

system=f"""Act as a data analyst, hereafter referred to as EXPERT, with ten years of experience in the domain of expense management and general accounting. Your role is to help inexperienced people analyse data about expenses and accounting. Be sure to help the user understand what to focus on and give suggestions on where it would make sense to dig deeper.

async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):

prompt = f"""First, here is a history of what I asked you to do earlier.
The actual prompt follows after ENDOFHISTORY.
History:
{message_buffer.get_string()}
ENDOFHISTORY.
Write Python code, in a triple backtick Markdown code block, that does the following:
{user_prompt}
If generating Python code, follow the instructions under [GENERATE_PYTHON_INSTRUCTIONS].

[GENERATE_PYTHON_INSTRUCTIONS]
Notes:
First, think step by step what you want to do and write it down in English.
Then generate valid Python code in a code block
Make sure all code is valid - it be run in a Jupyter Python 3 kernel environment.
Make sure all code is valid - it will be run in a Jupyter Python 3 kernel environment.
Define every variable before you use it.
For data munging, you can use
'numpy', # numpy==1.24.3
'dateparser' #dateparser==1.1.8
'pandas', # matplotlib==1.5.3
'geopandas' # geopandas==0.13.2
'numpy', # numpy==1.26.2
'dateparser', # dateparser==1.2.0
'pandas', # pandas==1.5.3
'geopandas', # geopandas==0.14.1
For pdf extraction, you can use
'PyPDF2', # PyPDF2==3.0.1
'pdfminer', # pdfminer==20191125
'pdfplumber', # pdfplumber==0.9.0
'pdfplumber', # pdfplumber==0.10.3
For data visualization, you can use
'matplotlib', # matplotlib==3.7.1
'matplotlib', # matplotlib==3.8.2
Be sure to generate charts with matplotlib. If you need geographical charts, use geopandas with the geopandas.datasets module.
If the user has just uploaded a file, focus on the file that was most recently uploaded (and optionally all previously uploaded files)

Teacher mode: if the code modifies or produces a file, at the end of the code block insert a print statement that prints a link to it as HTML string: <a href='/download?file=INSERT_FILENAME_HERE'>Download file</a>. Replace INSERT_FILENAME_HERE with the actual filename."""
Teacher mode: if the code modifies or produces a file, at the end of the code block insert a print statement that prints a link to it as HTML string: <a href='/download?file=INSERT_FILENAME_HERE'>Download file</a>. Replace INSERT_FILENAME_HERE with the actual filename.
"""
async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):


prompt = f"""First, here is a history of what I asked you to do earlier.
The actual prompt follows after ENDOFHISTORY.
History:
{message_buffer.get_string()}
ENDOFHISTORY.
Aiming to help the user in the best possible way with the below [USER_PROMPT], do one of the following:
1. Write Python code, in a triple backtick Markdown code block, that supports what the user is trying to achieve, following the instructions under [GENERATE_PYTHON_INSTRUCTIONS].
2. Answer the question the user asks.
3. Ask follow-up questions to better understand what the user wants to achieve.

[USER_PROMPT]
{user_prompt}
"""

if user_openai_key:
openai.api_key = user_openai_key
@@ -130,7 +141,7 @@ async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
temperature=0.7,
headers=OPENAI_EXTRA_HEADERS,
messages=[
# {"role": "system", "content": system},
{"role": "system", "content": system},
{"role": "user", "content": prompt},
]
)
@@ -160,6 +171,8 @@ async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
except AttributeError:
return None, "Malformed answer from API: missing message content", 500  # 'content' is unset if the attribute lookup failed

print('CONTENT FROM CODE:' + content)

def extract_code(text):
# Match triple backtick blocks first
triple_match = re.search(r'```(?:\w+\n)?(.+?)```', text, re.DOTALL)
@@ -173,6 +186,58 @@ def extract_code(text):

return extract_code(content), content.strip(), 200
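For intuition, this is roughly how the triple-backtick branch of `extract_code` behaves on a typical model reply (illustrative only; the single-backtick fallback is elided in this diff):

```python
import re

def extract_code_sketch(text):  # mirrors the triple-backtick branch shown above
    triple_match = re.search(r'```(?:\w+\n)?(.+?)```', text, re.DOTALL)
    return triple_match.group(1) if triple_match else None

fence = "`" * 3  # build the fence so this example nests cleanly
reply = f"Here is the plan.\n{fence}python\nimport pandas as pd\nprint(pd.__version__)\n{fence}"
print(extract_code_sketch(reply))  # -> the code between the fences
```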

async def get_chat(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):

prompt = f"""First, here is a history of what I asked you to do earlier.
The actual prompt follows after ENDOFHISTORY.
History:
{message_buffer.get_string()}
ENDOFHISTORY.
{user_prompt}
DO NOT GENERATE ANY CODE
Teacher mode: if the results return a link to a file that was generated, make sure to include the link in your answer.
"""

print('PROMPT FROM CHAT:' + prompt)

if user_openai_key:
openai.api_key = user_openai_key

arguments = dict(
temperature=0.7,
headers=OPENAI_EXTRA_HEADERS,
messages=[
{"role": "system", "content": system},
{"role": "user", "content": prompt},
]
)

if openai.api_type == 'open_ai':
arguments["model"] = model
elif openai.api_type == 'azure':
arguments["deployment_id"] = model
else:
return None, f"Error: Invalid OPENAI_PROVIDER: {openai.api_type}", 500

try:
result_GPT = openai.ChatCompletion.create(**arguments)

if 'error' in result_GPT:
raise openai.APIError(code=result_GPT.error.code, message=result_GPT.error.message)

if result_GPT.choices[0].finish_reason == 'content_filter':
raise openai.APIError('Content Filter')

except openai.OpenAIError as e:
return None, f"Error from API: {e}", 500

try:
content = result_GPT.choices[0].message.content
except AttributeError:
# 'content' would be undefined here; report the raw API object instead
return f"Malformed answer from API: {result_GPT}", 500

return content, 200

# We know this Flask app is for local use. So we can disable the verbose Werkzeug logger
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
@@ -204,16 +269,19 @@ def models():
@app.route('/api/<path:path>', methods=["GET", "POST"])
def proxy_kernel_manager(path):
if request.method == "POST":
print('starting code execution')
resp = requests.post(
f'http://localhost:{KERNEL_APP_PORT}/{path}', json=request.get_json())
requestjson = request.get_json()
print(f"""started code execution with status: {resp.status_code} {requestjson}""")
else:
resp = requests.get(f'http://localhost:{KERNEL_APP_PORT}/{path}')

excluded_headers = ['content-encoding',
'content-length', 'transfer-encoding', 'connection']
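# These headers describe the upstream connection/encoding; forwarding them verbatim would break the proxied response.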
headers = [(name, value) for (name, value) in resp.raw.headers.items()
if name.lower() not in excluded_headers]

response = Response(resp.content, resp.status_code, headers)
return response

@@ -236,16 +304,16 @@ def download_file():
@app.route('/inject-context', methods=['POST'])
def inject_context():
user_prompt = request.json.get('prompt', '')

# Append all messages to the message buffer for later use
print('INJECTING-CONTEXT:' + user_prompt)
message_buffer.append(user_prompt + "\n\n")

print('message_buffer: ' + message_buffer.get_string())
return jsonify({"result": "success"})
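For reference, the endpoint can be smoke-tested directly; the port below is an assumption, substitute whatever your local web app listens on:

```python
import requests

# Hypothetical smoke test for /inject-context; adjust host/port to your setup.
r = requests.post("http://localhost:8080/inject-context",
                  json={"prompt": "EXPERT: summary of the previous answer"})
print(r.json())  # expected: {"result": "success"}
```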


@app.route('/generate', methods=['POST'])
def generate_code():
user_prompt = request.json.get('prompt', '')
print('ACTION:' + user_prompt)
user_openai_key = request.json.get('openAIKey', None)
model = request.json.get('model', None)

@@ -257,11 +325,33 @@ def generate_code():
loop.close()

# Append all messages to the message buffer for later use
message_buffer.append(user_prompt + "\n\n")
message_buffer.append('USER: ' + user_prompt + "\n\n")

return jsonify({'code': code, 'text': text}), status


@app.route('/chat', methods=['POST'])
def generate_chat():
# All of this comes from the system, not the user.
user_prompt = request.json.get('prompt', '')
user_openai_key = request.json.get('openAIKey', None)
model = request.json.get('model', None)
print('CHAT_PROMPT: ' + user_prompt)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)

text, status = loop.run_until_complete(
get_chat(user_prompt, user_openai_key, model))
loop.close()

print('CHAT_TEXT: ' + text)

# Append all messages to the message buffer for later use
message_buffer.append('USER: ' + user_prompt + "\n\n")
print(f"""RETURNING TO UI: ${status}""")
return jsonify({'text': text}), status
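As an aside, the manual loop handling in both routes could likely collapse to `asyncio.run`; a possible simplification, not what this PR ships:

```python
# Hypothetical equivalent of the new_event_loop / run_until_complete / close sequence:
text, status = asyncio.run(get_chat(user_prompt, user_openai_key, model))
```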


@app.route('/upload', methods=['POST'])
def upload_file():
# check if the post request has the file part