Skip to content

Commit

Permalink
Merge pull request AntonOsika#52 from patillacode/minor-clean-up
Browse files Browse the repository at this point in the history
Minor clean up
  • Loading branch information
AntonOsika authored Jun 15, 2023
2 parents 65e1171 + 4d18327 commit 3538a4c
Show file tree
Hide file tree
Showing 9 changed files with 133 additions and 43 deletions.
32 changes: 32 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1 +1,33 @@
# See https://help.github.com/ignore-files/ for more about ignoring files.

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# Distribution / packaging
dist/
build/
*.egg-info/
*.egg

# Virtual environments
venv/
ENV/

# IDE-specific files
.vscode/

# Compiled Python modules
*.pyc
*.pyo
*.pyd

# macOS specific files
.DS_Store

# Windows specific files
Thumbs.db

# this application's specific files
archive
15 changes: 6 additions & 9 deletions ai.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@

import openai


Expand All @@ -8,15 +7,15 @@ def __init__(self, **kwargs):

def start(self, system, user):
    """Begin a new chat with a system prompt and an initial user message.

    Delegates to self.next(), which calls the chat API and returns the
    full message list including the assistant's reply.
    """
    # Use the class's own message-builder helpers instead of duplicating
    # the {"role": ..., "content": ...} literals inline (consistency with
    # fsystem/fuser defined on this class).
    messages = [
        self.fsystem(system),
        self.fuser(user),
    ]
    return self.next(messages)

def fsystem(self, msg):
    """Package *msg* as a system-role message dict for the chat API."""
    message = dict(role="system", content=msg)
    return message

def fuser(self, msg):
    """Package *msg* as a user-role message dict for the chat API."""
    message = dict(role="user", content=msg)
    return message

Expand All @@ -25,9 +24,7 @@ def next(self, messages: list[dict[str, str]], prompt=None):
messages = messages + [{"role": "user", "content": prompt}]

response = openai.ChatCompletion.create(
messages=messages,
stream=True,
**self.kwargs
messages=messages, stream=True, **self.kwargs
)

chat = []
Expand All @@ -36,4 +33,4 @@ def next(self, messages: list[dict[str, str]], prompt=None):
msg = delta.get('content', '')
print(msg, end="")
chat.append(msg)
return messages + [{"role": "assistant", "content": "".join(chat)}]
return messages + [{"role": "assistant", "content": "".join(chat)}]
6 changes: 3 additions & 3 deletions chat_to_files.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import re


def parse_chat(chat):# -> List[Tuple[str, str]]:
def parse_chat(chat): # -> List[Tuple[str, str]]:
# Get all ``` blocks
regex = r"```(.*?)```"

Expand All @@ -15,7 +15,7 @@ def parse_chat(chat):# -> List[Tuple[str, str]]:
code = "\n".join(code)
# Add the file to the list
files.append((path, code))

return files


Expand All @@ -24,4 +24,4 @@ def to_files(chat, workspace):

files = parse_chat(chat)
for file_name, file_content in files:
workspace[file_name] = file_content
workspace[file_name] = file_content
4 changes: 2 additions & 2 deletions db.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import os

from dataclasses import dataclass
import os
from pathlib import Path


Expand All @@ -25,4 +25,4 @@ class DBs:
logs: DB
identity: DB
input: DB
workspace: DB
workspace: DB
18 changes: 8 additions & 10 deletions main.py
Original file line number Diff line number Diff line change
@@ -1,26 +1,25 @@
import json
import os
import pathlib
from typing import Optional
import openai
from chat_to_files import to_files
from ai import AI
from steps import STEPS
from db import DB, DBs

import typer

from ai import AI
from db import DB, DBs
from steps import STEPS

app = typer.Typer()


@app.command()
def chat(
project_path: str = typer.Argument(None, help="path"),
run_prefix: str = typer.Option("", help="run prefix, if you want to run multiple variants of the same project and later compare them"),
run_prefix: str = typer.Option(
"",
help="run prefix, if you want to run multiple variants of the same project and later compare them",
),
model: str = "gpt-4",
temperature: float = 0.1,
):

if project_path is None:
project_path = str(pathlib.Path(__file__).parent / "example")

Expand All @@ -41,7 +40,6 @@ def chat(
identity=DB(pathlib.Path(__file__).parent / "identity"),
)


for step in STEPS:
messages = step(ai, dbs)
dbs.logs[step.__name__] = json.dumps(messages)
Expand Down
59 changes: 59 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
# https://beta.ruff.rs/docs/configuration/#using-rufftoml
[tool.ruff]
# F = pyflakes, E/W = pycodestyle errors/warnings, I001 = isort import ordering.
select = ["F", "E", "W", "I001"]
# Keep in sync with [tool.black] line-length below.
line-length = 90
show-fixes = false
target-version = "py311"
task-tags = ["TODO", "FIXME"]
# Paths ruff should never lint: VCS metadata, caches, build output, vendored
# packages, and virtual environments.
exclude = [
".bzr",
".direnv",
".eggs",
".git",
".ruff_cache",
".svn",
".tox",
".venv",
"__pypackages__",
"_build",
"buck-out",
"build",
"dist",
"node_modules",
"venv",
]

# Import-sorting behavior for the I001 rule above.
[tool.ruff.isort]
known-first-party = []
known-third-party = []
section-order = [
"future",
"standard-library",
"third-party",
"first-party",
"local-folder",
]
combine-as-imports = true
split-on-trailing-comma = false
lines-between-types = 1

# https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html
[tool.black]
# Must match the ruff line-length above so the two tools agree.
line-length = 90
target-version = ["py311"]
include = '\.pyi?$'
exclude = '''
(
/(
\.direnv
| \.eggs
| \.git
| \.tox
| \.venv
| _build
| build
| dist
| venv
)/
)
'''
6 changes: 4 additions & 2 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,4 @@
openai
typer
black==23.3.0
openai==0.27.8
ruff==0.0.272
typer==0.9.0
1 change: 0 additions & 1 deletion scripts/rerun_edited_message_logs.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ def chat(
temperature: float = 0.1,
max_tokens: int = 4096,
):

ai = AI(
model=model,
temperature=temperature,
Expand Down
35 changes: 19 additions & 16 deletions steps.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,28 @@
import json

from ai import AI
from chat_to_files import to_files
from db import DBs
import json


def setup_sys_prompt(dbs):
    """Compose the system prompt from the identity DB's 'setup' and 'philosophy' texts."""
    parts = (dbs.identity['setup'], '\nUseful to know:\n', dbs.identity['philosophy'])
    return ''.join(parts)


def run(ai: AI, dbs: DBs):
    """Feed the main prompt to the AI and write the generated files to the workspace.

    Returns the full chat transcript so the caller can log it.
    """
    prompt = dbs.input['main_prompt']
    messages = ai.start(setup_sys_prompt(dbs), prompt)
    # The last message is the assistant's answer containing the code blocks.
    to_files(messages[-1]['content'], dbs.workspace)
    return messages


def clarify(ai: AI, dbs: DBs):
'''Ask the user if they want to clarify anything and save the results to the workspace'''
'''
Ask the user if they want to clarify anything and save the results to the workspace
'''
messages = [ai.fsystem(dbs.identity['qa'])]
user = dbs.input['main_prompt']
while True:
Expand All @@ -31,35 +39,30 @@ def clarify(ai: AI, dbs: DBs):
break

user += (
'\n\n'
'Is anything else unclear? If yes, only answer in the form:\n'
'\n\n'
'Is anything else unclear? If yes, only answer in the form:\n'
'{remaining unclear areas} remaining questions.\n'
'{Next question}\n'
'If everything is sufficiently clear, only answer "no".'
)
)

print()
return messages


def run_clarified(ai: AI, dbs: DBs):
    """Re-run generation using the clarified conversation from the clarify step."""
    # Replay the logged clarify conversation, but replace its first (system)
    # message with the full code-generation system prompt.
    prior = json.loads(dbs.logs[clarify.__name__])
    messages = [ai.fsystem(setup_sys_prompt(dbs)), *prior[1:]]
    messages = ai.next(messages, dbs.identity['use_qa'])
    to_files(messages[-1]['content'], dbs.workspace)
    return messages


# Ordered pipeline of steps executed for each project run.
STEPS = [clarify, run_clarified]

# Future steps that can be added:
# improve_files,
Expand Down

0 comments on commit 3538a4c

Please sign in to comment.