main.py (forked from AntonOsika/gpt-engineer)
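"""CLI entry point for gpt-engineer.

Parses command-line options with Typer, configures the AI backend
(OpenAI or an Azure OpenAI deployment), sets up the project databases
(input, memory, logs, workspace, archive, preprompts), runs the
selected pipeline steps, and, with user consent, collects learnings.
"""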
import logging
import os
from pathlib import Path

import openai
import typer
from dotenv import load_dotenv

from gpt_engineer.ai import AI
from gpt_engineer.collect import collect_learnings
from gpt_engineer.db import DB, DBs, archive
from gpt_engineer.learning import collect_consent
from gpt_engineer.steps import STEPS, Config as StepsConfig

app = typer.Typer()  # creates a CLI app


def load_env_if_needed():
    if os.getenv("OPENAI_API_KEY") is None:
        load_dotenv()
    openai.api_key = os.getenv("OPENAI_API_KEY")


@app.command()
def main(
    project_path: str = typer.Argument("projects/example", help="path"),
    model: str = typer.Argument("gpt-4", help="model id string"),
    temperature: float = 0.1,
    steps_config: StepsConfig = typer.Option(
        StepsConfig.DEFAULT, "--steps", "-s", help="decide which steps to run"
    ),
    improve_option: bool = typer.Option(
        False,
        "--improve",
        "-i",
        help="Improve code from existing project.",
    ),
    azure_endpoint: str = typer.Option(
        "",
        "--azure",
        "-a",
        help="""Endpoint for your Azure OpenAI Service (https://xx.openai.azure.com).
            In that case, the given model is the deployment name chosen in the Azure AI Studio.""",
    ),
    verbose: bool = typer.Option(False, "--verbose", "-v"),
):
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # For the improve option take current project as path and add .gpteng folder
    if improve_option:
        # The default option for the --improve is the IMPROVE_CODE, not DEFAULT
        if steps_config == StepsConfig.DEFAULT:
            steps_config = StepsConfig.IMPROVE_CODE

    load_env_if_needed()

    ai = AI(
        model_name=model,
        temperature=temperature,
        azure_endpoint=azure_endpoint,
    )

    input_path = Path(project_path).absolute()
    memory_path = input_path / "memory"
    workspace_path = input_path / "workspace"
    archive_path = input_path / "archive"

    dbs = DBs(
        memory=DB(memory_path),
        logs=DB(memory_path / "logs"),
        input=DB(input_path),
        workspace=DB(workspace_path),
        preprompts=DB(
            Path(__file__).parent / "preprompts"
        ),  # Loads preprompts from the preprompts directory
        archive=DB(archive_path),
    )

    if steps_config not in [
        StepsConfig.EXECUTE_ONLY,
        StepsConfig.USE_FEEDBACK,
        StepsConfig.EVALUATE,
        StepsConfig.IMPROVE_CODE,
    ]:
        archive(dbs)

    if not dbs.input.get("prompt"):
        dbs.input["prompt"] = input(
            "\nWhat application do you want gpt-engineer to generate?\n"
        )

    steps = STEPS[steps_config]
    for step in steps:
        messages = step(ai, dbs)
        dbs.logs[step.__name__] = AI.serialize_messages(messages)

    if collect_consent():
        collect_learnings(model, temperature, steps, dbs)

    dbs.logs["token_usage"] = ai.format_token_usage_log()


if __name__ == "__main__":
    app()
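
# Example invocations (a sketch, not part of the original file; assumes the
# gpt_engineer package is importable and OPENAI_API_KEY is set in the
# environment or in a .env file):
#
#   python main.py projects/example gpt-4
#   python main.py projects/example gpt-4 --improve
#   python main.py projects/example my-deployment --azure https://xx.openai.azure.com --verbose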