
Commit ef9dc9c
Added:
- Customizable ESBMC location.
- Can now specify which AI Model to use from the CMD line.
Yiannis128 committed Apr 30, 2023
1 parent 16b50b4 commit ef9dc9c
Showing 5 changed files with 43 additions and 24 deletions.
.env.example: 1 addition & 0 deletions
@@ -1,3 +1,4 @@
OPENAI_API_KEY=XXXXXXXXXXX
CHAT_TEMPERATURE=1.0
AI_MODEL=gpt-3.5-turbo
+ESBMC_PATH=./esbmc
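
For context, here is a minimal sketch (not part of the commit) of how a `.env` file like this can be read with python-dotenv, falling back to the bundled `./esbmc` default when `ESBMC_PATH` is unset or empty:

```python
import os

from dotenv import load_dotenv  # python-dotenv

load_dotenv()  # read variables from .env in the current working directory

openai_api_key = os.getenv("OPENAI_API_KEY", "")
chat_temperature = float(os.getenv("CHAT_TEMPERATURE", "1.0"))
ai_model = os.getenv("AI_MODEL", "gpt-3.5-turbo")
# An empty or missing ESBMC_PATH falls back to the default location.
esbmc_path = os.getenv("ESBMC_PATH") or "./esbmc"
```
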
README.md: 5 additions & 4 deletions
@@ -14,10 +14,10 @@ This is an area of research. From the ESBMC GitHub:
## Initial Setup

1. ESBMC-AI does not come with the original ESBMC software. In order to use ESBMC-AI you must provide ESBMC. Download [ESBMC](http://esbmc.org/) executable or build from [source](https://github.com/esbmc/esbmc).
-2. Place it in the project directory.
-3. Make sure it's named "ESBMC" with no extension.
-4. Create a .env file using the provided .env.example as a template, and, insert your OpenAI API key.
-5. Further adjust .env settings as required.
+2. Create a .env file using the provided .env.example as a template, and, insert your OpenAI API key.
+3. Enter the ESBMC executable location in the .env `ESBMC_PATH`.
+4. Further adjust .env settings as required.
+5. You can now run ESBMC-AI.

## Settings

@@ -26,6 +26,7 @@ The following settings are adjustable in the .env file, this list may be incomplete
1. `OPENAI_API_KEY`: **This is required**. Your OpenAI API key.
2. `CHAT_TEMPERATURE`: The temperature parameter used when calling the chat completion API.
3. `AI_MODEL`: The model to use. Options: `gpt-3.5-turbo`, `gpt-4` (under API key conditions).
+4. `ESBMC_PATH`: Override the default ESBMC path. Leave blank to use the default ("./esbmc").

## Usage

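
As an aside (not part of the README), a small Python check, assuming the `.env` file sits in the current working directory, can confirm the setup described above before running the tool:

```python
import os

from dotenv import load_dotenv

load_dotenv()

esbmc_path = os.getenv("ESBMC_PATH") or "./esbmc"
if not os.path.isfile(esbmc_path):
    raise SystemExit(f"ESBMC not found at {esbmc_path}; check ESBMC_PATH in .env")
if not os.getenv("OPENAI_API_KEY"):
    raise SystemExit("OPENAI_API_KEY is missing from .env")
print("Setup looks good; ESBMC at", esbmc_path)
```
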
main.py: 10 additions & 4 deletions
@@ -31,11 +31,10 @@ def print_help() -> None:

def check_health() -> None:
    printv("Performing health check...")
-    if os.path.exists("./esbmc"):
+    if os.path.exists(config.esbmc_path):
        printv("ESBMC has been located")
    else:
-        print("Error: ESBMC could not be found...")
-        print("Place ESBMC in the folder current working directory.")
+        print(f"Error: ESBMC could not be found in {config.esbmc_path}")
        exit(3)


@@ -47,7 +46,7 @@ def get_src(path: str) -> str:

def esbmc(path: str, esbmc_params: list = config.esbmc_params):
    # Build parameters
-    esbmc_cmd = ["./esbmc"]
+    esbmc_cmd = [config.esbmc_path]
    esbmc_cmd.extend(esbmc_params)
    esbmc_cmd.append(path)

@@ -112,6 +111,13 @@ def main() -> None:
help="Responses from AI model will be shown raw.",
)

parser.add_argument(
"-m",
"--ai-model",
default="",
help="Which AI model to use.",
)

args = parser.parse_args()

config.load_envs()
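
Read together, the main.py changes route the ESBMC location through `config.esbmc_path` and add a `-m`/`--ai-model` flag. Below is a rough sketch of how the pieces could fit at runtime; the positional `filename` argument, the `config.load_args(args)` call site, and the `subprocess.run` invocation are illustrative assumptions that are not shown in this diff:

```python
import argparse
import os
import subprocess

import config  # the project's config module referenced throughout the diff

# Hypothetical invocation: python main.py my_file.c -m gpt-4 --verbose
parser = argparse.ArgumentParser()
parser.add_argument("filename")  # assumed positional source file
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-r", "--raw-output", action="store_true")
parser.add_argument("-m", "--ai-model", default="", help="Which AI model to use.")
parser.add_argument("remaining", nargs=argparse.REMAINDER)
args = parser.parse_args()

config.load_envs()      # .env values first ...
config.load_args(args)  # ... then CLI flags take precedence

# check_health(): validate the configured path instead of a hard-coded "./esbmc".
if not os.path.exists(config.esbmc_path):
    print(f"Error: ESBMC could not be found in {config.esbmc_path}")
    raise SystemExit(3)

# esbmc(): build the command from config.esbmc_path and the remaining parameters.
esbmc_cmd = [config.esbmc_path, *config.esbmc_params, args.filename]
result = subprocess.run(esbmc_cmd, capture_output=True, text=True)
print(result.stdout)
```
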
src/chat.py: 5 additions & 4 deletions
@@ -53,11 +53,12 @@ def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):


class ChatInterface(object):
-    system_messages: list = []
-    messages: list = []
-    model_name: str = "gpt-3.5-turbo"
    max_tokens = MAX_TOKENS_GPT3TURBO
-    temperature = 1.0
+
+    system_messages: list
+    messages: list
+    model_name: str
+    temperature: float

    def __init__(
        self,
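
The chat.py change removes the hard-coded class-level defaults (`"gpt-3.5-turbo"`, `1.0`) in favour of bare annotations, which suggests the values now arrive from configuration when the object is built. A minimal sketch of that idea follows; the `__init__` parameters and the `MAX_TOKENS_GPT3TURBO` value are assumptions, since neither is shown in this diff:

```python
MAX_TOKENS_GPT3TURBO = 4096  # placeholder; the project defines this constant elsewhere


class ChatInterface(object):
    max_tokens = MAX_TOKENS_GPT3TURBO  # still a class-level constant in the diff

    system_messages: list
    messages: list
    model_name: str
    temperature: float

    def __init__(self, system_messages: list, model_name: str, temperature: float):
        self.system_messages = list(system_messages)
        self.messages = []
        self.model_name = model_name    # e.g. config.ai_model
        self.temperature = temperature  # e.g. config.chat_temperature
```
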
src/config.py: 22 additions & 12 deletions
@@ -4,25 +4,14 @@
from dotenv import load_dotenv

openai_api_key: str = ""
+esbmc_path: str = "./esbmc"
verbose: bool = False
raw_responses: bool = False
esbmc_params: list[str] = ["--z3", "--unwind", "5"]
chat_temperature: float = 1.0
ai_model: str = "gpt-3.5-turbo"
-
-
-def load_args(args) -> None:
-    global verbose
-    verbose = args.verbose
-
-    global raw_responses
-    raw_responses = args.raw_output
-
-    global esbmc_params
-    if len(args.remaining) != 0:
-        esbmc_params = args.remaining


def load_envs() -> None:
    load_dotenv()

@@ -34,3 +23,24 @@ def load_envs() -> None:

    global ai_model
    ai_model = str(os.getenv("AI_MODEL"))
+
+    global esbmc_path
+    value = os.getenv("ESBMC_PATH")
+    if value != None and value != "":
+        esbmc_path = str(value)
+
+
+def load_args(args) -> None:
+    global verbose
+    verbose = args.verbose
+
+    global raw_responses
+    raw_responses = args.raw_output
+
+    global ai_model
+    if args.ai_model != "":
+        ai_model = args.ai_model
+
+    global esbmc_params
+    if len(args.remaining) != 0:
+        esbmc_params = args.remaining
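
Taken together, the reworked config.py establishes a clear precedence: `load_envs()` reads the .env values (keeping `./esbmc` unless `ESBMC_PATH` is set to a non-empty string), and `load_args()` then lets command-line values such as `-m`/`--ai-model` override them. A small illustration, assuming a populated .env and using a stand-in namespace object in place of real argparse output:

```python
from types import SimpleNamespace

import config  # the module shown in this diff

config.load_envs()
print(config.esbmc_path)  # "./esbmc" unless ESBMC_PATH is set to a non-empty value
print(config.ai_model)    # whatever AI_MODEL in .env says, e.g. "gpt-3.5-turbo"

# Stand-in for the argparse result that main.py passes to load_args().
args = SimpleNamespace(verbose=True, raw_output=False, ai_model="gpt-4", remaining=[])
config.load_args(args)
print(config.ai_model)    # now "gpt-4": the CLI flag overrides the .env setting
```
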
