Merge pull request #159 from esbmc/improvements
* General text formatting improvements when running ESBMC-AI
* Added clock sample source code for debugging
* Added model list caching for OpenAI services, to avoid spamming the API with repeated model-list requests (which previously got me blocked for 24 hours); a sketch of the idea follows this list
* Solution has been simplified and is now usable
* Verifiers now provide a trace through a function
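
For illustration, a minimal sketch of what such model-list caching could look like. The file path, refresh interval, and function name below are assumptions for illustration only, not the implementation in this commit; the config key "llm_requests.open_ai.model_refresh_seconds" does appear in the diff below and suggests the refresh interval is configurable.

```python
import json
import time
from pathlib import Path

# Hypothetical cache location and refresh interval (illustrative values only).
_CACHE_FILE = Path.home() / ".cache" / "esbmc-ai" / "openai_models.json"
_REFRESH_SECONDS = 24 * 60 * 60


def get_cached_openai_model_names(fetch_names) -> list[str]:
    """Return cached OpenAI model names, refreshing only when the cache is stale."""
    if _CACHE_FILE.exists():
        age = time.time() - _CACHE_FILE.stat().st_mtime
        if age < _REFRESH_SECONDS:
            return json.loads(_CACHE_FILE.read_text())
    # Cache missing or stale: fetch once from the API and persist for later runs.
    names = fetch_names()
    _CACHE_FILE.parent.mkdir(parents=True, exist_ok=True)
    _CACHE_FILE.write_text(json.dumps(names))
    return names
```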
Yiannis128 authored Jan 27, 2025
2 parents 9fb9757 + c617d9d commit 37697d9
Showing 26 changed files with 708 additions and 520 deletions.
13 changes: 0 additions & 13 deletions .vscode/launch.json
@@ -30,19 +30,6 @@
"${file}"
]
},
{
"name": "Optimize Code on Open File",
"type": "debugpy",
"request": "launch",
"module": "esbmc_ai",
"justMyCode": true,
"cwd": "${workspaceFolder}",
"args": [
"-c",
"optimize-code",
"${file}"
]
},
{
"name": "Python: Current File",
"type": "debugpy",
2 changes: 1 addition & 1 deletion esbmc_ai/__main__.py
@@ -242,7 +242,7 @@ def main() -> None:
command = args.command
command_names: list[str] = command_runner.command_names
if command in command_names:
print("Running Command:", command)
print("Running Command:", command, "\n")
_run_command_mode(command=command_runner.commands[command], args=args)
sys.exit(0)
else:
11 changes: 6 additions & 5 deletions esbmc_ai/addon_loader.py
@@ -78,7 +78,9 @@ def init(self, config: Config, builtin_verifier_names: list[str]):
field_names.append(f.name)
del field_names

# Init the verifier.name field for the main config
# Init the verifier.name field for the main config. The reason this is
# not part of the main config is that verifiers are treated as addons,
# even internally.
self._config.add_config_field(
ConfigField(
name="verifier.name",
@@ -109,10 +111,9 @@ def _load_chat_command_addons(self) -> None:
self.add_config_field(field)

if len(self.chat_command_addons) > 0:
printv(
"ChatCommand Addons:\n\t* "
+ "\t * ".join(self.chat_command_addon_names)
)
printv("ChatCommand Addons:")
for cm in self.chat_command_addon_names:
printv(f"\t* {cm}")

def _load_verifier_addons(self) -> None:
"""Loads the verifier addons, initializes their config fields."""
64 changes: 35 additions & 29 deletions esbmc_ai/ai_models.py
@@ -160,14 +160,15 @@ def create_llm(
)

@classmethod
def get_openai_model_max_tokens(self, name: str) -> int:
def get_openai_model_max_tokens(cls, name: str) -> int:
"""Dynamically resolves the max tokens from a base model."""

# https://platform.openai.com/docs/models
tokens = {
"o1-mini": 128000,
"o1": 200000,
"gpt-4o": 128000,
"chatgpt-4o": 128000,
"o1": 128000,
"gpt-4": 8192,
"gpt-3.5-turbo": 16385,
"gpt-3.5-turbo-instruct": 4096,
@@ -229,6 +230,21 @@ class _AIModels(Enum):
_ai_model_names: set[str] = set(item.value.name for item in _AIModels)


def add_open_ai_model(model_name: str) -> None:
"""Registers an OpenAI model."""
try:
add_custom_ai_model(
AIModelOpenAI(
model_name,
AIModelOpenAI.get_openai_model_max_tokens(model_name),
),
)
except ValueError:
# Some models included in the models list, such as DALL-E, aren't
# sequence models, so they are skipped here.
pass


def add_custom_ai_model(ai_model: AIModel) -> None:
"""Registers a custom AI model."""
# Check if the AI model already exists.
@@ -238,52 +254,42 @@ def add_custom_ai_model(ai_model: AIModel) -> None:
_custom_ai_models.append(ai_model)


def download_openai_model_names(api_keys: APIKeyCollection) -> list[str]:
"""Gets the open AI models from the API service and returns them."""
assert api_keys and api_keys.openai
from openai import Client

"llm_requests.open_ai.model_refresh_seconds"
# Check if needs refreshing
try:
return [
str(model.id)
for model in Client(api_key=api_keys.openai).models.list().data
]
except ImportError:
return []


def is_valid_ai_model(
ai_model: Union[str, AIModel], api_keys: Optional[APIKeyCollection] = None
) -> bool:
"""Accepts both the AIModel object and the name as parameter. It checks the
openai servers to see if a model is defined on their servers, if not, then
it checks the internally defined AI models list."""
from openai import Client

# Get the name of the model
name: str = ai_model.name if isinstance(ai_model, AIModel) else ai_model

# Try the OpenAI API to check whether the model is defined there. Only
# models whose names start with "gpt-" are queried, to avoid spamming the
# API and getting blocked. NOTE: Not covered by tests, as there is
# currently no way to mock the API.
if name.startswith("gpt-") and api_keys and api_keys.openai:
try:
for model in Client(api_key=api_keys.openai).models.list().data:
if model.id == name:
return True
except ImportError:
pass

# Use the predefined list of models.
return name in _ai_model_names


def get_ai_model_by_name(
name: str, api_keys: Optional[APIKeyCollection] = None
) -> AIModel:
def get_ai_model_by_name(name: str) -> AIModel:
"""Checks for built-in and custom_ai models"""
# Check OpenAI models.
if api_keys and api_keys.openai:
try:
from openai import Client

for model in Client(api_key=api_keys.openai).models.list().data:
if model.id == name:
add_custom_ai_model(
AIModelOpenAI(
model.id,
AIModelOpenAI.get_openai_model_max_tokens(model.id),
),
)
except ImportError:
pass

# Check AIModels enum.
for enum_value in _AIModels:
ai_model: AIModel = enum_value.value
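For context, a hedged usage sketch of the helpers changed in this file. The call site and the api_keys variable are assumptions; download_openai_model_names and add_open_ai_model are the functions from the diff above.

```python
# Hypothetical startup call site: download the model list once (ideally via
# the cache described in the commit message) and register each model.
for model_name in download_openai_model_names(api_keys):
    # Non-sequence models such as DALL-E are skipped inside add_open_ai_model.
    add_open_ai_model(model_name)
```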
2 changes: 1 addition & 1 deletion esbmc_ai/chats/solution_generator.py
@@ -5,7 +5,7 @@
from typing import Optional
from langchain_core.language_models import BaseChatModel
from typing_extensions import override
from langchain.schema import BaseMessage, HumanMessage
from langchain.schema import BaseMessage

from esbmc_ai.chat_response import ChatResponse, FinishReason
from esbmc_ai.config import FixCodeScenario, default_scenario