Skip to content

Commit

Permalink
Add gpt-4o model
Browse files Browse the repository at this point in the history
  • Loading branch information
err09r authored May 13, 2024
1 parent dc51eb0 commit aa3dc32
Showing 1 changed file with 8 additions and 3 deletions.
11 changes: 8 additions & 3 deletions bot/openai_helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,8 @@
GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613")
GPT_4_VISION_MODELS = ("gpt-4-vision-preview",)
GPT_4_128K_MODELS = ("gpt-4-1106-preview","gpt-4-0125-preview","gpt-4-turbo-preview")
GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_VISION_MODELS + GPT_4_128K_MODELS
# Models in the GPT-4o family. NOTE: the trailing comma is required —
# ("gpt-4o") without it is just a parenthesized string, which would make
# `GPT_ALL_MODELS = ... + GPT_4O_MODELS` raise a TypeError (tuple + str)
# and would make `model in GPT_4O_MODELS` match substrings like "gpt".
GPT_4O_MODELS = ("gpt-4o",)
GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_VISION_MODELS + GPT_4_128K_MODELS + GPT_4O_MODELS


def default_max_tokens(model: str) -> int:
Expand All @@ -42,7 +43,7 @@ def default_max_tokens(model: str) -> int:
return base
elif model in GPT_4_MODELS:
return base * 2
elif model in GPT_3_16K_MODELS:
elif model in GPT_3_16K_MODELS:
if model == "gpt-3.5-turbo-1106":
return 4096
return base * 4
Expand All @@ -52,6 +53,8 @@ def default_max_tokens(model: str) -> int:
return 4096
elif model in GPT_4_128K_MODELS:
return 4096
elif model in GPT_4O_MODELS:
return 4096


def are_functions_available(model: str) -> bool:
Expand Down Expand Up @@ -634,6 +637,8 @@ def __max_model_tokens(self):
return base * 31
if self.config['model'] in GPT_4_128K_MODELS:
return base * 31
if self.config['model'] in GPT_4O_MODELS:
return base * 31
raise NotImplementedError(
f"Max tokens for model {self.config['model']} is not implemented yet."
)
Expand All @@ -654,7 +659,7 @@ def __count_tokens(self, messages) -> int:
if model in GPT_3_MODELS + GPT_3_16K_MODELS:
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
elif model in GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_VISION_MODELS + GPT_4_128K_MODELS:
elif model in GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_VISION_MODELS + GPT_4_128K_MODELS + GPT_4O_MODELS:
tokens_per_message = 3
tokens_per_name = 1
else:
Expand Down

0 comments on commit aa3dc32

Please sign in to comment.