
Add NousResearch/{Hermes-2-Pro-Llama-3-8B,Hermes-2-Theta-Llama-3-8B} models (#542)

- Add NousResearch/Hermes-2-Pro-Llama-3-8B model
- Add NousResearch/Hermes-2-Theta-Llama-3-8B model
- Remove trailing whitespace from hermes_handler that might otherwise
degrade performance
- Add the equivalent of `apply_chat_template`'s `add_generation_prompt=True`
to hermes_handler by ending the prompt with an open `<|im_start|>assistant` turn (see the sketch below)

---------

Co-authored-by: Huanzhi (Hans) Mao <huanzhimao@gmail.com>
alonsosilvaallende and HuanzhiMao authored Jul 25, 2024
1 parent 51682f7 commit b0e3289
Showing 5 changed files with 22 additions and 4 deletions.
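
For context on the last two commit-message bullets: in Hugging Face `transformers`, `apply_chat_template(..., add_generation_prompt=True)` renders the conversation and then appends the opening of an assistant turn, so generation begins inside the assistant response. A minimal sketch of the behavior this commit hand-replicates (the messages are illustrative, and the sketch assumes the standard `transformers` tokenizer API and Hermes' ChatML template):

```python
# Sketch of what add_generation_prompt=True does for a ChatML-style model.
# Assumes the standard Hugging Face transformers tokenizer API; loading the
# tokenizer requires network access.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("NousResearch/Hermes-2-Pro-Llama-3-8B")
messages = [
    {"role": "system", "content": "You are a function calling AI model."},
    {"role": "user", "content": "What is the weather in Berkeley?"},
]

# With add_generation_prompt=True, the rendered string ends with the opening
# of an assistant turn, so the model generates the assistant response next.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
# Holds for ChatML-style templates such as Hermes'.
assert prompt.rstrip().endswith("<|im_start|>assistant")
```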
berkeley-function-call-leaderboard/README.md (2 changes: 1 addition & 1 deletion)
@@ -111,7 +111,7 @@ Below is *a table of models we support* to run our leaderboard evaluation against
 |mistral-small-2402 | Prompt|
 |mistral-tiny-2312 | Prompt|
 |Nexusflow-Raven-v2 | Function Calling|
-|NousResearch/Hermes-2-Pro-Mistral-7B 💻| Function Calling|
+|NousResearch/Hermes-2-{Pro-Llama-3-8B,Pro-Mistral-7B,Theta-Llama-3-8B} 💻| Function Calling|
 |snowflake/arctic | Prompt|
 |nvidia/nemotron-4-340b-instruct| Prompt|
 |THUDM/glm-4-9b-chat 💻| Function Calling|
@@ -318,12 +318,24 @@
"Databricks",
"Databricks Open Model",
],
"NousResearch/Hermes-2-Pro-Llama-3-8B": [
"Hermes-2-Pro-Llama-3-8B (FC)",
"https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B",
"NousResearch",
"apache-2.0",
],
"NousResearch/Hermes-2-Pro-Mistral-7B": [
"Hermes-2-Pro-Mistral-7B (FC)",
"https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B",
"NousResearch",
"apache-2.0",
],
"NousResearch/Hermes-2-Theta-Llama-3-8B": [
"Hermes-2-Theta-Llama-3-8B (FC)",
"https://huggingface.co/NousResearch/Hermes-2-Theta-Llama-3-8B",
"NousResearch",
"apache-2.0",
],
"meta-llama/Meta-Llama-3-8B-Instruct": [
"Meta-Llama-3-8B-Instruct (Prompt)",
"https://llama.meta.com/llama3",
@@ -475,6 +487,8 @@
"deepseek-ai/deepseek-coder-6.7b-instruct": 909,
"google/gemma-7b-it": 95,
"NousResearch/Hermes-2-Pro-Mistral-7B": 135,
"NousResearch/Hermes-2-Pro-Llama-3-8B": 77,
"NousResearch/Hermes-2-Theta-Llama-3-8B": 73,
"meta-llama/Meta-Llama-3-8B-Instruct": 73,
"meta-llama/Meta-Llama-3-70B-Instruct": 307,
"gorilla-openfunctions-v2": 83,
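
The two hunks above follow the file's existing conventions: each metadata entry maps a model ID to a four-field list (display name with an `(FC)` suffix for function-calling mode, model card URL, organization, license), and the second hunk records a per-model latency figure. A hedged sketch of how such an entry might be unpacked (the mapping name and the consuming loop here are illustrative assumptions, not the leaderboard's actual code):

```python
# Illustrative sketch only: the mapping name and consumer below are
# assumptions; the field order matches the entries added in this diff.
model_metadata = {
    "NousResearch/Hermes-2-Theta-Llama-3-8B": [
        "Hermes-2-Theta-Llama-3-8B (FC)",
        "https://huggingface.co/NousResearch/Hermes-2-Theta-Llama-3-8B",
        "NousResearch",
        "apache-2.0",
    ],
}

for model_id, (display_name, url, org, license_id) in model_metadata.items():
    # Each entry unpacks into the four columns a leaderboard row needs.
    print(f"{display_name} | {org} | {license_id} | {url}")
```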
berkeley-function-call-leaderboard/model_handler/constant.py (2 changes: 2 additions & 0 deletions)
@@ -140,7 +140,9 @@
"meetkai/functionary-medium-v2.2-FC",
"meetkai/functionary-small-v2.4-FC",
"meetkai/functionary-medium-v2.4-FC",
"NousResearch/Hermes-2-Pro-Llama-3-8B",
"NousResearch/Hermes-2-Pro-Mistral-7B",
"NousResearch/Hermes-2-Theta-Llama-3-8B",
"command-r-plus-FC",
"command-r-plus-FC-optimized",
"THUDM/glm-4-9b-chat",
@@ -68,7 +68,9 @@
"meetkai/functionary-small-v2.4-FC": FunctionaryHandler,
"meetkai/functionary-medium-v2.4-FC": FunctionaryHandler,
"databricks-dbrx-instruct": DatabricksHandler,
"NousResearch/Hermes-2-Pro-Llama-3-8B": HermesHandler,
"NousResearch/Hermes-2-Pro-Mistral-7B": HermesHandler,
"NousResearch/Hermes-2-Theta-Llama-3-8B": HermesHandler,
"meta-llama/Meta-Llama-3-8B-Instruct": LlamaHandler,
"meta-llama/Meta-Llama-3-70B-Instruct": LlamaHandler,
"command-r-plus-FC": CohereHandler,
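
All three Hermes variants share the same `HermesHandler` class; the handler map is a plain dispatch table from model ID to handler class. A minimal sketch of that dispatch pattern (the constructor signature and `get_handler` helper are simplified assumptions, not the repository's exact code):

```python
# Minimal sketch of dict-based handler dispatch; HermesHandler's real
# constructor may take more arguments, this version is simplified.
class HermesHandler:
    def __init__(self, model_name: str) -> None:
        self.model_name = model_name

handler_map = {
    "NousResearch/Hermes-2-Pro-Llama-3-8B": HermesHandler,
    "NousResearch/Hermes-2-Pro-Mistral-7B": HermesHandler,
    "NousResearch/Hermes-2-Theta-Llama-3-8B": HermesHandler,
}

def get_handler(model_name: str) -> HermesHandler:
    # Look up the handler class for this model ID and instantiate it.
    return handler_map[model_name](model_name)

handler = get_handler("NousResearch/Hermes-2-Theta-Llama-3-8B")
print(type(handler).__name__, handler.model_name)
```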
@@ -16,8 +16,7 @@ def _format_prompt(prompt, function, test_category):
     )
     pydantic_format = """{"properties": {"arguments": {"title": "Arguments", "type": "object"}, "name": {"title": "Name", "type": "string"}}, "required": ["arguments", "name"], "title": "FunctionCall", "type": "object"}"""
     tool_call_format = """{"arguments": <args-dict>, "name": <function-name>}"""
-    formatted_prompt = """
-<|im_start|>system
+    formatted_prompt = """<|im_start|>system
 You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools>{function}</tools> Use the following pydantic model json schema for each tool call you will make: {pydantic_format} For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
 <tool_call>
 {tool_call_format}
@@ -26,7 +25,8 @@
 <|im_start|>user
 {prompt}
 <|im_end|>
-"""
+<|im_start|>assistant
+"""
     return formatted_prompt.format(
         function=function,
         pydantic_format=pydantic_format,
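
Net effect of the two hunks above: the prompt string no longer starts with a stray newline before `<|im_start|>system`, and it now ends with an open `<|im_start|>assistant` turn. A condensed sketch of the resulting prompt shape (the system text is abbreviated and the example values are illustrative):

```python
# Condensed sketch of the prompt _format_prompt now builds; the system
# message is abbreviated and the example arguments are illustrative.
formatted_prompt = """<|im_start|>system
You are a function calling AI model. Here are the available tools: <tools>{function}</tools>
<|im_end|>
<|im_start|>user
{prompt}
<|im_end|>
<|im_start|>assistant
"""

rendered = formatted_prompt.format(
    function='[{"name": "get_weather"}]',
    prompt="What is the weather in Berkeley?",
)
# Starts directly at the system turn and ends with an open assistant turn,
# mirroring apply_chat_template(..., add_generation_prompt=True).
assert rendered.startswith("<|im_start|>system")
assert rendered.rstrip().endswith("<|im_start|>assistant")
```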
