2 changes: 1 addition & 1 deletion docs/source/notebooks/tool_usage/relational_data.ipynb
@@ -217,7 +217,7 @@
},
"outputs": [],
"source": [
"from langchain.agents import AgentType, Tool, initialize_agent\n",
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"from langchain_benchmarks.schema import ExtractionTask\n",
17 changes: 16 additions & 1 deletion langchain_benchmarks/model_registration.py
@@ -253,6 +253,21 @@
),
]

_ANYSCALE_MODELS = [
RegisteredModel(
provider="anyscale",
name="mistral-7b-instruct-v0.1",
description="Mistral 7B model fine-tuned for function-calling.",
type="chat",
params={
"model": "mistralai/Mistral-7B-Instruct-v0.1",
},
),
]

model_registry = ModelRegistry(
registered_models=_OPEN_AI_MODELS + _FIREWORKS_MODELS + _ANTHROPIC_MODELS
registered_models=_OPEN_AI_MODELS
+ _FIREWORKS_MODELS
+ _ANYSCALE_MODELS
+ _ANTHROPIC_MODELS
)
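
Aside (not part of the patch): a minimal sketch of how the new Anyscale entry could be exercised. It assumes `ANYSCALE_API_KEY` is exported, that the registry exposes its entries via a `registered_models` attribute, and that `RegisteredModel.get_model(...)` is the intended access pattern (as the agent factory further down uses it); the exact accessors may differ.

```python
# Sketch only: assumes ANYSCALE_API_KEY is set in the environment and that the
# registry exposes its entries via `registered_models`.
from langchain_benchmarks.model_registration import model_registry

anyscale_entry = next(
    m
    for m in model_registry.registered_models
    if m.provider == "anyscale" and m.name == "mistral-7b-instruct-v0.1"
)
# get_model(...) mirrors how OpenAIAgentFactory instantiates a RegisteredModel below.
llm = anyscale_entry.get_model(model_params={"temperature": 0})
print(llm.invoke("Reply with a single word: hello"))
```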
6 changes: 5 additions & 1 deletion langchain_benchmarks/schema.py
@@ -251,7 +251,7 @@ def add(self, task: BaseTask) -> None:
self.tasks.append(task)


Provider = Literal["fireworks", "openai", "anthropic"]
Provider = Literal["fireworks", "openai", "anthropic", "anyscale"]
ModelType = Literal["chat", "llm"]
AUTHORIZED_NAMESPACES = {"langchain"}

@@ -284,6 +284,8 @@ def _get_default_path(provider: str, type_: ModelType) -> str:
paths = {
("fireworks", "chat"): "langchain.chat_models.fireworks.ChatFireworks",
("fireworks", "llm"): "langchain.llms.fireworks.Fireworks",
("anyscale", "chat"): "langchain.chat_models.anyscale.ChatAnyscale",
("anyscale", "llm"): "langchain.llms.anyscale.Anyscale",
("openai", "chat"): "langchain.chat_models.openai.ChatOpenAI",
("openai", "llm"): "langchain.llms.openai.OpenAI",
("anthropic", "chat"): "langchain.chat_models.anthropic.ChatAnthropic",
@@ -303,6 +305,8 @@ def _get_default_url(provider: str, type_: ModelType) -> Optional[str]:
return "https://platform.openai.com/docs/models"
elif provider == "anthropic":
return "https://docs.anthropic.com/claude/reference/selecting-a-model"
elif provider == "anyscale":
return "https://docs.endpoints.anyscale.com/category/supported-models"
else:
return None

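Aside (not part of the patch): to illustrate what the new default-path entries provide, a small sketch that resolves the dotted path for the Anyscale chat model with `importlib`. The `resolve` helper is hypothetical and only mirrors one way such a path could be consumed; it requires `langchain` to be installed.

```python
from importlib import import_module


# Hypothetical helper: turn a dotted default path (as returned by _get_default_path)
# into the class it names.
def resolve(path: str):
    module_name, _, attr = path.rpartition(".")
    return getattr(import_module(module_name), attr)


ChatAnyscale = resolve("langchain.chat_models.anyscale.ChatAnyscale")
print(ChatAnyscale)  # <class 'langchain.chat_models.anyscale.ChatAnyscale'>
```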
79 changes: 68 additions & 11 deletions langchain_benchmarks/tool_usage/agents/openai_functions.py
@@ -1,27 +1,81 @@
"""Code for creating an agent factory for evaluating tool usage tasks."""
from typing import Optional
from typing import Any, Callable, Dict, List, Optional, Sequence, Type, Union

from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.schema.runnable import Runnable
from langchain.tools.render import format_tool_to_openai_function
from langchain.tools.render import format_tool_to_openai_tool
from langchain_core.language_models import BaseChatModel, BaseLanguageModel
from langchain_core.language_models.base import LanguageModelInput
from langchain_core.messages import BaseMessage
from langchain_core.pydantic_v1 import BaseModel

from langchain_benchmarks import rate_limiting
from langchain_benchmarks.model_registration import RegisteredModel
from langchain_benchmarks.schema import ToolUsageTask
from langchain_benchmarks.tool_usage.agents.adapters import apply_agent_executor_adapter

# PUBLIC API


def _bind_tools(
llm: BaseChatModel,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
tool_choice: Optional[str] = None,
json_mode: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tools (and other objects) to this chat model.

Args:
tools: A list of tool definitions to bind to this chat model.
Can be a dictionary, pydantic model, or callable. Pydantic
models and callables will be automatically converted to
their schema dictionary representation.
tool_choice: Which tool to require the model to call.
Must be the name of the single provided tool or
"auto" to automatically determine which tool to call
(if any).
json_mode: Whether to set JSON mode for the tool call.
This guarantees the model will respond in valid JSON
(unless truncated).
kwargs: Any additional parameters to pass to the
:class:`~langchain.runnable.Runnable` constructor.

"""
formatted_tools: List[Dict[str, Union[str, dict]]] = [
format_tool_to_openai_tool(tool) for tool in tools
]
if tool_choice is not None:
if not formatted_tools:
raise ValueError(
"When specifying `tool_choice`, you must provide at least one " "tool."
)
tool_names = [tool["function"]["name"] for tool in formatted_tools]
if not any(tool_name == tool_choice for tool_name in tool_names):
raise ValueError(
f"Tool choice {tool_choice} was specified, but the only "
f"provided tools were {tool_names}."
)
tool_choice_ = {"type": "function", "function": {"name": tool_choice}}
kwargs = {**kwargs, "tool_choice": tool_choice_}
if json_mode:
kwargs = {**kwargs, "response_format": {"type": "json_object"}}
return llm.bind(
tools=formatted_tools,
**kwargs,
)

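Aside (not part of the patch): a short usage sketch for `_bind_tools`, assuming `OPENAI_API_KEY` is set and a model that accepts the `tools`/`tool_choice` request parameters; the `add` tool is a placeholder, and importing the private helper is for illustration only.

```python
# Sketch only: assumes OPENAI_API_KEY is set; _bind_tools is private and imported
# here purely for illustration.
from langchain.chat_models import ChatOpenAI
from langchain.tools import tool

from langchain_benchmarks.tool_usage.agents.openai_functions import _bind_tools


@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


llm = ChatOpenAI(model="gpt-3.5-turbo-1106", temperature=0)
# Force the model to call `add`; json_mode is left off since the tool call itself
# already returns structured arguments.
bound = _bind_tools(llm, [add], tool_choice="add")
msg = bound.invoke("What is 2 + 3?")
print(msg.additional_kwargs.get("tool_calls"))
```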

class OpenAIAgentFactory:
def __init__(
self,
task: ToolUsageTask,
*,
model: str = "gpt-3.5-turbo-16k",
model: Union[str, RegisteredModel] = "gpt-3.5-turbo-16k",
rate_limiter: Optional[rate_limiting.RateLimiter] = None,
) -> None:
"""Create an OpenAI agent factory for the given task.
Expand All @@ -35,24 +89,27 @@ def __init__(
self.model = model
self.rate_limiter = rate_limiter

def _create_model(self) -> Union[BaseChatModel, BaseLanguageModel]:
if isinstance(self.model, RegisteredModel):
return self.model.get_model(
model_params={"temperature": 0, "model_kwargs": {"seed": 0}}
)
else:
return ChatOpenAI(model=self.model, temperature=0, model_kwargs={"seed": 0})

def create(self) -> Runnable:
"""Agent Executor"""
# For backwards compatibility
return self()

def __call__(self) -> Runnable:
model = ChatOpenAI(
model=self.model,
temperature=0,
)
model = self._create_model()

env = self.task.create_environment()

model = model.bind(
functions=[format_tool_to_openai_function(t) for t in env.tools]
)
model = _bind_tools(model, env.tools)

if rate_limiting:
if self.rate_limiter is not None:
# Rate limited model
model = rate_limiting.with_rate_limit(model, self.rate_limiter)

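Aside (not part of the patch): a sketch of how the factory can now be driven by a registry entry instead of a bare model name. The task name, input key, and registry accessors are assumptions, and the relevant API keys (`OPENAI_API_KEY` or `ANYSCALE_API_KEY`) are expected to be set.

```python
# Sketch only: task name, input key, and registry accessors are assumptions.
from langchain_benchmarks import registry
from langchain_benchmarks.model_registration import model_registry
from langchain_benchmarks.tool_usage.agents.openai_functions import OpenAIAgentFactory

task = registry["Tool Usage - Relational Data"]  # assumed task name

# Either a plain model name (backwards compatible) ...
factory = OpenAIAgentFactory(task, model="gpt-3.5-turbo-16k")

# ... or a RegisteredModel, e.g. the Anyscale entry added above.
anyscale = next(
    m for m in model_registry.registered_models if m.provider == "anyscale"
)
factory = OpenAIAgentFactory(task, model=anyscale)

agent = factory()  # returns a Runnable agent executor
print(agent.invoke({"question": "Who are Alice's friends?"}))  # input key assumed
```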