Merged
docs/source/notebooks/tool_usage/multiverse_math.ipynb (2 changes: 1 addition & 1 deletion)
@@ -300,11 +300,11 @@
],
"source": [
"import uuid\n",
"\n",
"from langsmith.client import Client\n",
"\n",
"from langchain_benchmarks.tool_usage import get_eval_config\n",
"\n",
"\n",
"experiment_uuid = uuid.uuid4().hex[:4]\n",
"\n",
"client = Client()\n",
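The notebook hunk above only shows the setup: a short experiment suffix, a LangSmith `Client`, and the tool-usage `get_eval_config` import. A minimal sketch of how that setup is typically completed follows; the dataset name, agent factory, and the zero-argument `get_eval_config()` call are assumptions for illustration, not part of the diff.

    import uuid

    from langsmith.client import Client

    from langchain_benchmarks.tool_usage import get_eval_config

    experiment_uuid = uuid.uuid4().hex[:4]  # short suffix to keep project names unique
    client = Client()

    client.run_on_dataset(
        dataset_name="Multiverse Math",            # hypothetical dataset name
        llm_or_chain_factory=my_agent_factory,     # hypothetical: your agent constructor
        evaluation=get_eval_config(),
        project_name=f"multiverse-math-{experiment_uuid}",
    )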
langchain_benchmarks/extraction/evaluators.py (12 changes: 11 additions & 1 deletion)
@@ -1,8 +1,18 @@
+from typing import Optional
+
+from langchain.chat_models import ChatOpenAI
 from langchain.chat_models.base import BaseChatModel
 from langchain.smith import RunEvalConfig


-def get_eval_config(eval_llm: BaseChatModel) -> RunEvalConfig:
+def get_eval_config(eval_llm: Optional[BaseChatModel] = None) -> RunEvalConfig:
+    eval_llm = eval_llm or ChatOpenAI(
+        model="gpt-4",
+        temperature=0,
+        model_kwargs={"seed": 42},
+        max_retries=1,
+        request_timeout=60,
+    )
     """Get the evaluation configuration for the email task."""
     return RunEvalConfig(
         evaluators=[
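With this change the extraction judge is optional: callers can still pass their own `BaseChatModel`, and otherwise a seeded GPT-4 judge with one retry and a 60-second timeout is built for them. A minimal usage sketch, assuming the module path shown in the diff (the gpt-3.5 override is only an illustration):

    from langchain.chat_models import ChatOpenAI
    from langchain_benchmarks.extraction.evaluators import get_eval_config

    # Default judge: gpt-4, temperature 0, seeded, max_retries=1, request_timeout=60.
    eval_config = get_eval_config()

    # Or supply your own judge; the built-in default is then bypassed.
    cheap_judge = ChatOpenAI(model="gpt-3.5-turbo", temperature=0, request_timeout=60)
    eval_config_cheap = get_eval_config(eval_llm=cheap_judge)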
langchain_benchmarks/extraction/implementations.py (8 changes: 7 additions & 1 deletion)
@@ -61,7 +61,13 @@ def run_on_dataset(
         kwargs: Additional arguments to pass to the client.
     """
     client = Client()
-    eval_llm = ChatOpenAI(model="gpt-4", temperature=0.0, model_kwargs={"seed": 42})
+    eval_llm = ChatOpenAI(
+        model="gpt-4",
+        temperature=0.0,
+        model_kwargs={"seed": 42},
+        max_retries=1,
+        request_timeout=60,
+    )
     return client.run_on_dataset(
         dataset_name=task.name,
         llm_or_chain_factory=create_openai_function_based_extractor(
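The two added keyword arguments are the substance of this PR: `max_retries` caps how often the underlying OpenAI client retries a failed call, and `request_timeout` (in seconds) bounds each HTTP request, so a stuck grading call fails fast instead of stalling the whole benchmark run. Below is a standalone sketch of the same judge construction with that behaviour spelled out in comments; the timing estimate is an assumption about the OpenAI client's retry behaviour, not a claim from the diff.

    from langchain.chat_models import ChatOpenAI

    eval_llm = ChatOpenAI(
        model="gpt-4",
        temperature=0.0,             # deterministic grading
        model_kwargs={"seed": 42},   # best-effort reproducibility of judgments
        max_retries=1,               # retry a transient API error at most once
        request_timeout=60,          # give up on any single request after 60 s
    )
    # Worst case is now roughly one attempt plus one retry (~2 minutes of waiting)
    # rather than an indefinitely hanging or repeatedly backing-off call.
    grade = eval_llm.predict("Does the extracted JSON match the email? Answer YES or NO.")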
langchain_benchmarks/rag/evaluators.py (14 changes: 12 additions & 2 deletions)
@@ -84,10 +84,20 @@ def evaluate_run(

 def get_eval_config() -> RunEvalConfig:
     """Returns the evaluator for the environment."""
-    eval_llm = ChatOpenAI(model="gpt-4", temperature=0.0, model_kwargs={"seed": 42})
+    eval_llm = ChatOpenAI(
+        model="gpt-4",
+        temperature=0.0,
+        model_kwargs={"seed": 42},
+        max_retries=1,
+        request_timeout=60,
+    )
     # Use a longer-context LLM to check documents
     faithfulness_eval_llm = ChatOpenAI(
-        model="gpt-4-1106-preview", temperature=0.0, model_kwargs={"seed": 42}
+        model="gpt-4-1106-preview",
+        temperature=0.0,
+        model_kwargs={"seed": 42},
+        max_retries=1,
+        request_timeout=60,
     )

     return RunEvalConfig(
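On the "longer-context LLM" comment above: the faithfulness judge is shown the generated answer together with the retrieved documents, so its grading prompt can exceed gpt-4's 8k-token window, whereas gpt-4-1106-preview accepts up to 128k tokens. A rough, hypothetical illustration of that sizing concern; `answer`, `retrieved_docs`, and the use of tiktoken are assumptions for the sketch, not part of this module.

    import tiktoken

    enc = tiktoken.encoding_for_model("gpt-4")
    grading_prompt = answer + "\n\n" + "\n\n".join(d.page_content for d in retrieved_docs)
    n_tokens = len(enc.encode(grading_prompt))
    # With a handful of long documents this easily passes 8,000 tokens, which is
    # why the faithfulness judge is pinned to the 128k-context preview model.
    print(n_tokens)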
langchain_benchmarks/tool_usage/evaluators.py (8 changes: 7 additions & 1 deletion)
@@ -99,7 +99,13 @@ class AgentTrajectoryEvaluator(RunEvaluator):

     def __init__(self) -> None:
         """Initialize the evaluator."""
-        eval_llm = ChatOpenAI(model="gpt-4", temperature=0, model_kwargs={"seed": 42})
+        eval_llm = ChatOpenAI(
+            model="gpt-4",
+            temperature=0,
+            model_kwargs={"seed": 42},
+            max_retries=1,
+            request_timeout=60,
+        )
         self.qa_evaluator = load_evaluator(EvaluatorType.QA, llm=eval_llm)

     def evaluate_run(
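The trajectory evaluator delegates answer grading to LangChain's stock QA evaluator. Here is a small sketch of that grader in isolation, using the same judge settings as the diff; the example strings and the printed result are made up for illustration.

    from langchain.chat_models import ChatOpenAI
    from langchain.evaluation import EvaluatorType, load_evaluator

    eval_llm = ChatOpenAI(
        model="gpt-4",
        temperature=0,
        model_kwargs={"seed": 42},
        max_retries=1,
        request_timeout=60,
    )
    qa_evaluator = load_evaluator(EvaluatorType.QA, llm=eval_llm)

    result = qa_evaluator.evaluate_strings(
        input="In multiverse math, what is 2 multiplied by 3?",
        prediction="The result is 6.6.",
        reference="6.6",
    )
    print(result)  # e.g. {"reasoning": "...", "value": "CORRECT", "score": 1}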