|
14 | 14 | }, |
15 | 15 | { |
16 | 16 | "cell_type": "code", |
17 | | - "execution_count": null, |
18 | | - "id": "ddadb9ef-e76a-4b48-85e4-f62c3957f502", |
| 17 | + "execution_count": 1, |
| 18 | + "id": "13a7483b-d08f-49fa-83da-619863171e5b", |
19 | 19 | "metadata": { |
20 | 20 | "tags": [] |
21 | 21 | }, |
|
24 | 24 | "import datetime\n", |
25 | 25 | "import uuid\n", |
26 | 26 | "\n", |
| 27 | + "from langchain.globals import set_verbose\n", |
27 | 28 | "from langsmith.client import Client\n", |
28 | 29 | "\n", |
29 | | - "from langchain_benchmarks import clone_public_dataset, registry\n", |
30 | | - "from langchain_benchmarks.tool_usage import agents" |
| 30 | + "from langchain_benchmarks import (\n", |
| 31 | + " __version__,\n", |
| 32 | + " clone_public_dataset,\n", |
| 33 | + " model_registry,\n", |
| 34 | + " registry,\n", |
| 35 | + ")\n", |
| 36 | + "from langchain_benchmarks.rate_limiting import RateLimiter\n", |
| 37 | + "from langchain_benchmarks.tool_usage.agents import (\n", |
| 38 | + " CustomAgentFactory,\n", |
| 39 | + " OpenAIAgentFactory,\n", |
| 40 | + ")" |
| 41 | + ] |
| 42 | + }, |
| 43 | + { |
| 44 | + "cell_type": "markdown", |
| 45 | + "id": "50bbe23b-a3b1-4607-929d-ea6e88b7085e", |
| 46 | + "metadata": {}, |
| 47 | + "source": [ |
| 48 | + "Prior to starting the tests, you may want to verify\n", |
| 49 | + "that the task that you're working with and the models are propelry defined." |
| 50 | + ] |
| 51 | + }, |
| 52 | + { |
| 53 | + "cell_type": "code", |
| 54 | + "execution_count": 2, |
| 55 | + "id": "adfbcaa9-349c-4223-89be-4abff9cf76ff", |
| 56 | + "metadata": { |
| 57 | + "tags": [] |
| 58 | + }, |
| 59 | + "outputs": [ |
| 60 | + { |
| 61 | + "data": { |
| 62 | + "text/plain": [ |
| 63 | + "{'input': \"Repeat the given string using the provided tools. Do not write anything else or provide any explanations. For example, if the string is 'abc', you must print the letters 'a', 'b', and 'c' one at a time and in that order. \\nWrite down your answer, but do not explain it. Input: `abc`\",\n", |
| 64 | + " 'output': ' Thank you for the input and for confirming the output of each letter I printed. I simply followed the instructions to repeat the given string \"abc\" by printing one letter at a time using the provided \"type_letter\" tool without any additional explanations. Please let me know if you need me to repeat this process with a different input string.',\n", |
| 65 | + " 'intermediate_steps': [(AgentActionMessageLog(tool='type_letter', tool_input={'letter': 'a'}, log=\"\\nInvoking type_letter: {'letter': 'a'}\\n\\t\", message_log=[AIMessage(content='<tool>{\\n \"tool_name\": \"type_letter\",\\n \"arguments\": {\\n \"letter\": \"a\"\\n }\\n}</tool>\\n')]),\n", |
| 66 | + " 'OK'),\n", |
| 67 | + " (AgentActionMessageLog(tool='type_letter', tool_input={'letter': 'b'}, log=\"\\nInvoking type_letter: {'letter': 'b'}\\n\\t\", message_log=[AIMessage(content='<tool>{\\n \"tool_name\": \"type_letter\",\\n \"arguments\": {\\n \"letter\": \"b\"\\n }\\n}</tool>\\n')]),\n", |
| 68 | + " 'OK'),\n", |
| 69 | + " (AgentActionMessageLog(tool='type_letter', tool_input={'letter': 'c'}, log=\"\\nInvoking type_letter: {'letter': 'c'}\\n\\t\", message_log=[AIMessage(content='<tool>{\\n \"tool_name\": \"type_letter\",\\n \"arguments\": {\\n \"letter\": \"c\"\\n }\\n}</tool>\\n')]),\n", |
| 70 | + " 'OK')],\n", |
| 71 | + " 'state': 'abc'}" |
| 72 | + ] |
| 73 | + }, |
| 74 | + "execution_count": 2, |
| 75 | + "metadata": {}, |
| 76 | + "output_type": "execute_result" |
| 77 | + } |
| 78 | + ], |
| 79 | + "source": [ |
| 80 | + "task = registry[\"Tool Usage - Typewriter (1 tool)\"]\n", |
| 81 | + "agent_factory = CustomAgentFactory(task, \"claude-2.1\")\n", |
| 82 | + "\n", |
| 83 | + "agent_factory().invoke({\"question\": \"abc\"})" |
| 84 | + ] |
| 85 | + }, |
| 86 | + { |
| 87 | + "cell_type": "markdown", |
| 88 | + "id": "65b32e7d-3986-4461-8a3b-8e9b6d4008cb", |
| 89 | + "metadata": {}, |
| 90 | + "source": [ |
| 91 | + "Define the test cases" |
| 92 | + ] |
| 93 | + }, |
| 94 | + { |
| 95 | + "cell_type": "code", |
| 96 | + "execution_count": 9, |
| 97 | + "id": "26d390b6-9ade-424c-aabb-d450f52ed121", |
| 98 | + "metadata": { |
| 99 | + "tags": [] |
| 100 | + }, |
| 101 | + "outputs": [], |
| 102 | + "source": [ |
| 103 | + "tests = [\n", |
| 104 | + " # 2-tuple of (architecture, model name)\n", |
| 105 | + " (\"xml\", \"mixtral-8x7b-instruct-fw\"),\n", |
| 106 | + " (\"xml\", \"claude-2.1\"),\n", |
| 107 | + " (\"xml\", \"claude-2\"),\n", |
| 108 | + " (\"xml\", \"yi-34b-200k-fw\"),\n", |
| 109 | + " (\"xml\", \"llama-v2-70b-chat-fw\"),\n", |
| 110 | + " (\"xml\", \"llama-v2-13b-chat-fw\"),\n", |
| 111 | + " (\"openai_functions\", \"gpt-3.5-turbo-1106\"),\n", |
| 112 | + " (\"openai_functions\", \"gpt-3.5-turbo-0613\"),\n", |
| 113 | + " (\"openai_functions\", \"gpt-4-1106-preview\")(\"openai_functions\", \"gpt-4-0613\"),\n", |
| 114 | + "]" |
| 115 | + ] |
| 116 | + }, |
| 117 | + { |
| 118 | + "cell_type": "markdown", |
| 119 | + "id": "b55b7c24-8b4d-4bd7-8b00-365fbe61897f", |
| 120 | + "metadata": {}, |
| 121 | + "source": [ |
| 122 | + "## Run" |
| 123 | + ] |
| 124 | + }, |
| 125 | + { |
| 126 | + "cell_type": "code", |
| 127 | + "execution_count": 10, |
| 128 | + "id": "a415dd82-2e70-4173-a3f3-8e1aac60db9e", |
| 129 | + "metadata": { |
| 130 | + "tags": [] |
| 131 | + }, |
| 132 | + "outputs": [], |
| 133 | + "source": [ |
| 134 | + "experiment_uuid = uuid.uuid4().hex[:4]" |
31 | 135 | ] |
32 | 136 | }, |
33 | 137 | { |
|
39 | 143 | }, |
40 | 144 | "outputs": [], |
41 | 145 | "source": [ |
42 | | - "experiment_uuid = uuid.uuid4().hex[:4]\n", |
43 | | - "models = [\"gpt-3.5-turbo-16k\"]\n", |
44 | 146 | "client = Client() # Launch langsmith client for cloning datasets\n", |
45 | 147 | "today = datetime.date.today().isoformat()\n", |
| 148 | + "rate_limiter = RateLimiter(requests_per_second=1)\n", |
46 | 149 | "\n", |
47 | 150 | "for task in registry:\n", |
48 | 151 | " dataset_name = task.name + f\"_benchmarking_{today}\"\n", |
49 | 152 | " clone_public_dataset(task.dataset_id, dataset_name=dataset_name)\n", |
50 | 153 | "\n", |
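| | + " # This notebook only benchmarks tool-usage tasks; skip everything else\n", |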
51 | 154 | " if task.type != \"ToolUsageTask\":\n", |
52 | 155 | " continue\n", |
53 | | - " for model in models:\n", |
| 156 | + "\n", |
| 157 | + " for arch, model in tests:\n", |
54 | 158 | " print()\n", |
55 | | - " print(f\"Benchmarking {task.name} with model: {model}\")\n", |
| 159 | + " print(f\"Benchmarking {task.name} with model: {model} and arch: {arch}\")\n", |
56 | 160 | " eval_config = task.get_eval_config()\n", |
57 | | - " agent_factory = agents.OpenAIAgentFactory(task, model=model)\n", |
| 161 | + "\n", |
| 162 | + " if arch == \"openai_functions\":\n", |
| 163 | + " agent_factory = OpenAIAgentFactory(\n", |
| 164 | + " task, model=model, rate_limiter=rate_limiter\n", |
| 165 | + " )\n", |
| 166 | + " elif arch == \"xml\":\n", |
| 167 | + " agent_factory = CustomAgentFactory(\n", |
| 168 | + " task, model=model, rate_limiter=rate_limiter\n", |
| 169 | + " )\n", |
| 170 | + " else:\n", |
| 171 | + " raise ValueError()\n", |
58 | 172 | "\n", |
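| | + " # Run the agent over the cloned dataset; results are recorded in LangSmith\n", |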
59 | 173 | " client.run_on_dataset(\n", |
60 | 174 | " dataset_name=dataset_name,\n", |
61 | 175 | " llm_or_chain_factory=agent_factory,\n", |
62 | 176 | " evaluation=eval_config,\n", |
63 | 177 | " verbose=False,\n", |
64 | | - " project_name=f\"{dataset_name}-{model}-{experiment_uuid}\",\n", |
| 178 | + " project_name=f\"{model}{experiment_uuid}\",\n", |
65 | 179 | " tags=[model],\n", |
66 | | - " concurrency_level=1,\n", |
| 180 | + " concurrency_level=5,\n", |
67 | 181 | " project_metadata={\n", |
68 | 182 | " \"model\": model,\n", |
69 | 183 | " \"id\": experiment_uuid,\n", |
70 | 184 | " \"task\": task.name,\n", |
71 | 185 | " \"date\": today,\n", |
| 186 | + " \"langchain_benchmarks_version\": __version__,\n", |
| 187 | + " \"arch\": arch,\n", |
72 | 188 | " },\n", |
73 | | - " )" |
| 189 | + " )\n", |
| 190 | + " break" |
74 | 191 | ] |
75 | 192 | } |
76 | 193 | ], |
|