From 012a6c7fd405b71c892199b1846b81ca1e068aa2 Mon Sep 17 00:00:00 2001 From: Herman Hrand <54174661+allohamora@users.noreply.github.com> Date: Thu, 19 Sep 2024 00:13:36 +0300 Subject: [PATCH] fix(groq): Feature/groq response format improvements (#6754) Co-authored-by: jacoblee93 --- .../docs/integrations/chat/groq.ipynb | 56 +++++++++++++++++-- libs/langchain-groq/src/chat_models.ts | 3 +- 2 files changed, 52 insertions(+), 7 deletions(-) diff --git a/docs/core_docs/docs/integrations/chat/groq.ipynb b/docs/core_docs/docs/integrations/chat/groq.ipynb index 4bdc2ea7f6f3..b6a58553ad3b 100644 --- a/docs/core_docs/docs/integrations/chat/groq.ipynb +++ b/docs/core_docs/docs/integrations/chat/groq.ipynb @@ -145,11 +145,11 @@ ], "source": [ "const aiMsg = await llm.invoke([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", - " ],\n", - " [\"human\", \"I love programming.\"],\n", + " {\n", + " role: \"system\",\n", + " content: \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " },\n", + " { role: \"user\", content: \"I love programming.\" },\n", "])\n", "aiMsg" ] @@ -174,6 +174,50 @@ "console.log(aiMsg.content)" ] }, + { + "cell_type": "markdown", + "id": "ce0414fe", + "metadata": {}, + "source": [ + "## Json invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "3f0a7a2a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " aiInvokeMsgContent: '{\\n\"result\": 6\\n}',\n", + " aiBindMsg: '{\\n\"result\": 6\\n}'\n", + "}\n" + ] + } + ], + "source": [ + "const messages = [\n", + " {\n", + " role: \"system\",\n", + " content: \"You are a math tutor that handles math exercises and makes output in json in format { result: number }.\",\n", + " },\n", + " { role: \"user\", content: \"2 + 2 * 2\" },\n", + "];\n", + "\n", + "const aiInvokeMsg = await llm.invoke(messages, { response_format: { type: \"json_object\" } });\n", + "\n", + "// if you want not to pass response_format in every invoke, you can bind it to the instance\n", + "const llmWithResponseFormat = llm.bind({ response_format: { type: \"json_object\" } });\n", + "const aiBindMsg = await llmWithResponseFormat.invoke(messages);\n", + "\n", + "// they are the same\n", + "console.log({ aiInvokeMsgContent: aiInvokeMsg.content, aiBindMsg: aiBindMsg.content });" ] }, { "cell_type": "markdown", "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", "metadata": {}, "source": [ @@ -186,7 +230,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", "metadata": {}, "outputs": [ diff --git a/libs/langchain-groq/src/chat_models.ts b/libs/langchain-groq/src/chat_models.ts index c16b58752520..524782a6e49e 100644 --- a/libs/langchain-groq/src/chat_models.ts +++ b/libs/langchain-groq/src/chat_models.ts @@ -42,6 +42,7 @@ import { ChatCompletionCreateParams, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, + CompletionCreateParams, } from "groq-sdk/resources/chat/completions"; import { Runnable, @@ -73,7 +74,7 @@ export interface ChatGroqCallOptions extends BaseChatModelCallOptions { headers?: Record; tools?: ChatGroqToolType[]; tool_choice?: OpenAIClient.ChatCompletionToolChoiceOption | "any" | string; - response_format?: { type: "json_object" }; + response_format?: CompletionCreateParams.ResponseFormat; } export interface ChatGroqInput extends BaseChatModelParams {