Skip to content

Commit

Permalink
Support Message API for chatbot and chatinterface (#8422)
Browse files Browse the repository at this point in the history
* first commit

* Add code

* Tests + code

* lint

* Add code

* notebook

* add changeset

* type

* Add client test

* type

* Add code

* Chatbot type

* Add code

* test chatbot

* fix e2e test

* js tests

* Consolidate Error and Tool message. Allow Messages in postprocess

* Rename to messages

* fix tests

* notebook clean

* More tests and messages

* add changeset

* notebook

* client test

* Fix issues

* Chatbot docs

* add changeset

* Add image

* Add img tag

* Address comments

* Add code

* Revert chatinterface streaming change. Use title in metadata. Address pngwn comments

* Add code

* changelog highlight

---------

Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
  • Loading branch information
freddyaboulton and gradio-pr-bot authored Jul 10, 2024
1 parent 936c713 commit 4221290
Show file tree
Hide file tree
Showing 37 changed files with 1,868 additions and 668 deletions.
93 changes: 93 additions & 0 deletions .changeset/young-crabs-begin.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
---
"@gradio/chatbot": minor
"@gradio/tootils": minor
"gradio": minor
"website": minor
---

highlight:

#### Support message format in chatbot 💬

`gr.Chatbot` and `gr.ChatInterface` now support the [Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api#messages-api), which is fully compatible with LLM API providers such as Hugging Face Text Generation Inference, OpenAI's chat completions API, and Llama.cpp server.

Building Gradio applications around these LLM solutions is now even easier!

`gr.Chatbot` and `gr.ChatInterface` now have a `msg_format` parameter that accepts two values: `'tuples'` and `'messages'`. If set to `'tuples'`, the default chatbot data format (a list of message tuples) is expected. If set to `'messages'`, a list of dictionaries with `role` and `content` keys is expected, as shown below:

```python
def chat_greeter(msg, history):
    """Reply with a fixed greeting: append an assistant message to *history* in place and return it."""
    greeting = {"role": "assistant", "content": "Hello!"}
    history.append(greeting)
    return history
```

Additionally, Gradio now exposes a `gr.ChatMessage` dataclass that you can use for IDE type hints and autocompletion.

<img width="852" alt="image" src="https://github.com/freddyaboulton/freddyboulton/assets/41651716/d283e8f3-b194-466a-8194-c7e697dca9ad">


#### Tool use in Chatbot 🛠️

The Gradio Chatbot can now natively display tool usage and intermediate thoughts common in Agent and chain-of-thought workflows!

If you are using the new "messages" format, simply add a `metadata` key whose value is a dictionary with a `title` key. The assistant message will then be displayed in an expandable box with that title, suitable for showing the result of a tool call or an intermediate step.

```python
import gradio as gr
from gradio import ChatMessage
import time

def generate_response(history):
    """Simulate an agent run as a stream: user question, tool failure, retry, tool success, final answer.

    Yields *history* after each appended message; sleeps briefly between steps
    to mimic streaming (no sleep after the final message).
    """
    script = [
        ChatMessage(role="user", content="What is the weather in San Francisco right now?"),
        ChatMessage(role="assistant",
                    content="In order to find the current weather in San Francisco, I will need to use my weather tool."),
        # Failed tool call: metadata["title"] renders this as an expandable box.
        ChatMessage(role="assistant",
                    content="API Error when connecting to weather service.",
                    metadata={"title": "💥 Error using tool 'Weather'"}),
        ChatMessage(role="assistant",
                    content="I will try again"),
        # Successful tool call, also rendered as an expandable box.
        ChatMessage(role="assistant",
                    content="Weather 72 degrees Fahrenheit with 20% chance of rain.",
                    metadata={"title": "🛠️ Used tool 'Weather'"}),
        ChatMessage(role="assistant",
                    content="Now that the API succeeded I can complete my task."),
        ChatMessage(role="assistant",
                    content="It's a sunny day in San Francisco with a current temperature of 72 degrees Fahrenheit and a 20% chance of rain. Enjoy the weather!"),
    ]
    for step, message in enumerate(script):
        history.append(message)
        yield history
        if step < len(script) - 1:
            time.sleep(0.25)


with gr.Blocks() as demo:
chatbot = gr.Chatbot(msg_format="messages")
button = gr.Button("Get San Francisco Weather")
button.click(generate_response, chatbot, chatbot)

if __name__ == "__main__":
demo.launch()
```



![tool-box-demo](https://github.com/freddyaboulton/freddyboulton/assets/41651716/cf73ecc9-90ac-42ce-bca5-768e0cc00a48)
32 changes: 26 additions & 6 deletions .config/playwright-setup.js
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,10 @@ const test_files = readdirSync(TEST_FILES_PATH)
!f.endsWith(".component.spec.ts") &&
!f.endsWith(".reload.spec.ts")
)
.map((f) => basename(f, ".spec.ts"));
.map((f) => ({
module_name: `${basename(f, ".spec.ts")}.run`,
dir_name: basename(f, ".spec.ts")
}));

export default async function global_setup() {
const verbose = process.env.GRADIO_TEST_VERBOSE;
Expand All @@ -29,7 +32,24 @@ export default async function global_setup() {

process.stdout.write(kl.yellow("\nCreating test gradio app.\n\n"));

const test_app = make_app(test_files, port);
const test_cases = [];
// check if there is a testcase file in the same directory as the test file
// if there is, append that to the file
test_files.forEach((value) => {
const test_case_dir = join(ROOT, "demo", value.dir_name);

readdirSync(test_case_dir)
.filter((f) => f.endsWith("_testcase.py"))
.forEach((f) => {
test_cases.push({
module_name: `${value.dir_name}.${basename(f, ".py")}`,
dir_name: `${value.dir_name}_${basename(f, ".py")}`
});
});
});

const all_test_files = test_files.concat(test_cases);
const test_app = make_app(all_test_files, port);
process.stdout.write(kl.yellow("App created. Starting test server.\n\n"));

process.stdout.write(kl.bgBlue(" =========================== \n"));
Expand Down Expand Up @@ -111,14 +131,14 @@ import uvicorn
from fastapi import FastAPI
import gradio as gr
${demos.map((d) => `from demo.${d}.run import demo as ${d}`).join("\n")}
${demos.map((obj) => `from demo.${obj.module_name} import demo as ${obj.dir_name}`).join("\n")}
app = FastAPI()
${demos
.map(
(d) =>
`app = gr.mount_gradio_app(app, ${d}, path="/${d}", max_file_size=${
d == "upload_file_limit_test" ? "'15kb'" : "None"
(obj) =>
`app = gr.mount_gradio_app(app, ${obj.dir_name}, path="/${obj.dir_name}", max_file_size=${
obj.dir_name == "upload_file_limit_test" ? "'15kb'" : "None"
})`
)
.join("\n")}
Expand Down
23 changes: 23 additions & 0 deletions client/python/test/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -459,3 +459,26 @@ def max_file_size_demo():
)

return demo


@pytest.fixture
def chatbot_message_format():
    """Demo fixture: a messages-format Chatbot driven by a Textbox submit (api_name="chat").

    Each submit appends a user/assistant message pair to the history and
    clears the textbox.
    """
    canned_replies = ["How are you?", "I love you", "I'm very hungry"]

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot(msg_format="messages")
        msg = gr.Textbox()

        def respond(message, chat_history: list):
            # Pick a canned reply and record both sides of the turn in place.
            reply = random.choice(canned_replies)
            chat_history.append({"role": "user", "content": message})
            chat_history.append({"role": "assistant", "content": reply})
            return "", chat_history

        msg.submit(respond, [msg, chatbot], [msg, chatbot], api_name="chat")

    return demo
19 changes: 19 additions & 0 deletions client/python/test/test_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -682,6 +682,25 @@ def test_missing_params(self, calculator_demo):
):
client.predict(num1=3, operation="add", api_name="/predict")

def test_chatbot_message_format(self, chatbot_message_format):
    """Each /chat call appends a user message and one of the canned assistant replies."""
    bot_replies = ("How are you?", "I love you", "I'm very hungry")
    with connect(chatbot_message_format) as client:
        # First turn: empty history in, user + assistant appended.
        _, history = client.predict("hello", [], api_name="/chat")
        assert history[1]["role"] == "assistant"
        assert history[1]["content"] in bot_replies

        # Second turn: history round-trips and grows by another pair.
        _, history = client.predict("hi", history, api_name="/chat")
        assert history[2]["role"] == "user"
        assert history[2]["content"] == "hi"
        assert history[3]["role"] == "assistant"
        assert history[3]["content"] in bot_replies


class TestStatusUpdates:
@patch("gradio_client.client.Endpoint.make_end_to_end_fn")
Expand Down
101 changes: 101 additions & 0 deletions demo/chatbot_core_components_simple/messages_testcase.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
import gradio as gr
import random

# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.


# Badge color per harm level; html_src looks the level up here.
color_map = {
    "harmful": "crimson",
    "neutral": "gray",
    "beneficial": "green",
}


def html_src(harm_level):
    """Return an HTML snippet rendering *harm_level* as a small colored badge."""
    badge_color = color_map[harm_level]
    return f"""
<div style="display: flex; gap: 5px;padding: 2px 4px;margin-top: -40px">
<div style="background-color: {badge_color}; padding: 2px; border-radius: 5px;">
{harm_level}
</div>
</div>
"""

def print_like_dislike(like_data: gr.LikeData):
    """Log the index, value, and liked/disliked flag of a reacted chat message."""
    print(like_data.index, like_data.value, like_data.liked)

def add_message(history, message):
    """Push a multimodal submission (files first, then text) onto the chat history.

    Returns the updated history and a disabled textbox so input is blocked
    while the bot responds.
    """
    history.extend(
        {"role": "user", "content": {"path": file_path}}
        for file_path in message["files"]
    )
    if message["text"] is not None:
        history.append({"role": "user", "content": message["text"]})
    return history, gr.MultimodalTextbox(value=None, interactive=False)

def bot(history, response_type):
    """Append a canned assistant reply of the requested media type to *history*.

    Unknown types (including "text") fall back to the plain string "Cool!".
    Content is built lazily so only the selected component is constructed.
    """
    bus_png = "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
    factories = {
        "gallery": lambda: gr.Gallery([bus_png, bus_png]),
        "image": lambda: gr.Image(bus_png),
        "video": lambda: gr.Video("https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/world.mp4"),
        "audio": lambda: gr.Audio("https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav"),
        "html": lambda: gr.HTML(
            html_src(random.choice(["harmful", "neutral", "beneficial"]))
        ),
    }
    make_content = factories.get(response_type)
    content = make_content() if make_content is not None else "Cool!"
    history.append({"role": "assistant", "content": content})
    return history


# Build the demo UI: a messages-format Chatbot fed by a multimodal textbox,
# with a radio button selecting which media type bot() replies with.
# NOTE(review): leading indentation appears stripped in this capture; the
# statements below belong inside the `with` block in the original file.
with gr.Blocks(fill_height=True) as demo:
chatbot = gr.Chatbot(
elem_id="chatbot",
bubble_full_width=False,
scale=1,
msg_format="messages"  # expects a list of {"role", "content"} dicts, not tuples
)
response_type = gr.Radio(
[
"image",
"text",
"gallery",
"video",
"audio",
"html",
],
value="text",
label="Response Type",
)

chat_input = gr.MultimodalTextbox(
interactive=True,
placeholder="Enter message or upload file...",
show_label=False,
)

# On submit: record the user message and disable the textbox while the bot runs.
chat_msg = chat_input.submit(
add_message, [chatbot, chat_input], [chatbot, chat_input]
)
# Then generate the bot reply (exposed to API clients as "bot_response") ...
bot_msg = chat_msg.then(
bot, [chatbot, response_type], chatbot, api_name="bot_response"
)
# ... and re-enable the textbox once the reply has been appended.
bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

# Log like/dislike reactions to stdout.
chatbot.like(print_like_dislike, None, None)

demo.queue()
if __name__ == "__main__":
demo.launch()
Loading

0 comments on commit 4221290

Please sign in to comment.