diff --git a/.changeset/gentle-wombats-thank.md b/.changeset/gentle-wombats-thank.md
deleted file mode 100644
index 1a9902db76680..0000000000000
--- a/.changeset/gentle-wombats-thank.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-"@gradio/app": patch
-"gradio": patch
----
-
-fix:Refactoring component making the code simpler and fixing a Playground mode bug
diff --git a/.changeset/modern-comics-refuse.md b/.changeset/modern-comics-refuse.md
deleted file mode 100644
index c9f9f1492d3aa..0000000000000
--- a/.changeset/modern-comics-refuse.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-"@gradio/plot": patch
-"gradio": patch
----
-
-fix:Fixes Plotly is rendered smaller in a hidden `gr.Tab`
diff --git a/.changeset/tangy-beds-guess.md b/.changeset/tangy-beds-guess.md
deleted file mode 100644
index 0ce78651ad529..0000000000000
--- a/.changeset/tangy-beds-guess.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-"@gradio/app": minor
-"gradio": minor
----
-
-feat:Allow app to fill width
diff --git a/.changeset/true-ears-knock.md b/.changeset/true-ears-knock.md
new file mode 100644
index 0000000000000..3f8af24d80ed9
--- /dev/null
+++ b/.changeset/true-ears-knock.md
@@ -0,0 +1,5 @@
+---
+"gradio": patch
+---
+
+feat:Add guides for msg format and LLM agents
diff --git a/.changeset/young-crabs-begin.md b/.changeset/young-crabs-begin.md
index 3cc236b279cdd..eca49c4d88e99 100644
--- a/.changeset/young-crabs-begin.md
+++ b/.changeset/young-crabs-begin.md
@@ -13,7 +13,7 @@ highlight:
Building Gradio applications around these LLM solutions is now even easier!
-`gr.Chatbot` and `gr.ChatInterface` now have a `msg_format` parameter that can accept two values - `'tuples'` and `'messages'`. If set to `'tuples'`, the default chatbot data format is expected. If set to `'messages'`, a list of dictionaries with `content` and `role` keys is expected. See below -
+`gr.Chatbot` and `gr.ChatInterface` now have a `type` parameter that can accept two values - `'tuples'` and `'messages'`. If set to `'tuples'`, the default chatbot data format is expected. If set to `'messages'`, a list of dictionaries with `content` and `role` keys is expected. See below -
```python
def chat_greeter(msg, history):
@@ -80,7 +80,7 @@ def generate_response(history):
with gr.Blocks() as demo:
- chatbot = gr.Chatbot(msg_format="messages")
+ chatbot = gr.Chatbot(type="messages")
button = gr.Button("Get San Francisco Weather")
button.click(generate_response, chatbot, chatbot)
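The changelog hunk above only shows a fragment of the `chat_greeter` example, so here is a minimal, self-contained sketch of the `'messages'` format it describes. Only the `chat_greeter` name and `gr.Chatbot(type="messages")` come from the diff; the function body, textbox wiring, and greeting text are illustrative assumptions, not the exact example from the patch.

```python
import gradio as gr

def chat_greeter(msg, history):
    # With type="messages", history is a list of {"role": ..., "content": ...} dicts.
    history.append({"role": "user", "content": msg})
    history.append({"role": "assistant", "content": "Hello!"})
    return "", history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")  # expects role/content dictionaries
    msg = gr.Textbox()
    msg.submit(chat_greeter, [msg, chatbot], [msg, chatbot])

if __name__ == "__main__":
    demo.launch()
```

The `generate_response` example later in the same changelog (and the chatbot_with_tools demo below) shows the equivalent pattern using `gr.ChatMessage` dataclasses instead of plain dicts.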
diff --git a/client/python/test/conftest.py b/client/python/test/conftest.py
index ab7659fcf18c5..a46ea99c54857 100644
--- a/client/python/test/conftest.py
+++ b/client/python/test/conftest.py
@@ -464,7 +464,7 @@ def max_file_size_demo():
@pytest.fixture
def chatbot_message_format():
with gr.Blocks() as demo:
- chatbot = gr.Chatbot(msg_format="messages")
+ chatbot = gr.Chatbot(type="messages")
msg = gr.Textbox()
def respond(message, chat_history: list):
diff --git a/demo/agent_chatbot/requirements.txt b/demo/agent_chatbot/requirements.txt
new file mode 100644
index 0000000000000..646219d1bb0ae
--- /dev/null
+++ b/demo/agent_chatbot/requirements.txt
@@ -0,0 +1 @@
+git+https://github.com/huggingface/transformers.git#egg=transformers[agents]
\ No newline at end of file
diff --git a/demo/agent_chatbot/run.ipynb b/demo/agent_chatbot/run.ipynb
new file mode 100644
index 0000000000000..86d56a845e105
--- /dev/null
+++ b/demo/agent_chatbot/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: agent_chatbot"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers.git#egg=transformers[agents]"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/agent_chatbot/utils.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from gradio import ChatMessage\n", "from transformers import load_tool, ReactCodeAgent, HfEngine\n", "from utils import stream_from_transformers_agent\n", "\n", "# Import tool from Hub\n", "image_generation_tool = load_tool(\"m-ric/text-to-image\")\n", "\n", "\n", "llm_engine = HfEngine(\"meta-llama/Meta-Llama-3-70B-Instruct\")\n", "# Initialize the agent with both tools\n", "agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)\n", "\n", "\n", "def interact_with_agent(prompt, messages):\n", " messages.append(ChatMessage(role=\"user\", content=prompt))\n", " yield messages\n", " for msg in stream_from_transformers_agent(agent, prompt):\n", " messages.append(msg)\n", " yield messages\n", " yield messages\n", "\n", "\n", "with gr.Blocks() as demo:\n", " stored_message = gr.State([])\n", " chatbot = gr.Chatbot(label=\"Agent\",\n", " type=\"messages\",\n", " avatar_images=(None, \"https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png\"))\n", " text_input = gr.Textbox(lines=1, label=\"Chat Message\")\n", " text_input.submit(lambda s: (s, \"\"), [text_input], [stored_message, text_input]).then(interact_with_agent, [stored_message, chatbot], [chatbot])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/demo/agent_chatbot/run.py b/demo/agent_chatbot/run.py
new file mode 100644
index 0000000000000..88841c4bf0e4d
--- /dev/null
+++ b/demo/agent_chatbot/run.py
@@ -0,0 +1,34 @@
+import gradio as gr
+from gradio import ChatMessage
+from transformers import load_tool, ReactCodeAgent, HfEngine
+from utils import stream_from_transformers_agent
+
+# Import tool from Hub
+image_generation_tool = load_tool("m-ric/text-to-image")
+
+
+llm_engine = HfEngine("meta-llama/Meta-Llama-3-70B-Instruct")
+# Initialize the agent with both tools
+agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)
+
+
+def interact_with_agent(prompt, messages):
+ messages.append(ChatMessage(role="user", content=prompt))
+ yield messages
+ for msg in stream_from_transformers_agent(agent, prompt):
+ messages.append(msg)
+ yield messages
+ yield messages
+
+
+with gr.Blocks() as demo:
+ stored_message = gr.State([])
+ chatbot = gr.Chatbot(label="Agent",
+ type="messages",
+ avatar_images=(None, "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png"))
+ text_input = gr.Textbox(lines=1, label="Chat Message")
+ text_input.submit(lambda s: (s, ""), [text_input], [stored_message, text_input]).then(interact_with_agent, [stored_message, chatbot], [chatbot])
+
+
+if __name__ == "__main__":
+ demo.launch()
\ No newline at end of file
diff --git a/demo/agent_chatbot/utils.py b/demo/agent_chatbot/utils.py
new file mode 100644
index 0000000000000..a169df79f6683
--- /dev/null
+++ b/demo/agent_chatbot/utils.py
@@ -0,0 +1,63 @@
+from gradio import ChatMessage
+from transformers.agents import ReactCodeAgent, agent_types
+from typing import Generator
+
+
+def pull_message(step_log: dict):
+ if step_log.get("rationale"):
+ yield ChatMessage(
+ role="assistant", content=step_log["rationale"]
+ )
+ if step_log.get("tool_call"):
+ used_code = step_log["tool_call"]["tool_name"] == "code interpreter"
+ content = step_log["tool_call"]["tool_arguments"]
+ if used_code:
+ content = f"```py\n{content}\n```"
+ yield ChatMessage(
+ role="assistant",
+ metadata={"title": f"🛠️ Used tool {step_log['tool_call']['tool_name']}"},
+ content=content,
+ )
+ if step_log.get("observation"):
+ yield ChatMessage(
+ role="assistant", content=f"```\n{step_log['observation']}\n```"
+ )
+ if step_log.get("error"):
+ yield ChatMessage(
+ role="assistant",
+ content=str(step_log["error"]),
+ metadata={"title": "💥 Error"},
+ )
+
+
+def stream_from_transformers_agent(
+ agent: ReactCodeAgent, prompt: str
+) -> Generator[ChatMessage, None, ChatMessage | None]:
+ """Runs an agent with the given prompt and streams the messages from the agent as ChatMessages."""
+
+ class Output:
+ output: agent_types.AgentType | str = None
+
+ for step_log in agent.run(prompt, stream=True):
+ if isinstance(step_log, dict):
+ for message in pull_message(step_log):
+ print("message", message)
+ yield message
+
+
+ Output.output = step_log
+ if isinstance(Output.output, agent_types.AgentText):
+ yield ChatMessage(
+ role="assistant", content=f"**Final answer:**\n```\n{Output.output.to_string()}\n```")
+ elif isinstance(Output.output, agent_types.AgentImage):
+ yield ChatMessage(
+ role="assistant",
+ content={"path": Output.output.to_string(), "mime_type": "image/png"},
+ )
+ elif isinstance(Output.output, agent_types.AgentAudio):
+ yield ChatMessage(
+ role="assistant",
+ content={"path": Output.output.to_string(), "mime_type": "audio/wav"},
+ )
+ else:
+ return ChatMessage(role="assistant", content=Output.output)
diff --git a/demo/blocks_xray/run.ipynb b/demo/blocks_xray/run.ipynb
index c8cf25affa3ee..6754b73faff91 100644
--- a/demo/blocks_xray/run.ipynb
+++ b/demo/blocks_xray/run.ipynb
@@ -1 +1 @@
-{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_xray"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "disease_values = [0.25, 0.5, 0.75]\n", "\n", "def xray_model(diseases, img):\n", " return [{disease: disease_values[idx] for idx,disease in enumerate(diseases)}]\n", "\n", "\n", "def ct_model(diseases, img):\n", " return [{disease: 0.1 for disease in diseases}]\n", "\n", "with gr.Blocks(fill_width=True) as demo:\n", " gr.Markdown(\n", " \"\"\"\n", "# Detect Disease From Scan\n", "With this model you can lorem ipsum\n", "- ipsum 1\n", "- ipsum 2\n", "\"\"\"\n", " )\n", " gr.DuplicateButton()\n", " disease = gr.CheckboxGroup(\n", " info=\"Select the diseases you want to scan for.\",\n", " choices=[\"Covid\", \"Malaria\", \"Lung Cancer\"], label=\"Disease to Scan For\"\n", " )\n", " slider = gr.Slider(0, 100)\n", "\n", " with gr.Tab(\"X-ray\") as x_tab:\n", " with gr.Row():\n", " xray_scan = gr.Image()\n", " xray_results = gr.JSON()\n", " xray_run = gr.Button(\"Run\")\n", " xray_run.click(\n", " xray_model,\n", " inputs=[disease, xray_scan],\n", " outputs=xray_results,\n", " api_name=\"xray_model\"\n", " )\n", "\n", " with gr.Tab(\"CT Scan\"):\n", " with gr.Row():\n", " ct_scan = gr.Image()\n", " ct_results = gr.JSON()\n", " ct_run = gr.Button(\"Run\")\n", " ct_run.click(\n", " ct_model,\n", " inputs=[disease, ct_scan],\n", " outputs=ct_results,\n", " api_name=\"ct_model\"\n", " )\n", "\n", " upload_btn = gr.Button(\"Upload Results\", variant=\"primary\")\n", " upload_btn.click(\n", " lambda ct, xr: None,\n", " inputs=[ct_results, xray_results],\n", " outputs=[],\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
+{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_xray"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "disease_values = [0.25, 0.5, 0.75]\n", "\n", "def xray_model(diseases, img):\n", " return [{disease: disease_values[idx] for idx,disease in enumerate(diseases)}]\n", "\n", "\n", "def ct_model(diseases, img):\n", " return [{disease: 0.1 for disease in diseases}]\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\n", " \"\"\"\n", "# Detect Disease From Scan\n", "With this model you can lorem ipsum\n", "- ipsum 1\n", "- ipsum 2\n", "\"\"\"\n", " )\n", " gr.DuplicateButton()\n", " disease = gr.CheckboxGroup(\n", " info=\"Select the diseases you want to scan for.\",\n", " choices=[\"Covid\", \"Malaria\", \"Lung Cancer\"], label=\"Disease to Scan For\"\n", " )\n", " slider = gr.Slider(0, 100)\n", "\n", " with gr.Tab(\"X-ray\") as x_tab:\n", " with gr.Row():\n", " xray_scan = gr.Image()\n", " xray_results = gr.JSON()\n", " xray_run = gr.Button(\"Run\")\n", " xray_run.click(\n", " xray_model,\n", " inputs=[disease, xray_scan],\n", " outputs=xray_results,\n", " api_name=\"xray_model\"\n", " )\n", "\n", " with gr.Tab(\"CT Scan\"):\n", " with gr.Row():\n", " ct_scan = gr.Image()\n", " ct_results = gr.JSON()\n", " ct_run = gr.Button(\"Run\")\n", " ct_run.click(\n", " ct_model,\n", " inputs=[disease, ct_scan],\n", " outputs=ct_results,\n", " api_name=\"ct_model\"\n", " )\n", "\n", " upload_btn = gr.Button(\"Upload Results\", variant=\"primary\")\n", " upload_btn.click(\n", " lambda ct, xr: None,\n", " inputs=[ct_results, xray_results],\n", " outputs=[],\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/demo/blocks_xray/run.py b/demo/blocks_xray/run.py
index f1cb5102636d0..e8e8e8167f1d6 100644
--- a/demo/blocks_xray/run.py
+++ b/demo/blocks_xray/run.py
@@ -10,7 +10,7 @@ def xray_model(diseases, img):
def ct_model(diseases, img):
return [{disease: 0.1 for disease in diseases}]
-with gr.Blocks(fill_width=True) as demo:
+with gr.Blocks() as demo:
gr.Markdown(
"""
# Detect Disease From Scan
diff --git a/demo/chatbot_core_components_simple/messages_testcase.py b/demo/chatbot_core_components_simple/messages_testcase.py
index fd6709b3a4f42..797f7b79735fd 100644
--- a/demo/chatbot_core_components_simple/messages_testcase.py
+++ b/demo/chatbot_core_components_simple/messages_testcase.py
@@ -65,7 +65,7 @@ def bot(history, response_type):
elem_id="chatbot",
bubble_full_width=False,
scale=1,
- msg_format="messages"
+ type="messages"
)
response_type = gr.Radio(
[
diff --git a/demo/chatbot_multimodal/messages_testcase.py b/demo/chatbot_multimodal/messages_testcase.py
index 33813ed3a4aea..bb61b2113f556 100644
--- a/demo/chatbot_multimodal/messages_testcase.py
+++ b/demo/chatbot_multimodal/messages_testcase.py
@@ -27,7 +27,7 @@ def bot(history: list):
[],
elem_id="chatbot",
bubble_full_width=False,
- msg_format="messages"
+ type="messages"
)
chat_input = gr.MultimodalTextbox(interactive=True,
diff --git a/demo/chatbot_streaming/testcase_messages.py b/demo/chatbot_streaming/testcase_messages.py
index 1d772e26f9721..7797e0b4a0514 100644
--- a/demo/chatbot_streaming/testcase_messages.py
+++ b/demo/chatbot_streaming/testcase_messages.py
@@ -3,7 +3,7 @@
import time
with gr.Blocks() as demo:
- chatbot = gr.Chatbot(msg_format="messages")
+ chatbot = gr.Chatbot(type="messages")
msg = gr.Textbox()
clear = gr.Button("Clear")
diff --git a/demo/chatbot_with_tools/run.ipynb b/demo/chatbot_with_tools/run.ipynb
index 0a6b2005d3965..ff3225f0dd5f9 100644
--- a/demo/chatbot_with_tools/run.ipynb
+++ b/demo/chatbot_with_tools/run.ipynb
@@ -1 +1 @@
-{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_with_tools"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from gradio import ChatMessage\n", "import time\n", "\n", "def generate_response(history):\n", " history.append(ChatMessage(role=\"user\", content=\"What is the weather in San Francisco right now?\"))\n", " yield history\n", " time.sleep(0.25)\n", " history.append(ChatMessage(role=\"assistant\",\n", " content=\"In order to find the current weather in San Francisco, I will need to use my weather tool.\")\n", " )\n", " yield history\n", " time.sleep(0.25)\n", "\n", " history.append(ChatMessage(role=\"assistant\",\n", " content=\"API Error when connecting to weather service.\",\n", " metadata={\"title\": \"\ud83d\udca5 Error using tool 'Weather'\"})\n", " )\n", " yield history\n", " time.sleep(0.25)\n", "\n", " history.append(ChatMessage(role=\"assistant\",\n", " content=\"I will try again\",\n", " ))\n", " yield history\n", " time.sleep(0.25)\n", "\n", " history.append(ChatMessage(role=\"assistant\",\n", " content=\"Weather 72 degrees Fahrenheit with 20% chance of rain.\",\n", " metadata={\"title\": \"\ud83d\udee0\ufe0f Used tool 'Weather'\"}\n", " ))\n", " yield history\n", " time.sleep(0.25)\n", "\n", " history.append(ChatMessage(role=\"assistant\",\n", " content=\"Now that the API succeeded I can complete my task.\",\n", " ))\n", " yield history\n", " time.sleep(0.25)\n", "\n", " history.append(ChatMessage(role=\"assistant\",\n", " content=\"It's a sunny day in San Francisco with a current temperature of 72 degrees Fahrenheit and a 20% chance of rain. Enjoy the weather!\",\n", " ))\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(msg_format=\"messages\")\n", " button = gr.Button(\"Get San Francisco Weather\")\n", " button.click(generate_response, chatbot, chatbot)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
+{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_with_tools"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from gradio import ChatMessage\n", "import time\n", "\n", "def generate_response(history):\n", " history.append(ChatMessage(role=\"user\", content=\"What is the weather in San Francisco right now?\"))\n", " yield history\n", " time.sleep(0.25)\n", " history.append(ChatMessage(role=\"assistant\",\n", " content=\"In order to find the current weather in San Francisco, I will need to use my weather tool.\")\n", " )\n", " yield history\n", " time.sleep(0.25)\n", "\n", " history.append(ChatMessage(role=\"assistant\",\n", " content=\"API Error when connecting to weather service.\",\n", " metadata={\"title\": \"\ud83d\udca5 Error using tool 'Weather'\"})\n", " )\n", " yield history\n", " time.sleep(0.25)\n", "\n", " history.append(ChatMessage(role=\"assistant\",\n", " content=\"I will try again\",\n", " ))\n", " yield history\n", " time.sleep(0.25)\n", "\n", " history.append(ChatMessage(role=\"assistant\",\n", " content=\"Weather 72 degrees Fahrenheit with 20% chance of rain.\",\n", " metadata={\"title\": \"\ud83d\udee0\ufe0f Used tool 'Weather'\"}\n", " ))\n", " yield history\n", " time.sleep(0.25)\n", "\n", " history.append(ChatMessage(role=\"assistant\",\n", " content=\"Now that the API succeeded I can complete my task.\",\n", " ))\n", " yield history\n", " time.sleep(0.25)\n", "\n", " history.append(ChatMessage(role=\"assistant\",\n", " content=\"It's a sunny day in San Francisco with a current temperature of 72 degrees Fahrenheit and a 20% chance of rain. Enjoy the weather!\",\n", " ))\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(type=\"messages\")\n", " button = gr.Button(\"Get San Francisco Weather\")\n", " button.click(generate_response, chatbot, chatbot)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/demo/chatbot_with_tools/run.py b/demo/chatbot_with_tools/run.py
index c3155b7a632a4..6aebe5d87220b 100644
--- a/demo/chatbot_with_tools/run.py
+++ b/demo/chatbot_with_tools/run.py
@@ -45,7 +45,7 @@ def generate_response(history):
with gr.Blocks() as demo:
- chatbot = gr.Chatbot(msg_format="messages")
+ chatbot = gr.Chatbot(type="messages")
button = gr.Button("Get San Francisco Weather")
button.click(generate_response, chatbot, chatbot)
diff --git a/demo/chatinterface_streaming_echo/messages_testcase.py b/demo/chatinterface_streaming_echo/messages_testcase.py
index 85dd94bb73ee6..b2b3df8c154b3 100644
--- a/demo/chatinterface_streaming_echo/messages_testcase.py
+++ b/demo/chatinterface_streaming_echo/messages_testcase.py
@@ -9,7 +9,7 @@ def slow_echo(message, history):
-demo = gr.ChatInterface(slow_echo, msg_format="messages")
+demo = gr.ChatInterface(slow_echo, type="messages")
if __name__ == "__main__":
demo.launch()
diff --git a/demo/test_chatinterface_streaming_echo/messages_testcase.py b/demo/test_chatinterface_streaming_echo/messages_testcase.py
index 02a7bc5f85e0b..546e1285a96a2 100644
--- a/demo/test_chatinterface_streaming_echo/messages_testcase.py
+++ b/demo/test_chatinterface_streaming_echo/messages_testcase.py
@@ -11,7 +11,7 @@ def slow_echo(message, history):
yield f"Run {runs} - You typed: " + message[: i + 1]
-demo = gr.ChatInterface(slow_echo, msg_format="messages").queue()
+demo = gr.ChatInterface(slow_echo, type="messages").queue()
if __name__ == "__main__":
demo.launch()
diff --git a/gradio/blocks.py b/gradio/blocks.py
index 75f96a11d1598..a79d3b83a59c4 100644
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -925,7 +925,6 @@ def __init__(
js: str | None = None,
head: str | None = None,
fill_height: bool = False,
- fill_width: bool = False,
delete_cache: tuple[int, int] | None = None,
**kwargs,
):
@@ -939,7 +938,6 @@ def __init__(
         js: Custom js as a string or path to a js file. The custom js should be in the form of a single js function. This function will automatically be executed when the page loads. For more flexibility, use the head parameter to insert js inside <head>.
-
-{#if playground}
-
- {#key index_component_key}
- {#if error}
-
- {:else}
-
- {/if}
- {/key}
-
-{:else}
- {#key index_component_key}
- {#if error}
-
- {:else}
-
- {/if}
- {/key}
-{/if}
diff --git a/js/app/src/lite/Playground.svelte b/js/app/src/lite/Playground.svelte
index 815e5bf796dab..8b564bcb46f88 100644
--- a/js/app/src/lite/Playground.svelte
+++ b/js/app/src/lite/Playground.svelte
@@ -1,16 +1,35 @@