diff --git a/.changeset/lazy-symbols-behave.md b/.changeset/lazy-symbols-behave.md new file mode 100644 index 0000000000000..083d99d04bac7 --- /dev/null +++ b/.changeset/lazy-symbols-behave.md @@ -0,0 +1,6 @@ +--- +"gradio": minor +"website": minor +--- + +feat:Support `additional_outputs` in `gr.ChatInterface` diff --git a/demo/chatinterface_artifacts/run.ipynb b/demo/chatinterface_artifacts/run.ipynb new file mode 100644 index 0000000000000..b2b2264126af4 --- /dev/null +++ b/demo/chatinterface_artifacts/run.ipynb @@ -0,0 +1 @@ +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatinterface_artifacts"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "python_code = \"\"\"\n", "def fib(n):\n", " if n <= 0:\n", " return 0\n", " elif n == 1:\n", " return 1\n", " else:\n", " return fib(n-1) + fib(n-2)\n", "\"\"\"\n", "\n", "js_code = \"\"\"\n", "function fib(n) {\n", " if (n <= 0) return 0;\n", " if (n === 1) return 1;\n", " return fib(n - 1) + fib(n - 2);\n", "}\n", "\"\"\"\n", "\n", "def chat(message, history):\n", " if \"python\" in message.lower():\n", " return \"Type Python or JavaScript to see the code.\", gr.Code(language=\"python\", value=python_code)\n", " elif \"javascript\" in message.lower():\n", " return \"Type Python or JavaScript to see the code.\", gr.Code(language=\"javascript\", value=js_code)\n", " else:\n", " return \"Please ask about Python or JavaScript.\", None\n", "\n", "with gr.Blocks() as demo:\n", " code = gr.Code(render=False)\n", " with gr.Row():\n", " with gr.Column():\n", " gr.Markdown(\"
<center><h1>Write Python or JavaScript</h1></center>
\")\n", " gr.ChatInterface(\n", " chat,\n", " examples=[\"Python\", \"JavaScript\"],\n", " additional_outputs=[code],\n", " type=\"messages\"\n", " )\n", " with gr.Column():\n", " gr.Markdown(\"
<center><h1>Code Artifacts</h1></center>
\")\n", " code.render()\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/chatinterface_artifacts/run.py b/demo/chatinterface_artifacts/run.py new file mode 100644 index 0000000000000..067a018b68e1f --- /dev/null +++ b/demo/chatinterface_artifacts/run.py @@ -0,0 +1,44 @@ +import gradio as gr + +python_code = """ +def fib(n): + if n <= 0: + return 0 + elif n == 1: + return 1 + else: + return fib(n-1) + fib(n-2) +""" + +js_code = """ +function fib(n) { + if (n <= 0) return 0; + if (n === 1) return 1; + return fib(n - 1) + fib(n - 2); +} +""" + +def chat(message, history): + if "python" in message.lower(): + return "Type Python or JavaScript to see the code.", gr.Code(language="python", value=python_code) + elif "javascript" in message.lower(): + return "Type Python or JavaScript to see the code.", gr.Code(language="javascript", value=js_code) + else: + return "Please ask about Python or JavaScript.", None + +with gr.Blocks() as demo: + code = gr.Code(render=False) + with gr.Row(): + with gr.Column(): + gr.Markdown("
<center><h1>Write Python or JavaScript</h1></center>
") + gr.ChatInterface( + chat, + examples=["Python", "JavaScript"], + additional_outputs=[code], + type="messages" + ) + with gr.Column(): + gr.Markdown("
<center><h1>Code Artifacts</h1></center>
") + code.render() + +demo.launch() diff --git a/gradio/chat_interface.py b/gradio/chat_interface.py index 67851cd18d43e..d6fe7b1af482e 100644 --- a/gradio/chat_interface.py +++ b/gradio/chat_interface.py @@ -39,7 +39,7 @@ from gradio.events import Dependency, SelectData from gradio.helpers import create_examples as Examples # noqa: N812 from gradio.helpers import special_args, update -from gradio.layouts import Accordion, Group, Row +from gradio.layouts import Accordion, Column, Group, Row from gradio.routes import Request from gradio.themes import ThemeClass as Theme @@ -74,6 +74,7 @@ def __init__( textbox: Textbox | MultimodalTextbox | None = None, additional_inputs: str | Component | list[str | Component] | None = None, additional_inputs_accordion: str | Accordion | None = None, + additional_outputs: Component | list[Component] | None = None, examples: list[str] | list[MultimodalValue] | list[list] | None = None, example_labels: list[str] | None = None, example_icons: list[str] | None = None, @@ -108,6 +109,7 @@ def __init__( textbox: an instance of the gr.Textbox or gr.MultimodalTextbox component to use for the chat interface, if you would like to customize the textbox properties. If not provided, a default gr.Textbox or gr.MultimodalTextbox component will be created. additional_inputs: an instance or list of instances of gradio components (or their string shortcuts) to use as additional inputs to the chatbot. If the components are not already rendered in a surrounding Blocks, then the components will be displayed under the chatbot, in an accordion. The values of these components will be passed into `fn` as arguments in order after the chat history. additional_inputs_accordion: if a string is provided, this is the label of the `gr.Accordion` to use to contain additional inputs. A `gr.Accordion` object can be provided as well to configure other properties of the container holding the additional inputs. Defaults to a `gr.Accordion(label="Additional Inputs", open=False)`. This parameter is only used if `additional_inputs` is provided. + additional_outputs: an instance or list of instances of gradio components to use as additional outputs from the chat function. These must be components that are already defined in the same Blocks scope. If provided, the chat function should return additional values for these components. See $demo/chatinterface_artifacts. examples: sample inputs for the function; if provided, appear within the chatbot and can be clicked to populate the chatbot input. Should be a list of strings representing text-only examples, or a list of dictionaries (with keys `text` and `files`) representing multimodal examples. If `additional_inputs` are provided, the examples must be a list of lists, where the first element of each inner list is the string or dictionary example message and the remaining elements are the example values for the additional inputs -- in this case, the examples will appear under the chatbot. example_labels: labels for the examples, to be displayed instead of the examples themselves. If provided, should be a list of strings with the same length as the examples list. Only applies when examples are displayed within the chatbot (i.e. when `additional_inputs` is not provided). example_icons: icons for the examples, to be displayed above the examples. If provided, should be a list of string URLs or local paths with the same length as the examples list. Only applies when examples are displayed within the chatbot (i.e. when `additional_inputs` is not provided). 
@@ -123,7 +125,7 @@ def __init__( head_paths: Custom html code as a pathlib.Path to a html file or a list of such paths. This html files will be read, concatenated, and included in the head of the demo webpage. If the `head` parameter is also set, the html from `head` will be included first. analytics_enabled: whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable if defined, or default to True. autofocus: if True, autofocuses to the textbox when the page loads. - autoscroll: If True, will automatically scroll to the bottom of the textbox when the value changes, unless the user scrolls up. If False, will not scroll to the bottom of the textbox when the value changes. + autoscroll: If True, will automatically scroll to the bottom of the chatbot when a new message appears, unless the user scrolls up. If False, will not scroll to the bottom of the chatbot automatically. submit_btn: If True, will show a submit button with a submit icon within the textbox. If a string, will use that string as the submit button text in place of the icon. If False, will not show a submit button. stop_btn: If True, will show a button with a stop icon during generator executions, to stop generating. If a string, will use that string as the submit button text in place of the stop icon. If False, will not show a stop button. concurrency_limit: if set, this is the maximum number of chatbot submissions that can be running simultaneously. Can be set to None to mean no limit (any number of chatbot submissions can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `.queue()`, which is 1 by default). @@ -169,6 +171,7 @@ def __init__( get_component_instance(i) for i in utils.none_or_singleton_to_list(additional_inputs) ] + self.additional_outputs = utils.none_or_singleton_to_list(additional_outputs) if additional_inputs_accordion is None: self.additional_inputs_accordion_params = { "label": "Additional Inputs", @@ -203,114 +206,116 @@ def __init__( break with self: - if title: - Markdown( - f"
<h1 style='text-align: center; margin-bottom: 1rem'>{self.title}</h1>
" - ) - if description: - Markdown(description) - if chatbot: - if self.type: - if self.type != chatbot.type: + with Column(): + if title: + Markdown( + f"
<h1 style='text-align: center; margin-bottom: 1rem'>{self.title}</h1>
" + ) + if description: + Markdown(description) + if chatbot: + if self.type: + if self.type != chatbot.type: + warnings.warn( + "The type of the gr.Chatbot does not match the type of the gr.ChatInterface." + f"The type of the gr.ChatInterface, '{self.type}', will be used." + ) + chatbot.type = self.type + chatbot._setup_data_model() + else: + warnings.warn( + f"The gr.ChatInterface was not provided with a type, so the type of the gr.Chatbot, '{chatbot.type}', will be used." + ) + self.type = chatbot.type + self.chatbot = cast( + Chatbot, get_component_instance(chatbot, render=True) + ) + if self.chatbot.examples and self.examples_messages: warnings.warn( - "The type of the gr.Chatbot does not match the type of the gr.ChatInterface." - f"The type of the gr.ChatInterface, '{self.type}', will be used." + "The ChatInterface already has examples set. The examples provided in the chatbot will be ignored." ) - chatbot.type = self.type - chatbot._setup_data_model() + self.chatbot.examples = ( + self.examples_messages + if not self._additional_inputs_in_examples + else None + ) + self.chatbot._setup_examples() else: - warnings.warn( - f"The gr.ChatInterface was not provided with a type, so the type of the gr.Chatbot, '{chatbot.type}', will be used." + self.type = self.type or "tuples" + self.chatbot = Chatbot( + label="Chatbot", + scale=1, + height=200 if fill_height else None, + type=self.type, + autoscroll=autoscroll, + examples=self.examples_messages + if not self._additional_inputs_in_examples + else None, ) - self.type = chatbot.type - self.chatbot = cast( - Chatbot, get_component_instance(chatbot, render=True) - ) - if self.chatbot.examples and self.examples_messages: - warnings.warn( - "The ChatInterface already has examples set. The examples provided in the chatbot will be ignored." + with Group(): + with Row(): + if textbox: + textbox.show_label = False + textbox_ = get_component_instance(textbox, render=True) + if not isinstance(textbox_, (Textbox, MultimodalTextbox)): + raise TypeError( + f"Expected a gr.Textbox or gr.MultimodalTextbox component, but got {builtins.type(textbox_)}" + ) + self.textbox = textbox_ + else: + textbox_component = ( + MultimodalTextbox if self.multimodal else Textbox + ) + self.textbox = textbox_component( + show_label=False, + label="Message", + placeholder="Type a message...", + scale=7, + autofocus=autofocus, + submit_btn=submit_btn, + stop_btn=stop_btn, + ) + + # Hide the stop button at the beginning, and show it with the given value during the generator execution. 
+ self.original_stop_btn = self.textbox.stop_btn + self.textbox.stop_btn = False + + self.fake_api_btn = Button("Fake API", visible=False) + self.fake_response_textbox = Textbox( + label="Response", visible=False ) - self.chatbot.examples = ( - self.examples_messages - if not self._additional_inputs_in_examples - else None - ) - self.chatbot._setup_examples() - else: - self.type = self.type or "tuples" - self.chatbot = Chatbot( - label="Chatbot", - scale=1, - height=200 if fill_height else None, - type=self.type, - autoscroll=autoscroll, - examples=self.examples_messages - if not self._additional_inputs_in_examples - else None, - ) - with Group(): - with Row(): - if textbox: - textbox.show_label = False - textbox_ = get_component_instance(textbox, render=True) - if not isinstance(textbox_, (Textbox, MultimodalTextbox)): - raise TypeError( - f"Expected a gr.Textbox or gr.MultimodalTextbox component, but got {builtins.type(textbox_)}" - ) - self.textbox = textbox_ - else: - textbox_component = ( - MultimodalTextbox if self.multimodal else Textbox - ) - self.textbox = textbox_component( - show_label=False, - label="Message", - placeholder="Type a message...", - scale=7, - autofocus=autofocus, - submit_btn=submit_btn, - stop_btn=stop_btn, - ) + if self.examples: + self.examples_handler = Examples( + examples=self.examples, + inputs=[self.textbox] + self.additional_inputs, + outputs=self.chatbot, + fn=self._examples_stream_fn + if self.is_generator + else self._examples_fn, + cache_examples=self.cache_examples, + cache_mode=self.cache_mode, + visible=self._additional_inputs_in_examples, + preprocess=self._additional_inputs_in_examples, + ) - # Hide the stop button at the beginning, and show it with the given value during the generator execution. - self.original_stop_btn = self.textbox.stop_btn - self.textbox.stop_btn = False - - self.fake_api_btn = Button("Fake API", visible=False) - self.fake_response_textbox = Textbox(label="Response", visible=False) - - if self.examples: - self.examples_handler = Examples( - examples=self.examples, - inputs=[self.textbox] + self.additional_inputs, - outputs=self.chatbot, - fn=self._examples_stream_fn - if self.is_generator - else self._examples_fn, - cache_examples=self.cache_examples, - cache_mode=self.cache_mode, - visible=self._additional_inputs_in_examples, - preprocess=self._additional_inputs_in_examples, + any_unrendered_inputs = any( + not inp.is_rendered for inp in self.additional_inputs ) - - any_unrendered_inputs = any( - not inp.is_rendered for inp in self.additional_inputs - ) - if self.additional_inputs and any_unrendered_inputs: - with Accordion(**self.additional_inputs_accordion_params): # type: ignore - for input_component in self.additional_inputs: - if not input_component.is_rendered: - input_component.render() - - self.saved_input = State() - self.chatbot_state = ( - State(self.chatbot.value) if self.chatbot.value else State([]) - ) - self.previous_input = State(value=[]) - self.show_progress = show_progress - self._setup_events() - self._setup_api() + if self.additional_inputs and any_unrendered_inputs: + with Accordion(**self.additional_inputs_accordion_params): # type: ignore + for input_component in self.additional_inputs: + if not input_component.is_rendered: + input_component.render() + + self.saved_input = State() + self.chatbot_state = ( + State(self.chatbot.value) if self.chatbot.value else State([]) + ) + self.previous_input = State(value=[]) + self.show_progress = show_progress + self._setup_events() + self._setup_api() 
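In plain Blocks terms, the event-wiring changes in the hunks below amount to extending each submit event's output list and splatting the function's tuple return across it. A hedged sketch of that equivalence (the names `chat_fn` and `extra` are invented for illustration):

```python
import gradio as gr

def chat_fn(message, history):
    # First element updates the chatbot; the rest map onto the extra output
    # components in order -- mirroring _submit_fn's
    # `response, *additional_outputs = response` unpacking below.
    history = (history or []) + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": message.upper()},
    ]
    return history, f"{len(message)} characters"

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    extra = gr.Textbox(label="Extra output")
    box = gr.Textbox()
    # Analogous to `[self.chatbot] + self.additional_outputs` in _setup_events.
    box.submit(chat_fn, [box, chatbot], [chatbot] + [extra])

demo.launch()
```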
@staticmethod def _setup_example_messages( @@ -360,7 +365,7 @@ def _setup_events(self) -> None: .then( submit_fn, [self.saved_input, self.chatbot] + self.additional_inputs, - [self.chatbot], + [self.chatbot] + self.additional_outputs, show_api=False, concurrency_limit=cast( Union[int, Literal["default"], None], self.concurrency_limit @@ -398,7 +403,7 @@ def _setup_events(self) -> None: ).then( submit_fn, [self.saved_input, self.chatbot], - [self.chatbot], + [self.chatbot] + self.additional_outputs, show_api=False, concurrency_limit=cast( Union[int, Literal["default"], None], self.concurrency_limit @@ -431,7 +436,7 @@ def _setup_events(self) -> None: .then( submit_fn, [self.saved_input, self.chatbot] + self.additional_inputs, - [self.chatbot], + [self.chatbot] + self.additional_outputs, show_api=False, concurrency_limit=cast( Union[int, Literal["default"], None], self.concurrency_limit @@ -465,7 +470,7 @@ def _setup_events(self) -> None: ).then( submit_fn, [self.saved_input, self.chatbot], - [self.chatbot], + [self.chatbot] + self.additional_outputs, show_api=False, concurrency_limit=cast( Union[int, Literal["default"], None], self.concurrency_limit @@ -668,7 +673,7 @@ async def _submit_fn( history_with_input: TupleFormat | list[MessageDict], request: Request, *args, - ) -> tuple[TupleFormat, TupleFormat] | tuple[list[MessageDict], list[MessageDict]]: + ) -> TupleFormat | list[MessageDict] | tuple[TupleFormat | list[MessageDict], ...]: message_serialized, history = self._process_msg_and_trim_history( message, history_with_input ) @@ -682,9 +687,15 @@ async def _submit_fn( response = await anyio.to_thread.run_sync( self.fn, *inputs, limiter=self.limiter ) + additional_outputs = None + if isinstance(response, tuple): + response, *additional_outputs = response + self._append_history(history_with_input, response) - return history_with_input # type: ignore + if additional_outputs: + return history_with_input, *additional_outputs + return history_with_input async def _stream_fn( self, @@ -692,7 +703,10 @@ async def _stream_fn( history_with_input: TupleFormat | list[MessageDict], request: Request, *args, - ) -> AsyncGenerator: + ) -> AsyncGenerator[ + TupleFormat | list[MessageDict] | tuple[TupleFormat | list[MessageDict], ...], + None, + ]: message_serialized, history = self._process_msg_and_trim_history( message, history_with_input ) @@ -707,15 +721,29 @@ async def _stream_fn( self.fn, *inputs, limiter=self.limiter ) generator = utils.SyncToAsyncIterator(generator, self.limiter) + + additional_outputs = None try: first_response = await utils.async_iteration(generator) + if isinstance(first_response, tuple): + first_response, *additional_outputs = first_response self._append_history(history_with_input, first_response) - yield history_with_input + yield ( + history_with_input + if not additional_outputs + else (history_with_input, *additional_outputs) + ) except StopIteration: yield history_with_input async for response in generator: + if isinstance(response, tuple): + response, *additional_outputs = response self._append_history(history_with_input, response, first_response=False) - yield history_with_input + yield ( + history_with_input + if not additional_outputs + else (history_with_input, *additional_outputs) + ) def option_clicked( self, history: list[MessageDict], option: SelectData diff --git a/guides/05_chatbots/01_creating-a-chatbot-fast.md b/guides/05_chatbots/01_creating-a-chatbot-fast.md index 644c043243ed4..699c1a495300b 100644 --- a/guides/05_chatbots/01_creating-a-chatbot-fast.md 
+++ b/guides/05_chatbots/01_creating-a-chatbot-fast.md @@ -1,6 +1,6 @@ # How to Create a Chatbot with Gradio -Tags: NLP, LLM, CHATBOT +Tags: LLM, CHATBOT, NLP ## Introduction diff --git a/guides/05_chatbots/02_chatinterface-examples.md index e53780ce616bd..1a7bd266aff88 100644 --- a/guides/05_chatbots/02_chatinterface-examples.md +++ b/guides/05_chatbots/02_chatinterface-examples.md @@ -1,5 +1,7 @@ # Using Popular LLM libraries and APIs +Tags: LLM, CHATBOT, API + In this Guide, we go through several examples of how to use `gr.ChatInterface` with popular LLM libraries and API providers. We will cover the following libraries and API providers: @@ -37,7 +39,7 @@ -Tip: For quick prototyping, the sambanova-gradio library makes it even easier to build chatbots on top of OpenAI models. +Tip: For quick prototyping, the sambanova-gradio library makes it even easier to build chatbots on top of SambaNova models. ## Hyperbolic @@ -55,7 +57,7 @@ The Hyperbolic AI API provides access to many open-source models, such as the Ll $code_llm_hyperbolic -Tip: For quick prototyping, the hyperbolic-gradio library makes it even easier to build chatbots on top of OpenAI models. +Tip: For quick prototyping, the hyperbolic-gradio library makes it even easier to build chatbots on top of Hyperbolic models. ## Anthropic's Claude diff --git a/js/_website/generate_jsons/src/docs/__init__.py index 20eba0e649f67..fc1d6d9d88dfd 100644 --- a/js/_website/generate_jsons/src/docs/__init__.py +++ b/js/_website/generate_jsons/src/docs/__init__.py @@ -3,6 +3,9 @@ import os import re import requests +import base64 +import urllib.parse + from gradio_client.documentation import document_cls, generate_documentation @@ -97,11 +100,34 @@ def add_guides(): add_guides() +def generate_playground_link(demo_name): + playground_url = "https://gradio.app/playground?demo=Blank" + with open(os.path.join(DEMOS_DIR, demo_name, "run.py")) as f: + demo_code = f.read() + encoded_code = base64.b64encode(demo_code.encode('utf-8')).decode('utf-8') + encoded_code_url = urllib.parse.quote(encoded_code, safe='') + playground_url += "&code=" + encoded_code_url + if "requirements.txt" in os.listdir(os.path.join(DEMOS_DIR, demo_name)): + with open(os.path.join(DEMOS_DIR, demo_name, "requirements.txt")) as f: + requirements = f.read() + if requirements: + encoded_reqs = base64.b64encode(requirements.encode('utf-8')).decode('utf-8') + encoded_reqs_url = urllib.parse.quote(encoded_reqs, safe='') + playground_url += "&reqs=" + encoded_reqs_url + return f"<a href='{playground_url}' target='_blank'>demo/{demo_name}</a>" + + def escape_parameters(parameters): new_parameters = [] for param in parameters: param = param.copy() # Manipulating the list item directly causes issues, so copy it first param["doc"] = html.escape(param["doc"]) if param["doc"] else param["doc"] + if param["doc"] and "$demo/" in param["doc"]: + param["doc"] = re.sub( + r"\$demo/(\w+)", + lambda m: generate_playground_link(m.group(1)), + param["doc"], + ) new_parameters.append(param) assert len(new_parameters) == len(parameters) return new_parameters
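The playground-link encoding added in `generate_playground_link` can be exercised in isolation. A sketch under the assumption that the playground reverses the same base64-then-percent-escape transformation for the `code` query parameter (this diff does not show the decoding side):

```python
import base64
import urllib.parse

# A literal stand-in for a demo's run.py, so no DEMOS_DIR layout is needed.
demo_code = "import gradio as gr\ngr.ChatInterface(lambda m, h: m.upper()).launch()\n"

encoded_code = base64.b64encode(demo_code.encode("utf-8")).decode("utf-8")
# safe='' also percent-escapes '/', '+', and '=', so the base64 payload
# survives as a single query-string value.
encoded_code_url = urllib.parse.quote(encoded_code, safe="")
playground_url = "https://gradio.app/playground?demo=Blank&code=" + encoded_code_url

# Round trip: unquote, then base64-decode, recovering the original source.
recovered = base64.b64decode(urllib.parse.unquote(encoded_code_url)).decode("utf-8")
assert recovered == demo_code
print(playground_url)
```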