From 3e4a86973c2bacfb847b02bc632248da93d0beb4 Mon Sep 17 00:00:00 2001 From: Lianmin Zheng Date: Thu, 21 Sep 2023 23:49:59 +0000 Subject: [PATCH] Show terms of use as an alert --- fastchat/serve/gradio_block_arena_anony.py | 32 ++------ fastchat/serve/gradio_block_arena_named.py | 32 ++------ fastchat/serve/gradio_web_server.py | 69 +++++++---------- fastchat/serve/gradio_web_server_multi.py | 90 ++++++---------------- fastchat/utils.py | 14 ++++ 5 files changed, 76 insertions(+), 161 deletions(-) diff --git a/fastchat/serve/gradio_block_arena_anony.py b/fastchat/serve/gradio_block_arena_anony.py index 2bdb9abbb..edd89d072 100644 --- a/fastchat/serve/gradio_block_arena_anony.py +++ b/fastchat/serve/gradio_block_arena_anony.py @@ -57,17 +57,7 @@ def load_demo_side_by_side_anony(models_, url_params): gr.Markdown.update(visible=True), ) - return ( - states - + selector_updates - + (gr.Chatbot.update(visible=True),) * num_sides - + ( - gr.Textbox.update(visible=True), - gr.Box.update(visible=True), - gr.Row.update(visible=True), - gr.Accordion.update(visible=True), - ) - ) + return states + selector_updates def vote_last_response(states, vote_type, model_selectors, request: gr.Request): @@ -388,9 +378,6 @@ def build_side_by_side_ui_anony(models): ### Leaderboard See [lmsys/chatbot-arena-leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard) or the 4th tab above on this page. -### Terms of use -By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. **The service collects user dialogue data and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) license.** The demo works better on desktop devices with a wide screen. - ### Battle Please scroll down and start chatting. 
The models include both closed-source models (e.g., ChatGPT) and open-source models (e.g., Llama). """ @@ -407,7 +394,7 @@ def build_side_by_side_ui_anony(models): label = "Model A" if i == 0 else "Model B" with gr.Column(): chatbots[i] = gr.Chatbot( - label=label, elem_id=f"chatbot", visible=False, height=550 + label=label, elem_id=f"chatbot", height=550 ) with gr.Row(): @@ -432,19 +419,18 @@ def build_side_by_side_ui_anony(models): textbox = gr.Textbox( show_label=False, placeholder="Enter your prompt here and press ENTER", - visible=False, container=False, elem_id="input_box", ) with gr.Column(scale=1, min_width=50): - send_btn = gr.Button(value="Send", visible=False, variant="primary") + send_btn = gr.Button(value="Send", variant="primary") with gr.Row() as button_row: clear_btn = gr.Button(value="🗑️ Clear history", interactive=False) regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False) share_btn = gr.Button(value="📷 Share") - with gr.Accordion("Parameters", open=False, visible=True) as parameter_row: + with gr.Accordion("Parameters", open=False) as parameter_row: temperature = gr.Slider( minimum=0.0, maximum=1.0, @@ -560,12 +546,4 @@ def build_side_by_side_ui_anony(models): flash_buttons, [], btn_list ) - return ( - states, - model_selectors, - chatbots, - textbox, - send_btn, - button_row, - parameter_row, - ) + return states + model_selectors diff --git a/fastchat/serve/gradio_block_arena_named.py b/fastchat/serve/gradio_block_arena_named.py index 3da3b223a..29d3ba885 100644 --- a/fastchat/serve/gradio_block_arena_named.py +++ b/fastchat/serve/gradio_block_arena_named.py @@ -62,17 +62,7 @@ def load_demo_side_by_side_named(models, url_params): gr.Dropdown.update(choices=models, value=model_right, visible=True), ) - return ( - states - + selector_updates - + (gr.Chatbot.update(visible=True),) * num_sides - + ( - gr.Textbox.update(visible=True), - gr.Box.update(visible=True), - gr.Row.update(visible=True), - gr.Accordion.update(visible=True), - ) 
- ) + return states + selector_updates def vote_last_response(states, vote_type, model_selectors, request: gr.Request): @@ -313,9 +303,6 @@ def build_side_by_side_ui_named(models): - You can do multiple turns of conversations before voting. - Click "Clear history" to start a new round. -### Terms of use -By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. **The service collects user dialogue data and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) license.** The demo works better on desktop devices with a wide screen. - ### Choose two models to chat with (view [leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard)) """ @@ -345,7 +332,7 @@ def build_side_by_side_ui_named(models): label = "Model A" if i == 0 else "Model B" with gr.Column(): chatbots[i] = gr.Chatbot( - label=label, elem_id=f"chatbot", visible=False, height=550 + label=label, elem_id=f"chatbot", height=550 ) with gr.Row(): @@ -365,19 +352,18 @@ def build_side_by_side_ui_named(models): textbox = gr.Textbox( show_label=False, placeholder="Enter your prompt here and press ENTER", - visible=False, container=False, elem_id="input_box", ) with gr.Column(scale=1, min_width=50): - send_btn = gr.Button(value="Send", visible=False, variant="primary") + send_btn = gr.Button(value="Send", variant="primary") with gr.Row() as button_row: regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False) clear_btn = gr.Button(value="🗑️ Clear history", interactive=False) share_btn = gr.Button(value="📷 Share") - with gr.Accordion("Parameters", open=False, visible=True) as parameter_row: + with gr.Accordion("Parameters", open=False) as parameter_row: temperature = gr.Slider( minimum=0.0, maximum=1.0, @@ -495,12 
+481,4 @@ def build_side_by_side_ui_named(models): flash_buttons, [], btn_list ) - return ( - states, - model_selectors, - chatbots, - textbox, - send_btn, - button_row, - parameter_row, - ) + return states + model_selectors diff --git a/fastchat/serve/gradio_web_server.py b/fastchat/serve/gradio_web_server.py index d77286655..2c5a54850 100644 --- a/fastchat/serve/gradio_web_server.py +++ b/fastchat/serve/gradio_web_server.py @@ -28,7 +28,7 @@ SESSION_EXPIRATION_TIME, ) from fastchat.model.model_adapter import get_conversation_template -from fastchat.model.model_registry import get_model_info +from fastchat.model.model_registry import get_model_info, model_info from fastchat.serve.api_provider import ( anthropic_api_stream_iter, openai_api_stream_iter, @@ -39,6 +39,7 @@ build_logger, violates_moderation, get_window_url_params_js, + get_window_url_params_with_tos_js, parse_gradio_auth_creds, ) @@ -163,15 +164,7 @@ def load_demo_single(models, url_params): ) state = None - return ( - state, - dropdown_update, - gr.Chatbot.update(visible=True), - gr.Textbox.update(visible=True), - gr.Button.update(visible=True), - gr.Row.update(visible=True), - gr.Accordion.update(visible=True), - ) + return [state, dropdown_update] def load_demo(url_params, request: gr.Request): @@ -560,9 +553,6 @@ def build_single_model_ui(models, add_promotion_links=False): # 🏔️ Chat with Open Large Language Models {promotion} -### Terms of use -By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. 
**The service collects user dialogue data and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) license.** - ### Choose a model to chat with """ @@ -582,7 +572,6 @@ def build_single_model_ui(models, add_promotion_links=False): chatbot = gr.Chatbot( elem_id="chatbot", label="Scroll down and start chatting", - visible=False, height=550, ) with gr.Row(): @@ -590,21 +579,20 @@ def build_single_model_ui(models, add_promotion_links=False): textbox = gr.Textbox( show_label=False, placeholder="Enter your prompt here and press ENTER", - visible=False, container=False, elem_id="input_box", ) with gr.Column(scale=1, min_width=50): - send_btn = gr.Button(value="Send", visible=False, variant="primary") + send_btn = gr.Button(value="Send", variant="primary") - with gr.Row(visible=False) as button_row: + with gr.Row() as button_row: upvote_btn = gr.Button(value="👍 Upvote", interactive=False) downvote_btn = gr.Button(value="👎 Downvote", interactive=False) flag_btn = gr.Button(value="⚠️ Flag", interactive=False) regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False) clear_btn = gr.Button(value="🗑️ Clear history", interactive=False) - with gr.Accordion("Parameters", open=False, visible=False) as parameter_row: + with gr.Accordion("Parameters", open=False) as parameter_row: temperature = gr.Slider( minimum=0.0, maximum=1.0, @@ -667,49 +655,42 @@ def build_single_model_ui(models, add_promotion_links=False): [state, chatbot] + btn_list, ) send_btn.click( - add_text, [state, model_selector, textbox], [state, chatbot, textbox] + btn_list + add_text, [state, model_selector, textbox], [state, chatbot, textbox] + btn_list, ).then( bot_response, [state, temperature, top_p, max_output_tokens], [state, chatbot] + btn_list, ) - return state, model_selector, chatbot, textbox, send_btn, button_row, parameter_row + return [state, model_selector] def build_demo(models): with gr.Blocks( title="Chat with Open Large Language Models", - 
theme=gr.themes.Base(), + theme=gr.themes.Default(), css=block_css, ) as demo: url_params = gr.JSON(visible=False) - ( - state, - model_selector, - chatbot, - textbox, - send_btn, - button_row, - parameter_row, - ) = build_single_model_ui(models) + state, model_selector = build_single_model_ui(models) if args.model_list_mode not in ["once", "reload"]: raise ValueError(f"Unknown model list mode: {args.model_list_mode}") + + if args.show_terms_of_use: + load_js = get_window_url_params_with_tos_js + else: + load_js = get_window_url_params_js + demo.load( load_demo, [url_params], [ state, model_selector, - chatbot, - textbox, - send_btn, - button_row, - parameter_row, ], - _js=get_window_url_params_js, + _js=load_js ) return demo @@ -722,29 +703,35 @@ def build_demo(models): parser.add_argument( "--share", action="store_true", - help="Whether to generate a public, shareable link.", + help="Whether to generate a public, shareable link", ) parser.add_argument( "--controller-url", type=str, default="http://localhost:21001", - help="The address of the controller.", + help="The address of the controller", ) parser.add_argument( "--concurrency-count", type=int, default=10, - help="The concurrency count of the gradio queue.", + help="The concurrency count of the gradio queue", ) parser.add_argument( "--model-list-mode", type=str, default="once", choices=["once", "reload"], - help="Whether to load the model list once or reload the model list every time.", + help="Whether to load the model list once or reload the model list every time", + ) + parser.add_argument( + "--moderate", action="store_true", + help="Enable content moderation to block unsafe inputs" ) parser.add_argument( - "--moderate", action="store_true", help="Enable content moderation" + "--show-terms-of-use", + action="store_true", + help="Show terms of use before loading the demo", ) parser.add_argument( "--add-chatgpt", diff --git a/fastchat/serve/gradio_web_server_multi.py 
b/fastchat/serve/gradio_web_server_multi.py index 97aecc66c..96ad60106 100644 --- a/fastchat/serve/gradio_web_server_multi.py +++ b/fastchat/serve/gradio_web_server_multi.py @@ -34,6 +34,7 @@ from fastchat.utils import ( build_logger, get_window_url_params_js, + get_window_url_params_with_tos_js, parse_gradio_auth_creds, ) @@ -105,68 +106,13 @@ def build_demo(models, elo_results_file, leaderboard_table_file): ) as demo: with gr.Tabs() as tabs: with gr.Tab("Chatbot Arena (battle)", id=0): - ( - b_states, - b_model_selectors, - b_chatbots, - b_textbox, - b_send_btn, - b_button_row, - b_parameter_row, - ) = build_side_by_side_ui_anony(models) - b_list = ( - b_states - + b_model_selectors - + b_chatbots - + [ - b_textbox, - b_send_btn, - b_button_row, - b_parameter_row, - ] - ) + side_by_side_anony_list = build_side_by_side_ui_anony(models) with gr.Tab("Chatbot Arena (side-by-side)", id=1): - ( - c_states, - c_model_selectors, - c_chatbots, - c_textbox, - c_send_btn, - c_button_row, - c_parameter_row, - ) = build_side_by_side_ui_named(models) - c_list = ( - c_states - + c_model_selectors - + c_chatbots - + [ - c_textbox, - c_send_btn, - c_button_row, - c_parameter_row, - ] - ) + side_by_side_named_list = build_side_by_side_ui_named(models) with gr.Tab("Single Model", id=2): - ( - a_state, - a_model_selector, - a_chatbot, - a_textbox, - a_send_btn, - a_button_row, - a_parameter_row, - ) = build_single_model_ui(models, add_promotion_links=True) - a_list = [ - a_state, - a_model_selector, - a_chatbot, - a_textbox, - a_send_btn, - a_button_row, - a_parameter_row, - ] + single_model_list = build_single_model_ui(models, add_promotion_links=True) if elo_results_file: with gr.Tab("Leaderboard", id=3): @@ -176,11 +122,17 @@ def build_demo(models, elo_results_file, leaderboard_table_file): if args.model_list_mode not in ["once", "reload"]: raise ValueError(f"Unknown model list mode: {args.model_list_mode}") + + if args.show_terms_of_use: + load_js = 
get_window_url_params_with_tos_js + else: + load_js = get_window_url_params_js + demo.load( load_demo, [url_params], - [tabs] + a_list + b_list + c_list, - _js=get_window_url_params_js, + [tabs] + single_model_list + side_by_side_anony_list + side_by_side_named_list, + _js=load_js, ) return demo @@ -193,19 +145,19 @@ def build_demo(models, elo_results_file, leaderboard_table_file): parser.add_argument( "--share", action="store_true", - help="Whether to generate a public, shareable link.", + help="Whether to generate a public, shareable link", ) parser.add_argument( "--controller-url", type=str, default="http://localhost:21001", - help="The address of the controller.", + help="The address of the controller", ) parser.add_argument( "--concurrency-count", type=int, default=10, - help="The concurrency count of the gradio queue.", + help="The concurrency count of the gradio queue", ) parser.add_argument( "--model-list-mode", @@ -215,7 +167,13 @@ def build_demo(models, elo_results_file, leaderboard_table_file): help="Whether to load the model list once or reload the model list every time.", ) parser.add_argument( - "--moderate", action="store_true", help="Enable content moderation" + "--moderate", action="store_true", + help="Enable content moderation to block unsafe inputs" + ) + parser.add_argument( + "--show-terms-of-use", + action="store_true", + help="Show terms of use before loading the demo", ) parser.add_argument( "--add-chatgpt", @@ -248,8 +206,8 @@ def build_demo(models, elo_results_file, leaderboard_table_file): help='Set the gradio authentication file path. 
The file should contain one or more user:password pairs in this format: "u1:p1,u2:p2,u3:p3"', default=None, ) - parser.add_argument("--elo-results-file", type=str) - parser.add_argument("--leaderboard-table-file", type=str) + parser.add_argument("--elo-results-file", type=str, help="Load leaderboard results and plots") + parser.add_argument("--leaderboard-table-file", type=str, help="Load leaderboard results and plots") args = parser.parse_args() logger.info(f"args: {args}") diff --git a/fastchat/utils.py b/fastchat/utils.py index e2d3a6ac6..947d8b687 100644 --- a/fastchat/utils.py +++ b/fastchat/utils.py @@ -201,6 +201,20 @@ def pretty_print_semaphore(semaphore): """ +get_window_url_params_with_tos_js = """ +function() { + const params = new URLSearchParams(window.location.search); + url_params = Object.fromEntries(params); + console.log("url_params", url_params); + + msg = "Users of this website are required to agree to the following terms:\\nThe service is a research preview. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes.\\nThe service collects user dialogue data and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) license." + alert(msg); + + return url_params; + } +""" + + def iter_over_async( async_gen: AsyncGenerator, event_loop: AbstractEventLoop ) -> Generator: