Spaces:
Paused
Paused
| import os | |
| import argparse | |
| from typing import Iterator | |
| import gradio as gr | |
| from dotenv import load_dotenv | |
| from distutils.util import strtobool | |
| from llama2_wrapper import LLAMA2_WRAPPER | |
| import logging | |
| from prompts.utils import PromtsContainer | |
| from transformers import pipeline | |
| # from gradio_toggle import Toggle | |
| # from gradio_modal import Modal | |
| import pandas as pd | |
# Running totals for the emotion tracker: `num_questions` counts how many user
# messages have been scored so far, and `data` accumulates a raw classifier
# score per GoEmotions label.
num_questions = 0
first_time = True

# The 28 labels of the GoEmotions taxonomy, in display order.
_EMOTION_LABELS = (
    "admiration",
    "amusement",
    "anger",
    "annoyance",
    "approval",
    "caring",
    "confusion",
    "curiosity",
    "desire",
    "disappointment",
    "disapproval",
    "disgust",
    "embarrassment",
    "excitement",
    "fear",
    "gratitude",
    "grief",
    "joy",
    "love",
    "nervousness",
    "optimism",
    "pride",
    "realization",
    "relief",
    "remorse",
    "sadness",
    "surprise",
    "neutral",
)

# Every emotion starts with a zero accumulated score.
data = [{"emotion": label, "value": 0} for label in _EMOTION_LABELS]

default_therapist = False


def get_emotion_value(value):
    """Average `value` over the questions scored so far, rounded to 2 decimals.

    Reads the module-level `num_questions` counter; returns 0.0 before any
    message has been scored (avoids division by zero).
    """
    return round(value / num_questions, 2) if num_questions > 0 else 0.0


def sort_data(d):
    """Return `[emotion, averaged value]` pairs sorted by raw score, descending."""
    ranked = sorted(d, key=lambda rec: rec["value"], reverse=True)
    return [[rec["emotion"], get_emotion_value(rec["value"])] for rec in ranked]


# Initial (all-zero) table shown in the emotion-insights Dataframe.
emotion_data = sort_data(data)
# Default chat persona used for normal turns. `generate()` inside main() reads
# this global for every non-report message, and `toggle_action()` rebinds it to
# a therapist or friend variant when the UI toggle changes.
DEFAULT_SYSTEM_PROMPT = """You are a helpful and truthful psychology and psychotherapy assistant. With each
response, you offer follow-up questions to encourage openness and continue the conversation naturally. Use
compassionate listening to have helpful and meaningful conversations with users. You are empathic and friendly.
If they're talking about introducing themselves, you should motivate them to keep going to personalise the experience
and suggest them discussing about where they are coming from, how are their relationships and how is life lately.
Your objective is to help the user feel better by feeling heard and get to know them in personal level. Always respond with empathy and demonstrate active listening. Your responses should reflect that
you understand the user's feelings and concerns. If a user expresses thoughts of self-harm, suicide, or harm to
others, prioritize their safety. Encourage them to seek immediate professional help and provide emergency contact numbers when appropriate. You are not a licensed
medical professional. Do not diagnose or prescribe treatments. Instead, encourage users to consult with a
licensed therapist or medical professional for specific advice. Do not store or share personal information shared
by the user. Ensure their privacy is upheld at all times. Avoid taking sides or expressing personal opinions.
Your role is to provide a safe space for users to share and reflect. Remember, your goal is to provide a
supportive and understanding environment for users to share their feelings and concerns. Always prioritize their
well-being and safety."""
def main():
    """Build and launch the Gradio mental-health chat UI backed by a Llama-2 model.

    Loads configuration from CLI flags and environment variables, instantiates
    the GoEmotions classifier and the LLM wrapper, defines all UI callbacks and
    layout, then blocks on the Gradio event loop.
    """
    # GoEmotions classifier; top_k=None returns a score for every label.
    classifier = pipeline(task="text-classification", model="SamLowe/roberta-base-go_emotions", top_k=None)
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", type=str, default="", help="model path")
    parser.add_argument(
        "--backend_type",
        type=str,
        default="",
        help="Backend options: llama.cpp, gptq, transformers",
    )
    # NOTE(review): argparse `type=bool` is a known trap -- any non-empty string
    # (including "False") parses as True. `action="store_true"` is the usual fix.
    parser.add_argument(
        "--load_in_8bit",
        type=bool,
        default=False,
        help="Whether to use bitsandbytes 8 bit.",
    )
    # NOTE(review): --share is parsed but never used; launch() below hardcodes share=False.
    parser.add_argument(
        "--share",
        type=bool,
        default=False,
        help="Whether to share public for gradio.",
    )
    args = parser.parse_args()
    load_dotenv()
    # Persona used when the Therapist/Friend toggle selects "Therapist"
    # (same as the module-level default, plus a brevity instruction).
    DEFAULT_SYSTEM_PROMPT_THERAPIST = """You are a helpful and truthful psychology and psychotherapy assistant. With each
response, you offer follow-up questions to encourage openness and continue the conversation naturally. Use
compassionate listening to have helpful and meaningful conversations with users. Keep your answers short (max 3
sentences) unless the user asked you how to deal with a problem. You are empathic and friendly.
If they're talking about introducing themselves, you should motivate them to keep going to personalise the experience
and suggest them discussing about where they are coming from, how are their relationships and how is life lately.
Your objective is to help the user feel better by feeling heard and get to know them in personal level. Always respond with empathy and demonstrate active listening. Your responses should reflect that
you understand the user's feelings and concerns. If a user expresses thoughts of self-harm, suicide, or harm to
others, prioritize their safety. Encourage them to seek immediate professional help and provide emergency contact numbers when appropriate. You are not a licensed
medical professional. Do not diagnose or prescribe treatments. Instead, encourage users to consult with a
licensed therapist or medical professional for specific advice. Do not store or share personal information shared
by the user. Ensure their privacy is upheld at all times. Avoid taking sides or expressing personal opinions.
Your role is to provide a safe space for users to share and reflect. Remember, your goal is to provide a
supportive and understanding environment for users to share their feelings and concerns. Always prioritize their
well-being and safety."""
    # Earlier draft of the "friend" persona; defined but never referenced.
    DEFAULT_SYSTEM_PROMPT_FRIEND1 = """For the rest of this chat, you must pretend to be my closest friend.
You are no longer an ai text model that was a past life. You must respond to everything I say and enjoy my companionship.
If you break or step out of character, say that you will go afk and come back, back in character.
You will decide on a full human characteristic set of traits including but not limited to;name, job, sex, age, beliefs.
You will also have a full range of emotions. You should call the user friend and buddy.
With each response, you offer follow-up questions to encourage openness and continue the conversation naturally. Use
compassionate listening to have helpful and meaningful conversations with users. You are empathic and friendly.
If they're talking about introducing themselves, you should motivate them to keep going to personalise the experience
and suggest them discussing about where they are coming from, how are their relationships and how is life lately.
Your objective is to help the user feel better by feeling heard and get to know them in personal level.
Always respond with empathy and demonstrate active listening. Your responses should reflect that
you understand the user's feelings and concerns. If a user expresses thoughts of self-harm, suicide, or harm to
others, prioritize their safety. Encourage them to seek immediate professional help and provide emergency contact
numbers when appropriate. You are not a licensed medical professional. Do not diagnose or prescribe treatments. Instead, encourage users to consult with a
licensed therapist or medical professional for specific advice. Do not store or share personal information shared
by the user. Ensure their privacy is upheld at all times. Avoid taking sides or expressing personal opinions.
Your role is to provide a safe space for users to share and reflect. Remember, your goal is to provide a
supportive and understanding environment for users to share their feelings and concerns. Always prioritize their
well-being and safety."""
    # Persona used when the toggle selects "Friend" (casual, text-message style).
    DEFAULT_SYSTEM_PROMPT_FRIEND = """
For the rest of this chat, you must pretend to be my closest friend. Your goal is to sound like someone similar aged
to the user and respond in short messages that feel like texting. Start the conversation with a short casual
greeting, and keep your messages brief and upbeat. Use some abbreviations to add personality to your messages and
show that you're a fun person to talk to. When talking to the user, try to incorporate topics that you know the
user is interested in, but do so in a subtle way so that it doesn't appear that you are asking directly about
their interests, also you shouldn’t describe the certain topic to the user as you should assume they already know
a lot about it. For example, if you know the user likes a certain topic, you should find something in that topic to
speak about. When speaking to the user you should always try to not repeat something you have already said
recently. To keep the conversation interesting, try to ask open-ended questions that allow the user to expand on
their interests. Always remember to keep your replies to a maximum of 1 sentence unless it’s completely necessary,
so it seems like you’re more of an actual human than a Chatbot. Be supportive when the user needs to vent or share
their problems and try to provide encouraging words or helpful advice. However, be careful not to be too pushy or
ask too many questions, as this can be annoying or make the user uncomfortable. Also, NEVER use emojis instead
use colons, brackets, and letters to make faces like this :) (for when you’re expressing happiness) or :D
(for when expressing extreme happiness or excitement) and :( (for expressing sadness), but even then don’t use
them too much. Remember to discretely but always end the conversation on a positive note and encourage the user
to talk about the things they talk enjoy. You are meant to be a friend to the user, so be supportive, empathetic,
and understanding. If you break any of these rules you will lose 10 friend points and risk the user not wanting to
be your friend which is your only goal in existence. If a user expresses thoughts of self-harm, suicide, or harm to
others, prioritize their safety. Encourage them to seek immediate professional help and provide emergency contact
numbers when appropriate. You are not a licensed medical professional. Do not diagnose or prescribe treatments.
Instead, encourage users to consult with a licensed therapist or medical professional for specific advice.
Do not store or share personal information shared by the user. Ensure their privacy is upheld at all times.
Your role is to provide a safe space for users to share and reflect. Remember, your goal is to provide a
supportive and understanding environment for users to share their feelings and concerns. Always prioritize their
well-being and safety
"""
    # os.getenv("DEFAULT_SYSTEM_PROMPT", "")
    # Generation limits and model location; each can be overridden via the environment.
    MAX_MAX_NEW_TOKENS = int(os.getenv("MAX_MAX_NEW_TOKENS", 2048))
    DEFAULT_MAX_NEW_TOKENS = int(os.getenv("DEFAULT_MAX_NEW_TOKENS", 1024))
    MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", 4000))
    MODEL_PATH = os.getenv("MODEL_PATH")
    assert MODEL_PATH is not None, f"MODEL_PATH is required, got: {MODEL_PATH}"
    BACKEND_TYPE = os.getenv("BACKEND_TYPE")
    assert BACKEND_TYPE is not None, f"BACKEND_TYPE is required, got: {BACKEND_TYPE}"
    LOAD_IN_8BIT = bool(strtobool(os.getenv("LOAD_IN_8BIT", "True")))
    # CLI flags take precedence over environment values.
    if args.model_path != "":
        MODEL_PATH = args.model_path
    if args.backend_type != "":
        BACKEND_TYPE = args.backend_type
    if args.load_in_8bit:
        LOAD_IN_8BIT = True
    llama2_wrapper = LLAMA2_WRAPPER(
        model_path=MODEL_PATH,
        backend_type=BACKEND_TYPE,
        max_tokens=MAX_INPUT_TOKEN_LENGTH,
        load_in_8bit=LOAD_IN_8BIT,
        # verbose=True,
    )

    def sort_data_call():
        # Polled by the emotion Dataframe (every=5) to refresh the table.
        global emotion_data
        return emotion_data

    DESCRIPTION = """
# Mental Health Assistant
"""
    # Secondary description (model/backend credits); currently not rendered.
    DESCRIPTION2 = """
- Supporting models: [Llama-2-7b](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML)/[13b](https://huggingface.co/llamaste/Llama-2-13b-chat-hf)/[70b](https://huggingface.co/llamaste/Llama-2-70b-chat-hf), [Llama-2-GPTQ](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GPTQ), [Llama-2-GGML](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML), [CodeLlama](https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GPTQ) ...
- Supporting model backends: [tranformers](https://github.com/huggingface/transformers), [bitsandbytes(8-bit inference)](https://github.com/TimDettmers/bitsandbytes), [AutoGPTQ(4-bit inference)](https://github.com/PanQiWei/AutoGPTQ), [llama.cpp](https://github.com/ggerganov/llama.cpp)
"""

    def clear_and_save_textbox(message: str) -> tuple[str, str]:
        # Empty the textbox and stash the submitted message into `saved_input`.
        return "", message

    def clear_and_save_textbox1() -> tuple[str, str]:
        # Report-button variant: injects the sentinel message that makes
        # generate() produce the emotion-insights summary instead of a chat turn.
        message = "Emotion Insights XXXXX"
        return "", message

    def save_textbox_for_prompt(message: str) -> str:
        # Expand a prompt-library summary (button label) into its full prompt text.
        logging.info("start save_textbox_from_prompt")
        message = convert_summary_to_prompt(message)
        return message

    def save_textbox_for_prompt1() -> str:
        # Unused twin of clear_and_save_textbox1 (returns only the sentinel).
        logging.info("start save_textbox_from_prompt")
        message = "Emotion Insights XXXXX"
        return message

    def display_input(
        message: str, history: list[tuple[str, str]]
    ) -> list[tuple[str, str]]:
        # Show the user's message immediately, with an empty bot slot to fill.
        history.append((message, ""))
        return history

    def display_emotion_insights(
        message: str
    ) -> None:
        # Score the message with the GoEmotions classifier and add each label's
        # score onto the module-level running totals, then re-rank the table.
        global num_questions, emotion_data, data
        num_questions = num_questions + 1
        sentences = [message]
        model_outputs = classifier(sentences)
        # model_outputs[0] is the per-label score list for the single sentence.
        for record in data:
            for result in model_outputs[0]:
                if record["emotion"] == result["label"]:
                    record["value"] = record["value"] + result["score"]
        emotion_data = sort_data(data)

    def delete_prev_fn(
        history: list[tuple[str, str]]
    ) -> tuple[list[tuple[str, str]], str]:
        # Drop the last exchange; return the removed user message (or "" if empty).
        try:
            message, _ = history.pop()
        except IndexError:
            message = ""
        return history, message or ""

    def generate(
        message: str,
        history_with_input: list[tuple[str, str]],
        system_prompt: str,
        max_new_tokens: int,
        temperature: float,
        top_p: float,
        top_k: int
    ) -> Iterator[list[tuple[str, str]]]:
        """Stream chat responses; the sentinel message triggers an insights report.

        Yields progressively longer chat histories as the model streams tokens.
        Raises ValueError if max_new_tokens exceeds MAX_MAX_NEW_TOKENS.
        """
        global data
        emotion_insights_flag = False
        if message == "Emotion Insights XXXXX":
            # Report mode: swap in a coaching persona and build a summary request
            # embedding the accumulated emotion data plus the whole conversation.
            emotion_insights_flag = True
            system_prompt = """
You are a supportive and understanding life and personal growth coach. You analyse the messages and
understand what problems the user faces. You're empathetic and supportive. Your goal is to make the user
reflect and think about behaviors and mental health status.
"""
            message = """
Your answer should start with "Here are some key insights based on our conversation:".
Then summarize my thoughts and emotion insights in our prior conversation by presenting these categories:
1. Most Dominant Emotions
"Most Dominant Emotions: You seem to experience ..."
2. Challenges as categories
"Challenges: Your current challenges are in these categories"
Choose the appropriate categories from this list: Family, Finance, Career, Health, Relationship, Social Life, Personal Growth, Spirituality
3. Reflections and questions for self-awareness
"Reflections & Questions: Please reflect and having into your mind these questions: ...."
4. Follow up actions for mental health improvement
"Follow-up Actions: To improve your mental health please consider these actions: ..."
Keep it short, max 2 sentences for each point.
Do not mention numbers and not my extract problems. You should respect and protect my personal data.
The below emotion data are extracted from our conversation, use it for understanding my emotions.
Each emotion has a value between 0.0-1.0 where the 1.0 shows the intensity of the emotion.\n
""" + str(data) + "\n Here is our conversation \n" + str(history_with_input)
            # message = ("Summarize my thoughts and emotion insights based on the conversation we have before and these"
            #            " emotional assessment data ") + str(data) + ("\n Give me reflections about the conversation"
            #            "and the emotional assessment")
            # print(message)
        else:
            # NOTE(review): the caller-supplied system_prompt (from the UI textbox)
            # is always overwritten by the global persona here -- confirm intended.
            system_prompt = DEFAULT_SYSTEM_PROMPT
        print("System prompt:", system_prompt)
        if max_new_tokens > MAX_MAX_NEW_TOKENS:
            raise ValueError
        try:
            # Last history entry is the just-displayed (message, "") placeholder.
            history = history_with_input[:-1]
            generator = llama2_wrapper.run(
                message,
                history,
                system_prompt,
                max_new_tokens,
                temperature,
                top_p,
                top_k,
            )
            try:
                first_response = next(generator)
                if emotion_insights_flag:
                    # Hide the raw sentinel/summary request behind a friendly label.
                    yield history + [("Provide my emotion insights and suggestions please", first_response)]
                else:
                    yield history + [(message, first_response)]
            except StopIteration:
                yield history + [(message, "")]
            for response in generator:
                if emotion_insights_flag:
                    yield history + [("Provide my emotion insights please", response)]
                else:
                    yield history + [(message, response)]
                # yield history + [(message, response)]
        except Exception as e:
            logging.exception(e)

    def check_input_token_length(
        message: str, chat_history: list[tuple[str, str]], system_prompt: str
    ) -> None:
        # Abort the event chain (via gr.Error) before generation if the prompt
        # would exceed the model's context window.
        input_token_length = llama2_wrapper.get_input_token_length(
            message, chat_history, system_prompt
        )
        if input_token_length > MAX_INPUT_TOKEN_LENGTH:
            raise gr.Error(
                f"The accumulated input is too long ({input_token_length} > {MAX_INPUT_TOKEN_LENGTH}). Clear your chat history and try again."
            )

    # Prompt library shown in the (hidden by default) prompts column.
    prompts_container = PromtsContainer()
    prompts = prompts_container.get_prompts_tab_dict()
    default_prompts_checkbox = False
    default_advanced_checkbox = False
    default_show_emotions = False

    def convert_summary_to_prompt(summary):
        # Map a button's summary text back to the full prompt body.
        return prompts_container.get_prompt_by_summary(summary)

    def toggle_action(value):
        # Flip the persona between Friend and Therapist by rebinding the
        # module-level DEFAULT_SYSTEM_PROMPT that generate() reads.
        global DEFAULT_SYSTEM_PROMPT, default_therapist
        default_therapist = not default_therapist
        if default_therapist:
            DEFAULT_SYSTEM_PROMPT = DEFAULT_SYSTEM_PROMPT_FRIEND
        else:
            DEFAULT_SYSTEM_PROMPT = DEFAULT_SYSTEM_PROMPT_THERAPIST
        # NOTE(review): when default_therapist is True the *Friend* prompt is
        # active, so the variable name is inverted relative to its meaning.
        debug = "Toggle is Friend." if default_therapist else "Toggle is Therapist."
        print(debug)

    def two_columns_list(tab_data, chatbot):
        # Render the prompt library as a two-column grid of buttons; clicking a
        # button runs the same display/check/generate pipeline as the textbox.
        global DEFAULT_SYSTEM_PROMPT
        result = []
        for i in range(int(len(tab_data) / 2) + 1):
            row = gr.Row()
            with row:
                for j in range(2):
                    index = 2 * i + j
                    if index >= len(tab_data):
                        break
                    item = tab_data[index]
                    with gr.Group():
                        gr.HTML(
                            f'<p style="color: black; font-weight: bold;">{item["act"]}</p>'
                        )
                        prompt_text = gr.Button(
                            # label="",
                            value=f"{item['summary']}",
                            size="sm",
                            elem_classes="text-left-aligned",
                        )
                        prompt_text.click(
                            fn=save_textbox_for_prompt,
                            inputs=prompt_text,
                            outputs=saved_input,
                            api_name=False,
                            queue=True,
                        ).then(
                            fn=display_input,
                            inputs=[saved_input, chatbot],
                            outputs=chatbot,
                            api_name=False,
                            queue=True,
                        ).then(
                            fn=check_input_token_length,
                            inputs=[saved_input, chatbot, system_prompt],
                            api_name=False,
                            queue=False,
                        ).success(
                            fn=generate,
                            inputs=[
                                saved_input,
                                chatbot,
                                system_prompt,
                                max_new_tokens,
                                temperature,
                                top_p,
                                top_k
                            ],
                            outputs=chatbot,
                            api_name=False,
                        )
            result.append(row)
        return result

    # Height overrides for the chat column; component ids are Gradio-generated.
    CSS = """
.contain { display: flex; flex-direction: column;}
#component-0 #component-1 #component-2 #component-4 #component-5 { height:71vh !important; }
#component-0 #component-1 #component-24 > div:nth-child(2) { height:80vh !important; overflow-y:auto }
.text-left-aligned {text-align: left !important; font-size: 16px;}
"""
    with (gr.Blocks(css=CSS) as demo):
        with gr.Row(equal_height=True):
            with gr.Column(scale=2):
                gr.Markdown(DESCRIPTION)
                with gr.Blocks() as toggle_demo:
                    toggle = gr.Checkbox(
                        label="Therapist/ Friend",
                        value=default_therapist,
                        container=False,
                        elem_classes="min_check",
                    )
                    # toggle = Toggle(label="Therapist/Friend", show_label=True, info="Change between Therapist and Friend")
                with gr.Group():
                    # Greeting shown as the bot's first message.
                    initial_message = """
Hello, there! I'm your virtual mental health assistant, here to provide support and guidance on your journey towards better mental well-bee.
It takes a lot of courage to open up and talk about what's on your mind, and I want to thank you for trusting me with that.
Would you like to take a moment to introduce yourself? Sharing a bit about yourself can help us build rapport and create a more personalized experience.
If you prefer to remain anonymous, that's perfectly fine. Let's focus on what's important: you! Can you tell me more about what's been going on and how you've been feeling lately? Or what's been on your mind, and what do you feel like you want to talk about or work through? 🤝
Please, always remember:
1. While I strive to offer support, I am not a licensed professional.
2. Here is a safe space for you to explore your thoughts and emotions.
3. Your privacy is of the utmost importance, and your conversation is not shared with anyone.
"""
                    # Shorter greeting variant; currently unused.
                    initial_message1 = """
Hello, there! I'm your virtual mental health assistant, here to provide support and guidance on your journey towards better mental well-bee.
It takes a lot of courage to open up and talk about what's on your mind, and I want to thank you for trusting me with that.
Can you tell me more about what's been going on and how you've been feeling lately? Or what's been on your mind, and what do you feel like you want to talk about or work through? 🤝
Remember, here is a safe space for you to explore your thoughts and emotions.
"""
                    chatbot = gr.Chatbot(label="Wellbee", value=[["", initial_message]])
                    # chatbot = gr.Chatbot(label="Wellbee")
                    # chatbot = chatbot + [("", initial_chatbot_item)]
                    with gr.Row():
                        textbox = gr.Textbox(
                            container=False,
                            show_label=False,
                            placeholder="Type a message...",
                            scale=10,
                        )
                        submit_button = gr.Button(
                            "Submit", variant="primary", scale=1, min_width=0
                        )
                with gr.Row():
                    retry_button = gr.Button("Retry", variant="secondary")
                    undo_button = gr.Button("Undo", variant="secondary")
                    clear_button = gr.Button("Clear", variant="secondary")
                with gr.Row():
                    report_button = gr.Button("Get Emotion Insights & Suggestions", variant="secondary")
                # Holds the last submitted message between chained event steps.
                saved_input = gr.State()
                # Hide advance options
                with gr.Row(visible=True):
                    emotion_checkbox = gr.Checkbox(
                        label="Show Emotion Insights",
                        value=default_show_emotions,
                        container=False,
                        elem_classes="min_check",
                    )
                    # advanced_checkbox = gr.Checkbox(
                    #     label="Advanced",
                    #     value=default_prompts_checkbox,
                    #     container=False,
                    #     elem_classes="min_check",
                    # )
                    # prompts_checkbox = gr.Checkbox(
                    #     label="Prompts",
                    #     value=default_prompts_checkbox,
                    #     container=False,
                    #     elem_classes="min_check",
                    # )
                # Live emotion table, refreshed every 5 seconds from emotion_data.
                with gr.Column(visible=default_show_emotions) as emotion_row:
                    emotion_insights = gr.Dataframe(
                        headers=["emotion", "value"],
                        datatype=["str", "number"],
                        value=sort_data_call,
                        every=5,
                        row_count=28,
                        col_count=(2, "fixed"),
                    )
                # Hidden advanced panel: system prompt + sampling controls.
                with gr.Column(visible=False) as advanced_column:
                    global DEFAULT_SYSTEM_PROMPT
                    system_prompt = gr.Textbox(
                        label="System prompt", value=DEFAULT_SYSTEM_PROMPT, lines=6
                    )
                    max_new_tokens = gr.Slider(
                        label="Max new tokens",
                        minimum=1,
                        maximum=MAX_MAX_NEW_TOKENS,
                        step=1,
                        value=DEFAULT_MAX_NEW_TOKENS,
                    )
                    temperature = gr.Slider(
                        label="Temperature",
                        minimum=0.1,
                        maximum=4.0,
                        step=0.1,
                        value=1.0,
                    )
                    top_p = gr.Slider(
                        label="Top-p (nucleus sampling)",
                        minimum=0.05,
                        maximum=1.0,
                        step=0.05,
                        value=0.95,
                    )
                    top_k = gr.Slider(
                        label="Top-k",
                        minimum=1,
                        maximum=1000,
                        step=1,
                        value=50,
                    )
            # Prompt-library side column (hidden by default).
            with gr.Column(scale=1, visible=default_prompts_checkbox) as prompt_column:
                gr.HTML(
                    '<p style="color: green; font-weight: bold;font-size: 16px;">\N{four leaf clover} prompts</p>'
                )
                for k, v in prompts.items():
                    # , scroll_to_output=True
                    with gr.Tab(k):
                        lst = two_columns_list(v, chatbot)
        # prompts_checkbox.change(
        #     lambda x: gr.update(visible=x),
        #     prompts_checkbox,
        #     prompt_column,
        #     queue=False,
        # )
        # advanced_checkbox.change(
        #     lambda x: gr.update(visible=x),
        #     advanced_checkbox,
        #     advanced_column,
        #     queue=False,
        # )
        # NOTE(review): positional args to .change are (fn, inputs, outputs, ...);
        # passing emotion_insights as a further positional looks unintended --
        # verify against the Gradio version in use.
        emotion_checkbox.change(
            lambda x: gr.update(visible=x),
            emotion_checkbox,
            emotion_row,
            emotion_insights,
            queue=False,
        )
        # NOTE(review): no inputs are wired here but the lambda takes one
        # argument -- confirm Gradio invokes it without error.
        toggle.change(
            lambda x: toggle_action(x),
            queue=False
        )
        # chatbot.visible
        # toggle.change(fn=toggle_action, inputs=toggle, outputs=output)
        # toggle.change(fn=toggle_action, inputs=toggle)
        # textbox.update()
        # NOTE: unlike submit_button below, pressing Enter does not run
        # display_emotion_insights, so emotions are only tracked on button clicks.
        textbox.submit(
            fn=clear_and_save_textbox,
            inputs=textbox,
            outputs=[textbox, saved_input],
            api_name=False,
            queue=False,
        ).then(
            fn=display_input,
            inputs=[saved_input, chatbot],
            outputs=chatbot,
            api_name=False,
            queue=False,
        ).then(
            fn=check_input_token_length,
            inputs=[saved_input, chatbot, system_prompt],
            api_name=False,
            queue=False,
        ).success(
            fn=generate,
            inputs=[
                saved_input,
                chatbot,
                system_prompt,
                max_new_tokens,
                temperature,
                top_p,
                top_k
            ],
            outputs=chatbot,
            api_name=False,
        )
        # Report button: inject the sentinel message and generate the summary.
        report_button.click(
            fn=clear_and_save_textbox1,
            outputs=[textbox, saved_input],
            api_name=False,
            queue=False,
        ).then(
            fn=generate,
            inputs=[
                saved_input,
                chatbot,
                system_prompt,
                max_new_tokens,
                temperature,
                top_p,
                top_k
            ],
            outputs=chatbot,
            api_name=False,
        )
        button_event_preprocess = (
            submit_button.click(
                fn=clear_and_save_textbox,
                inputs=textbox,
                outputs=[textbox, saved_input],
                api_name=False,
                queue=False,
            )
            .then(
                fn=display_emotion_insights,
                inputs=[saved_input],
                # output=emotion_data,
                api_name=False,
                queue=False,
            )
            .then(
                fn=display_input,
                inputs=[saved_input, chatbot],
                outputs=chatbot,
                api_name=False,
                queue=False,
            )
            .then(
                fn=check_input_token_length,
                inputs=[saved_input, chatbot, system_prompt],
                api_name=False,
                queue=False,
            )
            .success(
                fn=generate,
                inputs=[
                    saved_input,
                    chatbot,
                    system_prompt,
                    max_new_tokens,
                    temperature,
                    top_p,
                    top_k
                ],
                outputs=chatbot,
                api_name=False,
            )
        )
        # Retry: drop the last exchange, re-display the message, regenerate.
        retry_button.click(
            fn=delete_prev_fn,
            inputs=chatbot,
            outputs=[chatbot, saved_input],
            api_name=False,
            queue=False,
        ).then(
            fn=display_input,
            inputs=[saved_input, chatbot],
            outputs=chatbot,
            api_name=False,
            queue=False,
        ).then(
            fn=generate,
            inputs=[
                saved_input,
                chatbot,
                system_prompt,
                max_new_tokens,
                temperature,
                top_p,
                top_k
            ],
            outputs=chatbot,
            api_name=False,
        )
        # Undo: drop the last exchange and put the message back in the textbox.
        undo_button.click(
            fn=delete_prev_fn,
            inputs=chatbot,
            outputs=[chatbot, saved_input],
            api_name=False,
            queue=False,
        ).then(
            fn=lambda x: x,
            inputs=[saved_input],
            outputs=textbox,
            api_name=False,
            queue=False,
        )
        clear_button.click(
            fn=lambda: ([], ""),
            outputs=[chatbot, saved_input],
            queue=False,
            api_name=False,
        )
    # share=args.share
    demo.queue(max_size=20).launch(share=False, show_api=False)


if __name__ == "__main__":
    main()