Spaces:
Sleeping
Sleeping
| import gradio as gr | |
| import yaml | |
| import os | |
| import datetime | |
| from llm import AVAILABLE_LLMS, create_client, get_response_from_llm, get_batch_responses_from_llm | |
| from character import load_character_config, get_character_response, build_prompt, build_system_prompt | |
| # Directory containing character YAML files | |
| CHARACTER_DIR = "characters" | |
| # Directory to save conversations | |
| CONVERSATION_DIR = "conversations" | |
| def list_character_files(): | |
| """List all YAML files in the character directory.""" | |
| return [f for f in os.listdir(CHARACTER_DIR) if f.endswith('.yaml')] | |
| def load_all_characters(): | |
| """Load all character configurations from YAML files.""" | |
| characters = {} | |
| character_files = list_character_files() | |
| for filename in character_files: | |
| filepath = os.path.join(CHARACTER_DIR, filename) | |
| config = load_character_config(filepath) | |
| character_name = config['character']['name'] | |
| characters[character_name] = {'config': config, 'file': filepath} | |
| return characters | |
| # Load all characters | |
| all_characters = load_all_characters() | |
| class ChatApp: | |
| """A class to encapsulate the chat application logic.""" | |
| def __init__(self): | |
| self.available_llms = AVAILABLE_LLMS | |
| self.api_key_dict = {} # To store API keys for different LLMs | |
| self.all_characters = all_characters | |
| self.character_config = None | |
| self.load_default_character() | |
| # Ensure the conversation directory exists | |
| if not os.path.exists(CONVERSATION_DIR): | |
| os.makedirs(CONVERSATION_DIR) | |
| def load_default_character(self): | |
| """Load the default character configuration.""" | |
| if self.all_characters: | |
| default_character_name = list(self.all_characters.keys())[0] | |
| self.character_config = self.all_characters[default_character_name]['config'] | |
| else: | |
| self.character_config = None | |
| def set_api_key(self, api_key, selected_llm): | |
| """Set the API key based on the selected LLM.""" | |
| api_key = api_key.strip() | |
| self.api_key_dict[selected_llm] = api_key # Store the API key | |
| return gr.update(value='', placeholder='API Key Set!') | |
| def select_character(self, character_name): | |
| """Update the selected character.""" | |
| if character_name == "New Character": | |
| # Initialize a new character configuration with empty fields | |
| self.character_config = { | |
| 'character': { | |
| 'name': '', | |
| 'pronouns': '', | |
| 'alternate_names': [], | |
| 'age': '', | |
| 'core_description': '', | |
| 'motivations': [], | |
| 'flaws': [], | |
| 'dialogue_style': '', | |
| 'example_dialogue': [] | |
| }, | |
| 'personality': { | |
| 'traits': [], | |
| 'mood': {}, | |
| 'personality': {} | |
| }, | |
| 'knowledge_and_cognition': { | |
| 'details': [], | |
| 'dementia_facts': [], | |
| 'loneliness_info': [] | |
| } | |
| } | |
| # Clear the UI components | |
| return ('', '', '', '', '', '', '', '', '', '', '', '', '', '', '', f"Creating a new character.") | |
| elif character_name in self.all_characters: | |
| self.character_config = self.all_characters[character_name]['config'] | |
| # Update the UI components with the character's data | |
| return ( | |
| self.character_config['character']['name'], | |
| self.character_config['character']['pronouns'], | |
| ', '.join(self.character_config['character'].get('alternate_names', [])), | |
| self.character_config['character']['age'], | |
| self.character_config['character']['core_description'], | |
| '\n'.join(self.character_config['character']['motivations']), | |
| '\n'.join(self.character_config['character']['flaws']), | |
| self.character_config['character']['dialogue_style'], | |
| '\n'.join([f"{list(d.keys())[0]}: {list(d.values())[0]}" for d in self.character_config['character']['example_dialogue']]), | |
| ', '.join(self.character_config['personality']['traits']), | |
| ', '.join([f"{k}: {v}" for k, v in self.character_config['personality']['mood'].items()]), | |
| ', '.join([f"{k}: {v}" for k, v in self.character_config['personality']['personality'].items()]), | |
| '\n'.join(self.character_config['knowledge_and_cognition']['details']), | |
| '\n'.join(self.character_config['knowledge_and_cognition']['dementia_facts']), | |
| '\n'.join(self.character_config['knowledge_and_cognition']['loneliness_info']), | |
| f"Character {character_name} loaded successfully." | |
| ) | |
| else: | |
| return (gr.update(),) * 15 + (f"Character {character_name} not found.",) | |
| def update_and_save_character_config( | |
| self, | |
| name, pronouns, alternate_names, age, core_description, motivations, flaws, | |
| dialogue_style, example_dialogue, | |
| traits, mood, personality, | |
| details, dementia_facts, loneliness_info, | |
| selected_character_name | |
| ): | |
| """Update the character configuration based on user input and save it.""" | |
| if self.character_config is None: | |
| return "No character selected.", gr.update() | |
| # Update character details | |
| self.character_config['character']['name'] = name | |
| self.character_config['character']['pronouns'] = pronouns | |
| self.character_config['character']['alternate_names'] = [n.strip() for n in alternate_names.split(',') if n.strip()] | |
| self.character_config['character']['age'] = age | |
| self.character_config['character']['core_description'] = core_description | |
| self.character_config['character']['motivations'] = [m.strip() for m in motivations.split('\n') if m.strip()] | |
| self.character_config['character']['flaws'] = [f.strip() for f in flaws.split('\n') if f.strip()] | |
| self.character_config['character']['dialogue_style'] = dialogue_style | |
| # Process example dialogue | |
| example_dialogue_list = [] | |
| if example_dialogue.strip(): | |
| lines = example_dialogue.strip().split('\n') | |
| for line in lines: | |
| if ':' in line: | |
| speaker, text = line.split(':', 1) | |
| example_dialogue_list.append({speaker.strip(): text.strip()}) | |
| self.character_config['character']['example_dialogue'] = example_dialogue_list | |
| # Update personality traits | |
| self.character_config['personality']['traits'] = [trait.strip() for trait in traits.split(',') if trait.strip()] | |
| # Update mood | |
| mood_dict = {} | |
| for item in mood.split(','): | |
| if ':' in item: | |
| key, value = item.split(':', 1) | |
| mood_dict[key.strip()] = float(value.strip()) | |
| self.character_config['personality']['mood'] = mood_dict | |
| # Update personality metrics | |
| personality_dict = {} | |
| for item in personality.split(','): | |
| if ':' in item: | |
| key, value = item.split(':', 1) | |
| personality_dict[key.strip()] = float(value.strip()) | |
| self.character_config['personality']['personality'] = personality_dict | |
| # Update knowledge and cognition | |
| self.character_config['knowledge_and_cognition']['details'] = [d.strip() for d in details.split('\n') if d.strip()] | |
| self.character_config['knowledge_and_cognition']['dementia_facts'] = [d.strip() for d in dementia_facts.split('\n') if d.strip()] | |
| self.character_config['knowledge_and_cognition']['loneliness_info'] = [d.strip() for d in loneliness_info.split('\n') if d.strip()] | |
| if selected_character_name == "New Character": | |
| # Create a new character file | |
| new_character_name = name.strip() | |
| if not new_character_name: | |
| return "Please enter a name for the new character.", gr.update() | |
| # Generate a filename based on the character's name | |
| filename = new_character_name.lower().replace(' ', '_') + '.yaml' | |
| filepath = os.path.join(CHARACTER_DIR, filename) | |
| if os.path.exists(filepath): | |
| return f"A character with the name '{new_character_name}' already exists.", gr.update() | |
| else: | |
| try: | |
| with open(filepath, "w") as file: | |
| yaml.dump(self.character_config, file) | |
| # Update the all_characters dictionary and the dropdown options | |
| self.all_characters[new_character_name] = {'config': self.character_config, 'file': filepath} | |
| # Update the character dropdown options | |
| character_names = list(self.all_characters.keys()) | |
| return ( | |
| f"New character '{new_character_name}' created and saved successfully.", | |
| gr.update(choices=["New Character"] + character_names, value=new_character_name) | |
| ) | |
| except Exception as e: | |
| return f"Error saving new character: {e}", gr.update() | |
| else: | |
| # Save the updated character configuration | |
| try: | |
| filepath = self.all_characters[selected_character_name]['file'] | |
| with open(filepath, "w") as file: | |
| yaml.dump(self.character_config, file) | |
| return f"Character configuration for '{selected_character_name}' updated and saved successfully.", gr.update() | |
| except Exception as e: | |
| return f"Error saving character configuration: {e}", gr.update() | |
| def chat(self, user_input, chat_history, selected_llm): | |
| """Handle the chat interaction.""" | |
| if not user_input: | |
| return "", [] | |
| if self.character_config is None: | |
| return "", [{"role": "assistant", "content": "No character selected."}] | |
| # Set the API key based on the selected LLM | |
| api_key = self.api_key_dict.get(selected_llm, None) | |
| if api_key: | |
| self.set_environment_api_key(selected_llm, api_key) | |
| else: | |
| return "", [{"role": "assistant", "content": "Please set the API key for the selected LLM."}] | |
| # Get the character's response using the provided function | |
| try: | |
| response_content = get_character_response(user_input, self.character_config, llm_model=selected_llm) | |
| # Create new messages list with proper format | |
| messages = chat_history + [ | |
| {"role": "user", "content": user_input}, | |
| {"role": "assistant", "content": response_content} | |
| ] | |
| return "", messages | |
| except Exception as e: | |
| error_message = [{"role": "assistant", "content": f"Error during LLM processing: {e}"}] | |
| return "", error_message | |
| def set_environment_api_key(self, llm, api_key): | |
| """Set the environment variable for the API key based on the LLM.""" | |
| if llm.startswith('gpt') or llm in ["o1-preview-2024-09-12", "o1-mini-2024-09-12"]: | |
| os.environ["OPENAI_API_KEY"] = api_key | |
| elif llm.startswith('deepseek'): | |
| os.environ["DEEPSEEK_API_KEY"] = api_key | |
| elif llm in ["llama3.1-405b", "llama3.1-405b-instruct"]: | |
| os.environ["OPENROUTER_API_KEY"] = api_key | |
| def save_conversation(self, chat_history): | |
| """Save the conversation to a file in the /data/conversations directory.""" | |
| # Use the persistent storage path for Hugging Face Spaces | |
| folder = os.path.join("/data/conversations") | |
| if not os.path.exists(folder): | |
| os.makedirs(folder, exist_ok=True) | |
| timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f") | |
| character_name = self.character_config['character']['name'] if self.character_config else "AI" | |
| safe_char_name = "".join(c for c in character_name if c.isalnum() or c in ('-', '_')).lower() | |
| filename = f"chat_{timestamp}_{safe_char_name}.txt" | |
| filepath = os.path.join(folder, filename) | |
| try: | |
| with open(filepath, 'w', encoding='utf-8') as f: | |
| f.write(f"Conversation with {character_name}\n") | |
| f.write(f"Timestamp: {timestamp}\n") | |
| f.write("-" * 50 + "\n\n") | |
| for message in chat_history: | |
| role = message["role"] | |
| content = message["content"] | |
| if role == "user": | |
| f.write(f"User: {content}\n") | |
| else: | |
| f.write(f"{character_name}: {content}\n") | |
| f.write("\n") | |
| return f"Conversation saved as {filename}" | |
| except Exception as e: | |
| return f"Error saving conversation: {e}" | |
| def new_conversation(self): | |
| """Clear the chat history.""" | |
| return [], [] | |
| def collect_data(self, user_question, k, selected_llm): | |
| """Have LLM respond K times to a user question for data collection.""" | |
| if not user_question: | |
| return "Please enter a question.", None | |
| if self.character_config is None: | |
| return "No character selected.", None | |
| api_key = self.api_key_dict.get(selected_llm, None) | |
| if api_key: | |
| self.set_environment_api_key(selected_llm, api_key) | |
| else: | |
| return "Please set the API key for the selected LLM.", None | |
| prompt = build_prompt(user_question, self.character_config) | |
| system_prompt = build_system_prompt(self.character_config['character']['name']) | |
| client, model = create_client(selected_llm) | |
| try: | |
| responses, _ = get_batch_responses_from_llm( | |
| msg=prompt, | |
| client=client, | |
| model=model, | |
| system_message=system_prompt, | |
| temperature=0.9, | |
| n_responses=int(k) | |
| ) | |
| # Format responses for the Dataframe with separate up/down columns | |
| responses_data = [[str(i+1), resp, "⬆️", "⬇️"] for i, resp in enumerate(responses)] | |
| return "", responses_data | |
| except Exception as e: | |
| return f"Error during LLM processing: {e}", None | |
| def save_data_collection(self, user_question, ranked_responses): | |
| """Save the question, responses, and their rankings in a format optimized for DPO training.""" | |
| # Use the persistent storage path for Hugging Face Spaces | |
| folder = os.path.join("/data/dpo_training") | |
| if not os.path.exists(folder): | |
| os.makedirs(folder, exist_ok=True) | |
| try: | |
| timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f") | |
| character_config = self.character_config or {} | |
| character_name = character_config.get('character', {}).get('name', "Unknown") | |
| safe_char_name = "".join(c for c in character_name if c.isalnum() or c in ('-', '_')).lower() | |
| jsonl_filename = f"dpo_{timestamp}_{safe_char_name}.jsonl" | |
| jsonl_file = os.path.join(folder, jsonl_filename) | |
| system_prompt = build_system_prompt(character_name) if self.character_config else "" | |
| import json | |
| with open(jsonl_file, 'w', encoding='utf-8') as f: | |
| for i, (rank1, chosen_response, _, _) in enumerate(ranked_responses[:-1]): | |
| for j, (rank2, rejected_response, _, _) in enumerate(ranked_responses[i+1:], i+1): | |
| messages = [ | |
| { | |
| "role": "system", | |
| "content": system_prompt | |
| }, | |
| { | |
| "role": "user", | |
| "content": user_question | |
| } | |
| ] | |
| dpo_example = { | |
| "messages": messages, | |
| "chosen": chosen_response, | |
| "rejected": rejected_response, | |
| "prompt": user_question, | |
| "system_prompt": system_prompt, | |
| "character_name": character_name, | |
| "ranking_info": { | |
| "chosen_rank": int(rank1), | |
| "rejected_rank": int(rank2) | |
| }, | |
| "metadata": { | |
| "timestamp": timestamp, | |
| "session_id": timestamp | |
| } | |
| } | |
| f.write(json.dumps(dpo_example, ensure_ascii=False) + '\n') | |
| return f"Rankings saved to {jsonl_filename}" | |
| except Exception as e: | |
| return f"Error saving rankings: {e}" | |
| def move_row(self, data, evt: gr.SelectData): | |
| """Move a row up or down based on which column was clicked.""" | |
| if not data or not evt: | |
| return data | |
| row_idx = evt.index[0] | |
| col_idx = evt.index[1] # Get column index instead of name | |
| if col_idx == 2 and row_idx > 0: # Up column (index 2) | |
| # Swap with row above | |
| data[row_idx], data[row_idx-1] = data[row_idx-1], data[row_idx] | |
| elif col_idx == 3 and row_idx < len(data) - 1: # Down column (index 3) | |
| # Swap with row below | |
| data[row_idx], data[row_idx+1] = data[row_idx+1], data[row_idx] | |
| # Update ranks | |
| for i, row in enumerate(data): | |
| row[0] = str(i + 1) | |
| return data | |
| # Instantiate the ChatApp | |
| app = ChatApp() | |
| with gr.Blocks() as demo: | |
| with gr.Tab("Settings"): | |
| gr.Markdown("## LLM Selection & Configuration") | |
| llm_dropdown = gr.Dropdown( | |
| label="Select LLM", choices=app.available_llms | |
| ) | |
| api_key_input = gr.Textbox( | |
| label="Enter API Key (if required)", type="password", placeholder="Your API Key" | |
| ) | |
| set_api_button = gr.Button("Set API Key") | |
| set_api_button.click( | |
| app.set_api_key, inputs=[api_key_input, llm_dropdown], outputs=api_key_input | |
| ) | |
| gr.Markdown("## AI Character Configuration") | |
| # Character Selection Dropdown | |
| character_names = list(app.all_characters.keys()) | |
| character_dropdown = gr.Dropdown( | |
| label="Select Character", choices=["New Character"] + character_names, value=character_names[0] if character_names else "New Character" | |
| ) | |
| # Character Details | |
| with gr.Accordion("Character Details", open=True): | |
| name_input = gr.Textbox(label="Name") | |
| pronouns_input = gr.Textbox(label="Pronouns") | |
| alternate_names_input = gr.Textbox(label="Alternate Names (comma-separated)") | |
| age_input = gr.Textbox(label="Age") | |
| core_description_input = gr.Textbox(label="Core Description", lines=5) | |
| motivations_input = gr.Textbox(label="Motivations (one per line)", lines=3) | |
| flaws_input = gr.Textbox(label="Flaws (one per line)", lines=3) | |
| dialogue_style_input = gr.Textbox(label="Dialogue Style") | |
| example_dialogue_input = gr.Textbox(label="Example Dialogue (format: speaker: text)", lines=5) | |
| # Personality Traits | |
| with gr.Accordion("Personality", open=False): | |
| traits_input = gr.Textbox(label="Traits (comma-separated)") | |
| mood_input = gr.Textbox(label="Mood (format: emotion: value, comma-separated)") | |
| personality_input = gr.Textbox(label="Personality Metrics (format: trait: value, comma-separated)") | |
| # Knowledge and Cognition | |
| with gr.Accordion("Knowledge and Cognition", open=False): | |
| details_input = gr.Textbox(label="Details (one per line)", lines=5) | |
| dementia_facts_input = gr.Textbox(label="Dementia Facts (one per line)", lines=5) | |
| loneliness_info_input = gr.Textbox(label="Loneliness Info (one per line)", lines=5) | |
| # Update and Save Button | |
| update_and_save_button = gr.Button("Update and Save Character Configuration") | |
| update_status = gr.Textbox(label="Status", interactive=False) | |
| update_and_save_button.click( | |
| app.update_and_save_character_config, | |
| inputs=[ | |
| name_input, pronouns_input, alternate_names_input, age_input, core_description_input, motivations_input, | |
| flaws_input, dialogue_style_input, example_dialogue_input, | |
| traits_input, mood_input, personality_input, | |
| details_input, dementia_facts_input, loneliness_info_input, | |
| character_dropdown | |
| ], | |
| outputs=[update_status, character_dropdown], | |
| ) | |
| # Character Selection Handling | |
| character_dropdown.change( | |
| app.select_character, | |
| inputs=[character_dropdown], | |
| outputs=[ | |
| name_input, pronouns_input, alternate_names_input, age_input, core_description_input, motivations_input, | |
| flaws_input, dialogue_style_input, example_dialogue_input, | |
| traits_input, mood_input, personality_input, | |
| details_input, dementia_facts_input, loneliness_info_input, | |
| update_status | |
| ] | |
| ) | |
| with gr.Tab("Chat"): | |
| gr.Markdown("## Chat Interface") | |
| chatbot = gr.Chatbot(type="messages") | |
| user_input = gr.Textbox(label="Your Message:", placeholder="Type your message here...") | |
| send_button = gr.Button("Send") | |
| def submit_message(user_input, chat_history, selected_llm): | |
| new_user_input, chat_history = app.chat(user_input, chat_history, selected_llm) | |
| return "", chat_history | |
| send_button.click( | |
| submit_message, | |
| inputs=[user_input, chatbot, llm_dropdown], | |
| outputs=[user_input, chatbot] | |
| ) | |
| user_input.submit( | |
| submit_message, | |
| inputs=[user_input, chatbot, llm_dropdown], | |
| outputs=[user_input, chatbot] | |
| ) | |
| new_conversation_button = gr.Button("🆕 New Conversation") | |
| new_conversation_button.click(app.new_conversation, outputs=[chatbot, chatbot]) | |
| save_button = gr.Button("💾 Save Conversation") | |
| save_status = gr.Textbox(label="Save Status", interactive=False) | |
| save_button.click(app.save_conversation, inputs=[chatbot], outputs=save_status) | |
| with gr.Tab("Data Collection"): | |
| gr.Markdown(""" | |
| ## Data Collection Interface | |
| This interface helps collect multiple AI responses for the same question to evaluate response quality. | |
| ### How to use: | |
| 1. Enter your question | |
| 2. Choose how many responses you want | |
| 3. Generate responses | |
| 4. Use ⬆️ and ⬇️ buttons to reorder responses (top = best) | |
| 5. Save the rankings | |
| """) | |
| with gr.Row(): | |
| with gr.Column(scale=3): | |
| data_question_input = gr.Textbox( | |
| label="Question for the AI Character", | |
| placeholder="Type your question here...", | |
| lines=3 | |
| ) | |
| with gr.Column(scale=1): | |
| k_input = gr.Slider( | |
| minimum=2, | |
| maximum=10, | |
| value=5, | |
| step=1, | |
| label="Number of Responses to Generate" | |
| ) | |
| llm_dropdown_data = gr.Dropdown( | |
| label="Select Language Model", | |
| choices=app.available_llms, | |
| value=app.available_llms[0] if app.available_llms else None | |
| ) | |
| generate_button = gr.Button("🔄 Generate Responses", variant="primary") | |
| collection_status = gr.Textbox( | |
| label="Generation Status", | |
| interactive=False, | |
| visible=False | |
| ) | |
| # New interface for ranking responses | |
| responses_df = gr.Dataframe( | |
| headers=["Rank", "Response", "Up", "Down"], | |
| datatype=["str", "str", "str", "str"], | |
| col_count=(4, "fixed"), | |
| interactive=True, | |
| wrap=True, | |
| row_count=10, | |
| label="Click ⬆️ or ⬇️ to reorder responses (top = best)", | |
| type="array" | |
| ) | |
| def move_row(data, evt: gr.SelectData): | |
| """Move a row up or down based on which column was clicked.""" | |
| if not data or not evt: | |
| return data | |
| row_idx = evt.index[0] | |
| col_idx = evt.index[1] # Get column index instead of name | |
| if col_idx == 2 and row_idx > 0: # Up column (index 2) | |
| # Swap with row above | |
| data[row_idx], data[row_idx-1] = data[row_idx-1], data[row_idx] | |
| elif col_idx == 3 and row_idx < len(data) - 1: # Down column (index 3) | |
| # Swap with row below | |
| data[row_idx], data[row_idx+1] = data[row_idx+1], data[row_idx] | |
| # Update ranks | |
| for i, row in enumerate(data): | |
| row[0] = str(i + 1) | |
| return data | |
| # Add click handler for both Up and Down columns | |
| responses_df.select( | |
| move_row, | |
| inputs=[responses_df], | |
| outputs=[responses_df] | |
| ) | |
| submit_ranking_button = gr.Button("💾 Save Rankings", variant="secondary") | |
| data_save_status = gr.Textbox( | |
| label="Save Status", | |
| interactive=False, | |
| visible=False | |
| ) | |
| # Show status messages when they contain content | |
| collection_status.change( | |
| lambda x: gr.update(visible=bool(x.strip())), | |
| inputs=[collection_status], | |
| outputs=[collection_status] | |
| ) | |
| data_save_status.change( | |
| lambda x: gr.update(visible=bool(x.strip())), | |
| inputs=[data_save_status], | |
| outputs=[data_save_status] | |
| ) | |
| generate_button.click( | |
| app.collect_data, | |
| inputs=[data_question_input, k_input, llm_dropdown_data], | |
| outputs=[collection_status, responses_df] | |
| ) | |
| submit_ranking_button.click( | |
| app.save_data_collection, | |
| inputs=[data_question_input, responses_df], | |
| outputs=[data_save_status] | |
| ) | |
| # Initialize UI components with default character data | |
| if app.character_config: | |
| character_dropdown.value = app.character_config['character']['name'] | |
| name_input.value = app.character_config['character']['name'] | |
| pronouns_input.value = app.character_config['character']['pronouns'] | |
| alternate_names_input.value = ', '.join(app.character_config['character'].get('alternate_names', [])) | |
| age_input.value = app.character_config['character']['age'] | |
| core_description_input.value = app.character_config['character']['core_description'] | |
| motivations_input.value = '\n'.join(app.character_config['character']['motivations']) | |
| flaws_input.value = '\n'.join(app.character_config['character']['flaws']) | |
| dialogue_style_input.value = app.character_config['character']['dialogue_style'] | |
| example_dialogue_input.value = '\n'.join([f"{list(d.keys())[0]}: {list(d.values())[0]}" for d in app.character_config['character']['example_dialogue']]) | |
| traits_input.value = ', '.join(app.character_config['personality']['traits']) | |
| mood_input.value = ', '.join([f"{k}: {v}" for k, v in app.character_config['personality']['mood'].items()]) | |
| personality_input.value = ', '.join([f"{k}: {v}" for k, v in app.character_config['personality']['personality'].items()]) | |
| details_input.value = '\n'.join(app.character_config['knowledge_and_cognition']['details']) | |
| dementia_facts_input.value = '\n'.join(app.character_config['knowledge_and_cognition']['dementia_facts']) | |
| loneliness_info_input.value = '\n'.join(app.character_config['knowledge_and_cognition']['loneliness_info']) | |
| demo.launch() | |