"""Streamlit roleplay chat app: one shared base model + per-character LoRA adapters."""

import streamlit as st
import os
import sys
import asyncio

# Add backend to path for imports.
# NOTE(review): this puts backend/ itself on sys.path, but the import below uses
# the `backend.` package prefix, which resolves via the app directory (cwd), not
# this entry — confirm intended layout; one of the two is likely redundant.
backend_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'backend')
sys.path.insert(0, backend_path)

# Use lightweight character manager for HuggingFace Spaces
from backend.models.lightweight_character_manager import CharacterManager

# Page config
st.set_page_config(
    page_title="🎭 Roleplay Chat Box",
    page_icon="🎭",
    layout="wide"
)

# Initialize session state: the manager is loaded lazily on first run,
# messages accumulate across reruns, and the character defaults to 'moses'.
if 'character_manager' not in st.session_state:
    st.session_state.character_manager = None
if 'messages' not in st.session_state:
    st.session_state.messages = []
if 'current_character' not in st.session_state:
    st.session_state.current_character = 'moses'


def initialize_models():
    """Initialize the character manager (once per session).

    Returns:
        bool: True if the manager is ready (already loaded or loaded now),
        False if initialization raised — the error and traceback are shown
        in the UI instead of propagating.
    """
    if st.session_state.character_manager is None:
        with st.spinner("🔄 Loading character models..."):
            try:
                st.session_state.character_manager = CharacterManager()
                # asyncio.run creates, runs, and ALWAYS closes a fresh event
                # loop — the previous manual new_event_loop/close sequence
                # leaked the loop when initialize() raised.
                asyncio.run(st.session_state.character_manager.initialize())
                st.success("✅ Models loaded successfully!")
                return True
            except Exception as e:
                st.error(f"❌ Error loading models: {str(e)}")
                import traceback
                st.error(traceback.format_exc())
                return False
    return True


# Sidebar for character selection
with st.sidebar:
    st.title("🎭 Characters")

    character = st.radio(
        "Choose Character",
        options=["moses", "samsung_employee", "jinx"],
        format_func=lambda x: {
            "moses": "📜 Moses - Biblical Prophet",
            "samsung_employee": "💼 Samsung Employee - Tech Expert",
            "jinx": "💥 Jinx - Chaotic Genius"
        }[x],
        key="character_selector"
    )

    # Switching characters clears the conversation so history from one
    # persona never leaks into another's context.
    if character != st.session_state.current_character:
        st.session_state.current_character = character
        st.session_state.messages = []

    st.divider()

    if st.button("🗑️ Clear Chat"):
        st.session_state.messages = []
        st.rerun()

    st.divider()
    st.markdown("### About")
    st.markdown("""
    This app uses LoRA (Low-Rank Adaptation) to create unique
    character personalities.

    - **Base Model**: Qwen3-0.6B
    - **Adapters**: Character-specific LoRA weights
    - **Memory Efficient**: Shares one base model
    """)

# Main chat interface
st.title("🎭 Roleplay Chat Box")
st.markdown(f"Currently chatting with: **{st.session_state.current_character}**")

# Initialize models on first run; stop rendering if loading failed.
if not initialize_models():
    st.stop()

# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Chat input
if prompt := st.chat_input("Type your message here..."):
    # Add user message to chat
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate response
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            try:
                # Last 3 exchanges as context, normalizing any non-"user"
                # role to "assistant". (The previous if/else arms were
                # identical — collapsed to one expression.)
                conversation_history = [
                    {
                        "role": "user" if msg["role"] == "user" else "assistant",
                        "content": msg["content"],
                    }
                    for msg in st.session_state.messages[-6:]
                ]

                # Generate response
                response = st.session_state.character_manager.generate_response(
                    character_id=st.session_state.current_character,
                    user_message=prompt,
                    conversation_history=conversation_history[:-1]  # Exclude current message
                )

                st.markdown(response)
                st.session_state.messages.append({"role": "assistant", "content": response})
            except Exception as e:
                error_msg = f"❌ Error: {str(e)}"
                st.error(error_msg)
                st.session_state.messages.append({"role": "assistant", "content": error_msg})