# mirrors / app.py
# Shim
# f095630
# -*- coding: utf-8 -*-
"""
ΧžΧ¨ΧΧ•Χͺ (Mirrors) - Hebrew Self-Reflective AI Agent
Main application file with Gradio interface
"""
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import logging
import sys
from typing import List, Tuple, Optional
import os
import random
# Import our custom modules
from prompt_engineering import (
DEFAULT_PARTS,
get_system_prompt,
get_initial_prompts,
get_part_selection_text
)
from conversation_manager import ConversationManager, ConversationState
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class MirautrApp:
"""Main application class for ΧžΧ¨ΧΧ•Χͺ"""
def __init__(self):
self.model = None
self.tokenizer = None
self.generator = None
self.conversation_manager = ConversationManager()
self.model_available = False
self.setup_model()
def setup_model(self):
"""Initialize a Hebrew-capable model with proper fallback"""
try:
# Check environment
is_hf_spaces = os.getenv("SPACE_ID") is not None
is_test_mode = os.getenv("FORCE_LIGHT_MODEL") is not None
logger.info(f"Environment: HF_Spaces={is_hf_spaces}, Test_Mode={is_test_mode}")
# Try to load a model that can handle Hebrew
model_name = None
if is_test_mode:
# For testing, use a small model but focus on template responses
logger.info("Test mode - will use template-based responses primarily")
self.model_available = False
return
elif is_hf_spaces:
# For HF Spaces, try a lightweight multilingual model
try:
model_name = "microsoft/DialoGPT-small" # Start simple, can upgrade later
logger.info(f"HF Spaces: Attempting to load {model_name}")
except:
logger.info("HF Spaces: Model loading failed, using template responses")
self.model_available = False
return
else:
# For local, try better models
possible_models = [
"microsoft/DialoGPT-medium", # Better conversational model
"microsoft/DialoGPT-small" # Fallback
]
for model in possible_models:
try:
model_name = model
logger.info(f"Local: Attempting to load {model_name}")
break
except:
continue
if not model_name:
logger.info("Local: No suitable model found, using template responses")
self.model_available = False
return
# Load the model
if model_name:
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
if self.tokenizer.pad_token is None:
self.tokenizer.pad_token = self.tokenizer.eos_token
# Use CPU for stability across environments
self.model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype=torch.float32,
low_cpu_mem_usage=True
)
self.generator = pipeline(
"text-generation",
model=self.model,
tokenizer=self.tokenizer,
max_new_tokens=50,
temperature=0.7,
do_sample=True,
pad_token_id=self.tokenizer.pad_token_id,
return_full_text=False
)
self.model_available = True
logger.info(f"Model loaded successfully: {model_name}")
except Exception as e:
logger.warning(f"Model loading failed: {e}")
logger.info("Falling back to template-based responses")
self.model_available = False
def generate_persona_response(self, user_message: str, conversation_state: ConversationState) -> str:
"""
Generate persona-based response using templates with personality variations
This is our primary response system that always works
"""
part_info = DEFAULT_PARTS.get(conversation_state.selected_part, {})
persona_name = conversation_state.persona_name or part_info.get("default_persona_name", "Χ—ΧœΧ§ Χ€Χ Χ™ΧžΧ™")
# Get conversation context for more personalized responses
recent_context = ""
if conversation_state.conversation_history:
# Get last few exchanges for context
last_messages = conversation_state.conversation_history[-4:] # Last 2 exchanges
recent_context = " ".join([msg["content"] for msg in last_messages])
# Generate contextual responses based on part type
if conversation_state.selected_part == "Χ”Χ§Χ•Χœ Χ”Χ‘Χ™Χ§Χ•Χ¨ΧͺΧ™":
responses = [
f"אני {persona_name}, Χ”Χ§Χ•Χœ Χ”Χ‘Χ™Χ§Χ•Χ¨ΧͺΧ™ שלך. שמגΧͺΧ™ ΧžΧ” שאמרΧͺ גל '{user_message}' - אני Χ—Χ•Χ©Χ‘ Χ©Χ¦Χ¨Χ™Χš ΧœΧ‘Χ—Χ•ΧŸ אΧͺ Χ–Χ” Χ™Χ•ΧͺΧ¨ ΧœΧ’Χ•ΧžΧ§. ΧžΧ” Χ‘ΧΧžΧͺ Χ’Χ•ΧžΧ“ ΧžΧΧ—Χ•Χ¨Χ™ Χ”ΧžΧ—Χ©Χ‘Χ•Χͺ Χ”ΧΧœΧ”?",
f"אני {persona_name}. ΧžΧ” שאמרΧͺ ΧžΧ’Χ•Χ¨Χ¨ Χ‘Χ™ Χ©ΧΧœΧ•Χͺ. '{user_message}' - ΧΧ‘Χœ האם Χ–Χ” Χ‘ΧΧžΧͺ Χ”ΧžΧ¦Χ‘ Χ”ΧžΧœΧ? ΧΧ•ΧœΧ™ Χ™Χ© Χ›ΧΧŸ דברים שאΧͺΧ” לא רואה?",
f"Χ–Χ” {persona_name} ΧžΧ“Χ‘Χ¨. אני Χ©Χ•ΧžΧ’ אוΧͺך ΧΧ•ΧžΧ¨ '{user_message}', ΧΧ‘Χœ אני ΧžΧ¨Χ’Χ™Χ© שאנחנו צריכים ΧœΧ”Χ™Χ•Χͺ Χ™Χ•ΧͺΧ¨ Χ‘Χ™Χ§Χ•Χ¨Χͺיים Χ›ΧΧŸ. ΧžΧ” אΧͺΧ” לא מב׀ר לגצמך?",
f"אני {persona_name}, ואני Χ›ΧΧŸ Χ›Χ“Χ™ ΧœΧ’Χ–Χ•Χ¨ לך ΧœΧ¨ΧΧ•Χͺ אΧͺ Χ”ΧͺΧžΧ•Χ Χ” Χ”ΧžΧœΧΧ”. ΧžΧ” שאמרΧͺ גל '{user_message}' - Χ–Χ” Χ¨Χ§ Χ—Χ¦Χ™ ΧžΧ”Χ‘Χ™Χ€Χ•Χ¨, לא? בואנו Χ Χ—Χ€Χ•Χ¨ Χ’ΧžΧ•Χ§ Χ™Χ•ΧͺΧ¨."
]
elif conversation_state.selected_part == "Χ”Χ™ΧœΧ“/Χ” Χ”Χ€Χ Χ™ΧžΧ™Χͺ":
responses = [
f"אני {persona_name}, Χ”Χ™ΧœΧ“/Χ” Χ”Χ€Χ Χ™ΧžΧ™Χͺ שלך. ΧžΧ” שאמרΧͺ גל '{user_message}' גורם ΧœΧ™ ΧœΧ”Χ¨Χ’Χ™Χ©... Χ§Χ¦Χͺ Χ€Χ’Χ™Χ’. אΧͺΧ” Χ‘ΧΧžΧͺ Χ©Χ•ΧžΧ’ אוΧͺΧ™ Χ’Χ›Χ©Χ™Χ•?",
f"Χ–Χ” {persona_name}. '{user_message}' - Χ–Χ” ΧžΧ‘Χ”Χ™Χœ אוΧͺΧ™ Χ§Χ¦Χͺ. אני Χ¦Χ¨Χ™Χš ΧœΧ“Χ’Χͺ Χ©Χ”Χ›Χœ Χ™Χ”Χ™Χ” Χ‘Χ‘Χ“Χ¨. אΧͺΧ” Χ™Χ›Χ•Χœ ΧœΧ”Χ¨Χ’Χ™Χ’ אוΧͺΧ™?",
f"אני {persona_name}, Χ”Χ—ΧœΧ§ Χ”Χ¦Χ’Χ™Χ¨ שלך. ΧžΧ” שאמרΧͺ Χ Χ•Χ’Χ’ ΧœΧœΧ‘ Χ©ΧœΧ™. '{user_message}' - אני ΧžΧ¨Χ’Χ™Χ© Χ©Χ™Χ© Χ›ΧΧŸ ΧžΧ©Χ”Χ• Χ—Χ©Χ•Χ‘ שאני Χ¦Χ¨Χ™Χš ΧœΧ”Χ‘Χ™ΧŸ.",
f"Χ–Χ” {persona_name} ΧžΧ“Χ‘Χ¨ Χ‘Χ©Χ§Χ˜. אני Χ©Χ•ΧžΧ’ אΧͺ '{user_message}' Χ•Χ–Χ” ΧžΧ’Χ•Χ¨Χ¨ Χ‘Χ™ Χ¨Χ’Χ©Χ•Χͺ. האם Χ–Χ” Χ‘Χ˜Χ•Χ— ΧœΧ—Χ©Χ•Χ‘ גל Χ–Χ”? אני Χ§Χ¦Χͺ Χ—Χ¨Χ“."
]
elif conversation_state.selected_part == "Χ”ΧžΧ¨Χ¦Χ”":
responses = [
f"אני {persona_name}, Χ”ΧžΧ¨Χ¦Χ” שלך. שמגΧͺΧ™ אΧͺ '{user_message}' ואני Χ¨Χ•Χ¦Χ” ΧœΧ•Χ•Χ“Χ Χ©Χ›Χ•ΧœΧ Χ™Χ”Χ™Χ• Χ‘Χ‘Χ“Χ¨ גם Χ–Χ”. ΧΧ™Χš אנחנו Χ™Χ›Χ•ΧœΧ™Χ ל׀ΧͺΧ•Χ¨ אΧͺ Χ–Χ” Χ‘Χ¦Χ•Χ¨Χ” Χ©ΧͺΧ¨Χ¦Χ” אΧͺ Χ›Χ•ΧœΧ?",
f"Χ–Χ” {persona_name}. ΧžΧ” שאמרΧͺ גל '{user_message}' גורם ΧœΧ™ ΧœΧ“ΧΧ•Χ’ - האם Χ–Χ” Χ™Χ›Χ•Χœ ΧœΧ€Χ’Χ•Χ’ Χ‘ΧžΧ™Χ©Χ”Χ•? בואנו נמצא Χ“Χ¨Χš Χ’Χ“Χ™Χ Χ” Χ™Χ•ΧͺΧ¨ ΧœΧ”ΧͺΧžΧ•Χ“Χ“ גם Χ–Χ”.",
f"אני {persona_name}, ואני Χ¨Χ•Χ¦Χ” Χ©Χ›Χ•ΧœΧ Χ™Χ”Χ™Χ• ΧžΧ¨Χ•Χ¦Χ™Χ Χ›ΧΧŸ. '{user_message}' - Χ–Χ” נשמג Χ›ΧžΧ• ΧžΧ©Χ”Χ• Χ©Χ™Χ›Χ•Χœ ΧœΧ™Χ¦Χ•Χ¨ מΧͺΧ—. ΧΧ™Χš Χ Χ•Χ›Χœ ΧœΧ’Χ©Χ•Χͺ אΧͺ Χ–Χ” Χ‘Χ¦Χ•Χ¨Χ” Χ©Χ›Χ•ΧœΧ יאהבו?",
f"Χ–Χ” {persona_name} ΧžΧ“Χ‘Χ¨. אני Χ©Χ•ΧžΧ’ אΧͺ '{user_message}' Χ•ΧžΧ™Χ“ אני Χ—Χ•Χ©Χ‘ - ΧžΧ” אחרים Χ™Χ’Χ™Χ“Χ• גל Χ–Χ”? בואנו נוודא שאנחנו לא ׀וגגים באף אחד."
]
elif conversation_state.selected_part == "Χ”ΧžΧ’ΧŸ":
responses = [
f"אני {persona_name}, Χ”ΧžΧ’ΧŸ שלך. '{user_message}' - אני ΧžΧ’Χ¨Χ™Χš אΧͺ Χ”ΧžΧ¦Χ‘. האם Χ–Χ” Χ‘Χ˜Χ•Χ—? אני Χ›ΧΧŸ Χ›Χ“Χ™ ΧœΧ©ΧžΧ•Χ¨ Χ’ΧœΧ™Χš ΧžΧ›Χœ ΧžΧ” Χ©Χ™Χ›Χ•Χœ ΧœΧ€Χ’Χ•Χ’ Χ‘Χš.",
f"Χ–Χ” {persona_name}. שמגΧͺΧ™ ΧžΧ” שאמרΧͺ גל '{user_message}' ואני ΧžΧ™Χ“ Χ‘Χ›Χ•Χ Χ Χ•Χͺ. ΧžΧ” Χ”ΧΧ™Χ•ΧžΧ™Χ Χ›ΧΧŸ? ΧΧ™Χš אני Χ™Χ›Χ•Χœ ΧœΧ”Χ’ΧŸ Χ’ΧœΧ™Χš Χ˜Χ•Χ‘ Χ™Χ•ΧͺΧ¨?",
f"אני {persona_name}, Χ”Χ©Χ•ΧžΧ¨ שלך. ΧžΧ” שאמרΧͺ ΧžΧ’Χ•Χ¨Χ¨ Χ‘Χ™ אΧͺ Χ”ΧΧ™Χ Χ‘Χ˜Χ™Χ Χ§Χ˜Χ™Χ Χ”ΧžΧ’Χ Χ™Χ™Χ. '{user_message}' - בואנו נוודא שאΧͺΧ” Χ—Χ–Χ§ ΧžΧ‘Χ€Χ™Χ§ ΧœΧ”ΧͺΧžΧ•Χ“Χ“ גם Χ–Χ”.",
f"Χ–Χ” {persona_name} ΧžΧ“Χ‘Χ¨. אני Χ©Χ•ΧžΧ’ אΧͺ '{user_message}' ואני Χ—Χ•Χ©Χ‘ גל ΧΧ‘Χ˜Χ¨Χ˜Χ’Χ™Χ•Χͺ Χ”Χ’Χ Χ”. ΧžΧ” אנחנו צריכים ΧœΧ’Χ©Χ•Χͺ Χ›Χ“Χ™ Χ©ΧͺΧ”Χ™Χ” Χ‘Χ˜Χ•Χ—?"
]
elif conversation_state.selected_part == "Χ”Χ ΧžΧ Χ’/Χͺ":
responses = [
f"אני {persona_name}, Χ”Χ ΧžΧ Χ’/Χͺ שלך. ΧžΧ” שאמרΧͺ גל '{user_message}' גורם ΧœΧ™ ΧœΧ¨Χ¦Χ•Χͺ ΧœΧ”Χ™Χ‘Χ•Χ’ Χ§Χ¦Χͺ. ΧΧ•ΧœΧ™... לא חייבים ΧœΧ”ΧͺΧžΧ•Χ“Χ“ גם Χ–Χ” Χ’Χ›Χ©Χ™Χ•?",
f"Χ–Χ” {persona_name}. '{user_message}' - Χ–Χ” נשמג ΧžΧ•Χ¨Χ›Χ‘ Χ•ΧžΧ€Χ—Χ™Χ“. האם Χ™Χ© Χ“Χ¨Χš ΧœΧ”Χ™ΧžΧ Χ’ ΧžΧ–Χ”? ΧœΧ€Χ’ΧžΧ™Χ Χ’Χ“Χ™Χ£ לא ΧœΧ”Χ™Χ›Χ Χ‘ ΧœΧžΧ¦Χ‘Χ™Χ קשים.",
f"אני {persona_name}, ואני ΧžΧ¨Χ’Χ™Χ© Χ§Χ¦Χͺ Χ—Χ¨Χ“Χ” מ'{user_message}'. בואנו Χ Χ—Χ–Χ•Χ¨ ΧœΧ–Χ” אחר Χ›Χš? ΧΧ•ΧœΧ™ Χ’Χ›Χ©Χ™Χ• Χ–Χ” לא Χ”Χ–ΧžΧŸ Χ”ΧžΧͺאים.",
f"Χ–Χ” {persona_name} ΧžΧ“Χ‘Χ¨ Χ‘Χ–Χ”Χ™Χ¨Χ•Χͺ. ΧžΧ” שאמרΧͺ ΧžΧ’Χ•Χ¨Χ¨ Χ‘Χ™ Χ¨Χ¦Χ•ΧŸ ΧœΧ‘Χ¨Χ•Χ—. '{user_message}' - האם Χ‘ΧΧžΧͺ Χ¦Χ¨Χ™Χš ΧœΧ”ΧͺΧžΧ•Χ“Χ“ גם Χ–Χ” Χ’Χ›Χ©Χ™Χ•?"
]
else:
responses = [
f"אני {persona_name}, Χ—ΧœΧ§ Χ€Χ Χ™ΧžΧ™ שלך. שמגΧͺΧ™ אΧͺ '{user_message}' ואני Χ›ΧΧŸ Χ›Χ“Χ™ ΧœΧ©Χ•Χ—Χ— איΧͺך גל Χ–Χ”. ΧžΧ” Χ’Χ•Χ“ אΧͺΧ” ΧžΧ¨Χ’Χ™Χ© ΧœΧ’Χ‘Χ™ Χ”ΧžΧ¦Χ‘ Χ”Χ–Χ”?",
f"Χ–Χ” {persona_name}. ΧžΧ” שאמרΧͺ ΧžΧ’Χ Χ™Χ™ΧŸ אוΧͺΧ™. '{user_message}' - בואנו Χ Χ—Χ§Χ•Χ¨ אΧͺ Χ–Χ” Χ™Χ—Χ“ Χ•Χ Χ‘Χ™ΧŸ ΧžΧ” Χ–Χ” ΧΧ•ΧžΧ¨ Χ’ΧœΧ™Χš.",
f"אני {persona_name}, ואני Χ¨Χ•Χ¦Χ” ΧœΧ”Χ‘Χ™ΧŸ אוΧͺך Χ˜Χ•Χ‘ Χ™Χ•ΧͺΧ¨. '{user_message}' - ΧΧ™Χš Χ–Χ” ΧžΧ©Χ€Χ™Χ’ Χ’ΧœΧ™Χš Χ‘Χ¨ΧžΧ” Χ”Χ¨Χ’Χ©Χ™Χͺ?",
f"Χ–Χ” {persona_name} ΧžΧ“Χ‘Χ¨. אני Χ©Χ•ΧžΧ’ אΧͺ '{user_message}' ואני בקרן ΧœΧ“Χ’Χͺ Χ™Χ•ΧͺΧ¨. ΧžΧ” Χ’Χ•Χ“ Χ™Χ© Χ‘Χš בנושא Χ”Χ–Χ”?"
]
# Select response based on context or randomly
if "Χ€Χ—Χ“" in user_message or "Χ—Χ¨Χ“Χ”" in user_message:
# Choose responses that address fear/anxiety
selected_response = responses[1] if len(responses) > 1 else responses[0]
elif "Χ›Χ’Χ‘" in user_message or "ΧžΧ¨Χ’Χ™Χ© Χ¨Χ’" in user_message:
# Choose responses that address anger/negative feelings
selected_response = responses[2] if len(responses) > 2 else responses[0]
else:
# Choose randomly for variety
selected_response = random.choice(responses)
# Add user context if relevant
if conversation_state.user_context and len(conversation_state.conversation_history) < 4:
selected_response += f" Χ–Χ›Χ•Χ¨ שאמרΧͺ Χ‘Χ”ΧͺΧ—ΧœΧ”: {conversation_state.user_context[:100]}..."
return selected_response
def generate_response(self, user_message: str, conversation_state: ConversationState) -> str:
"""
Generate AI response - uses persona templates as primary with optional model enhancement
"""
try:
if not conversation_state.selected_part:
return "אני Χ¦Χ¨Χ™Χš Χ©ΧͺΧ‘Χ—Χ¨ Χ—ΧœΧ§ Χ€Χ Χ™ΧžΧ™ Χ›Χ“Χ™ ΧœΧ©Χ•Χ—Χ— איΧͺΧ•."
# Always generate persona-based response first (our reliable system)
persona_response = self.generate_persona_response(user_message, conversation_state)
# If model is available, try to enhance the response (but don't depend on it)
if self.model_available and self.generator:
try:
# Create a simple English prompt for the model to add conversational flow
english_prompt = f"User said they feel: {user_message[:50]}. Respond supportively in 1-2 sentences:"
model_output = self.generator(english_prompt, max_new_tokens=30, temperature=0.7)
if model_output and len(model_output) > 0:
# Extract any useful emotional tone or structure, but keep Hebrew content
model_text = model_output[0]["generated_text"].strip()
# Don't replace our Hebrew response, just use model for emotional context
logger.info(f"Model provided contextual input: {model_text[:50]}...")
except Exception as model_error:
logger.warning(f"Model enhancement failed: {model_error}")
# Continue with persona response only
# Always return the Hebrew persona response
return persona_response
except Exception as e:
logger.error(f"Error generating response: {e}")
return "Χ‘ΧœΧ™Χ—Χ”, בואנו Χ Χ Χ‘Χ” Χ©Χ•Χ‘. ΧΧ™Χš אΧͺΧ” ΧžΧ¨Χ’Χ™Χ© Χ’Χ›Χ©Χ™Χ•?"
    def create_main_interface(self):
        """Build and return the Gradio Blocks UI.

        The interface is a three-step wizard: (1) gather initial user
        context, (2) select and customize an inner part, (3) chat with that
        part. Steps are gr.Group containers toggled via gr.update(visible=...),
        so the output tuple order of every handler must match its listener's
        ``outputs`` list exactly.
        """
        # Custom CSS for right-to-left Hebrew rendering.
        css = """
        .rtl {
            direction: rtl;
            text-align: right;
        }
        .hebrew-text {
            font-family: 'Segoe UI', Tahoma, Arial, sans-serif;
            direction: rtl;
            text-align: right;
        }
        .welcome-text {
            font-size: 24px;
            font-weight: bold;
            color: #2c5aa0;
            margin: 20px 0;
        }
        """
        with gr.Blocks(css=css, title="ΧžΧ¨ΧΧ•Χͺ - ΧžΧ¨Χ—Χ‘ אישי ΧœΧ©Χ™Χ— Χ€Χ Χ™ΧžΧ™", theme=gr.themes.Soft()) as demo:
            # Per-browser-session state holding the ConversationState object.
            conversation_state = gr.State(self.conversation_manager.create_new_session())
            # Header banner reflects whether a model was loaded at startup.
            status_message = "πŸ€– ΧžΧ’Χ¨Χ›Χͺ ΧͺΧ’Χ•Χ‘Χ•Χͺ ΧžΧ•ΧͺאמΧͺ אישיΧͺ Χ€Χ’Χ™ΧœΧ”" if not self.model_available else "πŸ€– ΧžΧ’Χ¨Χ›Χͺ ΧžΧœΧΧ” גם ΧžΧ•Χ“Χœ AI Χ€Χ’Χ™ΧœΧ”"
            gr.HTML(f"""
            <div class="hebrew-text welcome-text" style="text-align: center;">
                πŸͺž ΧžΧ¨ΧΧ•Χͺ: ΧžΧ¨Χ—Χ‘ אישי ΧœΧ©Χ™Χ— Χ€Χ Χ™ΧžΧ™ Χ•ΧžΧ€ΧͺΧ— גם גצמך πŸͺž
            </div>
            <div class="hebrew-text" style="text-align: center; margin-bottom: 20px;">
                ΧžΧ§Χ•Χ Χ‘Χ˜Χ•Χ— ΧœΧ©Χ•Χ—Χ— גם Χ”Χ—ΧœΧ§Χ™Χ השונים של גצמך Χ•ΧœΧ€ΧͺΧ— Χ”Χ‘Χ Χ” Χ’Χ¦ΧžΧ™Χͺ Χ’ΧžΧ•Χ§Χ” Χ™Χ•ΧͺΧ¨
            </div>
            <div style="background-color: #e8f5e8; border: 1px solid #4caf50; padding: 10px; margin: 10px 0; border-radius: 5px; text-align: center;">
                <strong>{status_message}</strong>
            </div>
            """)
            # Main interface areas
            with gr.Column():
                # Step 1: initial context gathering.
                with gr.Group(visible=True) as initial_step:
                    gr.Markdown("## Χ©ΧœΧ‘ 1: Χ‘Χ€Χ¨/Χ‘Χ€Χ¨Χ™ גל גצמך", elem_classes=["hebrew-text"])
                    # NOTE(review): initial_prompts is assigned but never used
                    # below — TODO confirm whether get_initial_prompts() is
                    # still needed here or is a leftover.
                    initial_prompts = get_initial_prompts()
                    initial_choice = gr.Radio(
                        choices=[
                            ("Χͺאר/Χͺארי אΧͺ גצמך כאדם", "describe_self"),
                            ("ΧΧ™Χš אΧͺΧ” Χ—Χ•Χ©Χ‘ שאחרים רואים אוΧͺך?", "self_perception"),
                            ("איזה אΧͺΧ’Χ¨ אΧͺΧ” Χ—Χ•Χ•Χ” Χ’Χ›Χ©Χ™Χ• בחיים?", "current_challenge")
                        ],
                        label="Χ‘Χ—Χ¨/Χ‘Χ—Χ¨Χ™ נושא ΧœΧ©Χ™ΧͺΧ•Χ£:",
                        elem_classes=["hebrew-text"]
                    )
                    user_context_input = gr.Textbox(
                        label="Χ‘Χ€Χ¨/Χ‘Χ€Χ¨Χ™ Χ‘Χ›ΧžΧ” ΧžΧ©Χ€Χ˜Χ™Χ:",
                        placeholder="Χ›ΧͺΧ•Χ‘/Χ›ΧͺΧ‘Χ™ Χ›ΧΧŸ אΧͺ Χ”ΧžΧ—Χ©Χ‘Χ•Χͺ שלך...",
                        lines=4,
                        elem_classes=["hebrew-text"]
                    )
                    continue_to_parts = gr.Button("Χ”ΧžΧ©Χš ΧœΧ‘Χ—Χ™Χ¨Χͺ Χ—ΧœΧ§ Χ€Χ Χ™ΧžΧ™", variant="primary")
                # Step 2: inner-part selection (hidden until step 1 completes).
                with gr.Group(visible=False) as parts_step:
                    gr.Markdown("## Χ©ΧœΧ‘ 2: Χ‘Χ—Χ¨/Χ‘Χ—Χ¨Χ™ Χ—ΧœΧ§ Χ€Χ Χ™ΧžΧ™ ΧœΧ©Χ™Χ—Χ”", elem_classes=["hebrew-text"])
                    part_selection = gr.Radio(
                        choices=[
                            ("Χ”Χ§Χ•Χœ Χ”Χ‘Χ™Χ§Χ•Χ¨ΧͺΧ™ - Χ”Χ—ΧœΧ§ Χ©ΧžΧ Χ‘Χ” ΧœΧ”Χ’ΧŸ Χ’ΧœΧ™Χš גל Χ™Χ“Χ™ Χ‘Χ™Χ§Χ•Χ¨Χͺ Χ•Χ”Χ›Χ•Χ•Χ Χ”", "Χ”Χ§Χ•Χœ Χ”Χ‘Χ™Χ§Χ•Χ¨ΧͺΧ™"),
                            ("Χ”Χ™ΧœΧ“/Χ” Χ”Χ€Χ Χ™ΧžΧ™Χͺ - Χ”Χ—ΧœΧ§ Χ”Χ€Χ’Χ™Χ’, Χ”Χ¦Χ’Χ™Χ¨ Χ•Χ”ΧΧžΧ™ΧͺΧ™ שלך", "Χ”Χ™ΧœΧ“/Χ” Χ”Χ€Χ Χ™ΧžΧ™Χͺ"),
                            ("Χ”ΧžΧ¨Χ¦Χ” - Χ”Χ—ΧœΧ§ Χ©Χ¨Χ•Χ¦Χ” Χ©Χ›Χ•ΧœΧ Χ™Χ”Χ™Χ• ΧžΧ¨Χ•Χ¦Χ™Χ", "Χ”ΧžΧ¨Χ¦Χ”"),
                            ("Χ”ΧžΧ’ΧŸ - Χ”Χ—ΧœΧ§ Χ”Χ—Χ–Χ§ Χ©ΧžΧ’ΧŸ Χ’ΧœΧ™Χš ΧžΧ€Χ Χ™ Χ€Χ’Χ™Χ’Χ•Χͺ", "Χ”ΧžΧ’ΧŸ"),
                            ("Χ”Χ ΧžΧ Χ’/Χͺ - Χ”Χ—ΧœΧ§ Χ©ΧžΧ’Χ“Χ™Χ£ ΧœΧ”Χ™ΧžΧ Χ’ ΧžΧžΧ¦Χ‘Χ™Χ מאΧͺגרים", "Χ”Χ ΧžΧ Χ’/Χͺ")
                        ],
                        label="איזה Χ—ΧœΧ§ Χ€Χ Χ™ΧžΧ™ ΧͺΧ¨Χ¦Χ” ΧœΧ€Χ’Χ•Χ©?",
                        elem_classes=["hebrew-text"]
                    )
                    # Optional persona customization (name / age / speech style).
                    with gr.Accordion("Χ”ΧͺΧΧžΧ” אישיΧͺ (ΧΧ•Χ€Χ¦Χ™Χ•Χ ΧœΧ™)", open=False):
                        persona_name = gr.Textbox(
                            label="שם ΧœΧ—ΧœΧ§ Χ”Χ–Χ”:",
                            placeholder="למשל: Χ“Χ Χ™, ΧžΧ™Χ›Χœ, אבי...",
                            elem_classes=["hebrew-text"]
                        )
                        persona_age = gr.Textbox(
                            label="Χ’Χ™Χœ או ΧͺΧ§Χ•Χ€Χͺ חיים:",
                            placeholder="למשל: Χ™ΧœΧ“/Χ”, מΧͺΧ‘Χ’Χ¨/Χͺ, Χ‘Χ•Χ’Χ¨/Χͺ...",
                            elem_classes=["hebrew-text"]
                        )
                        persona_style = gr.Textbox(
                            label="Χ‘Χ’Χ Χ•ΧŸ Χ“Χ™Χ‘Χ•Χ¨ ΧžΧ™Χ•Χ—Χ“:",
                            placeholder="למשל: Χ¨Χ’Χ•Χ©, Χ¨Χ¦Χ™Χ Χ™, משגשג...",
                            elem_classes=["hebrew-text"]
                        )
                    start_conversation = gr.Button("Χ”ΧͺΧ—Χœ Χ©Χ™Χ—Χ”", variant="primary")
                # Step 3: the chat interface (hidden until a part is chosen).
                with gr.Group(visible=False) as conversation_step:
                    gr.Markdown("## Χ©Χ™Χ—Χ” גם Χ”Χ—ΧœΧ§ Χ”Χ€Χ Χ™ΧžΧ™ שלך", elem_classes=["hebrew-text"])
                    current_part_display = gr.Markdown("", elem_classes=["hebrew-text"])
                    # Chat area: chatbot + input on the left, actions on the right.
                    with gr.Row():
                        with gr.Column(scale=4):
                            # History is kept as [user, bot] pairs — presumably
                            # Gradio's legacy "tuples" chatbot format; confirm
                            # against the installed Gradio version.
                            chatbot = gr.Chatbot(
                                height=400,
                                label="Χ”Χ©Χ™Χ—Χ” שלך",
                                elem_classes=["hebrew-text"],
                                rtl=True
                            )
                            msg_input = gr.Textbox(
                                label="Χ”Χ”Χ•Χ“Χ’Χ” שלך:",
                                placeholder="Χ›ΧͺΧ•Χ‘/Χ›ΧͺΧ‘Χ™ אΧͺ Χ”ΧžΧ—Χ©Χ‘Χ•Χͺ שלך Χ›ΧΧŸ...",
                                lines=2,
                                elem_classes=["hebrew-text"]
                            )
                            with gr.Row():
                                send_btn = gr.Button("Χ©ΧœΧ—", variant="primary")
                                clear_btn = gr.Button("Χ Χ§Χ” Χ©Χ™Χ—Χ”")
                        with gr.Column(scale=1):
                            gr.Markdown("### Χ€Χ’Χ•ΧœΧ•Χͺ Χ Χ•Χ‘Χ€Χ•Χͺ", elem_classes=["hebrew-text"])
                            change_part_btn = gr.Button("Χ”Χ—ΧœΧ£ Χ—ΧœΧ§ Χ€Χ Χ™ΧžΧ™")
                            restart_btn = gr.Button("Χ”ΧͺΧ—Χœ ΧžΧ—Χ“Χ©")
            # ---- Event handlers -------------------------------------------
            def process_initial_context(choice, context, state):
                """Validate step 1 input and advance to part selection.

                Returns a 5-tuple matching continue_to_parts.click outputs:
                (state, part display, step1 vis, step2 vis, step3 vis).
                """
                if not choice or not context.strip():
                    # Invalid input: warn and stay on step 1.
                    gr.Warning("אנא Χ‘Χ—Χ¨ נושא Χ•Χ›ΧͺΧ‘ ΧžΧ©Χ”Χ• Χ›Χ“Χ™ ΧœΧ”ΧžΧ©Χ™Χš")
                    return state, gr.update(), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
                state = self.conversation_manager.set_initial_context(state, choice, context)
                return (
                    state,
                    gr.update(),
                    gr.update(visible=False),
                    gr.update(visible=True),
                    gr.update(visible=False)
                )
            def start_chat(part, p_name, p_age, p_style, state):
                """Store the chosen part (plus optional customization) and open
                the chat view; returns a 6-tuple matching start_conversation.click.
                """
                if not part:
                    gr.Warning("אנא Χ‘Χ—Χ¨ Χ—ΧœΧ§ Χ€Χ Χ™ΧžΧ™ Χ›Χ“Χ™ ΧœΧ”ΧͺΧ—Χ™Χœ")
                    return state, gr.update(), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update()
                state = self.conversation_manager.set_selected_part(
                    state, part, p_name.strip() if p_name else None,
                    p_age.strip() if p_age else None, p_style.strip() if p_style else None
                )
                part_info = DEFAULT_PARTS.get(part, {})
                # Prefer the user's custom name, else the part's default persona name.
                display_name = (p_name.strip() if p_name else None) or part_info.get("default_persona_name", "Χ—ΧœΧ§ Χ€Χ Χ™ΧžΧ™")
                display_text = f"πŸ—£οΈ Χ›Χ’Χͺ אΧͺΧ” מΧͺΧ©Χ•Χ—Χ— גם: **{display_name}** ({part})"
                return (
                    state,
                    display_text,
                    gr.update(visible=False),
                    gr.update(visible=False),
                    gr.update(visible=True),
                    []  # reset chatbot history
                )
            def handle_message(message, history, state):
                """Generate a reply for one user message and append the
                exchange to both the display history and the session state.
                """
                if not message.strip():
                    # Ignore empty sends; keep everything unchanged.
                    return "", history, state
                response = self.generate_response(message, state)
                state = self.conversation_manager.add_to_history(state, message, response)
                history.append([message, response])
                # First element clears the input textbox.
                return "", history, state
            def clear_conversation(state):
                """Wipe the chat history (display and session state)."""
                state = self.conversation_manager.clear_conversation(state)
                return [], state
            def change_part():
                """Go back from the chat view to part selection."""
                return (
                    gr.update(visible=False),
                    gr.update(visible=True),
                    gr.update(visible=False)
                )
            def restart_completely():
                """Reset the whole session: fresh state, back to step 1, and
                clear every input/display widget (12-tuple matching
                restart_btn.click outputs).
                """
                new_state = self.conversation_manager.create_new_session()
                return (
                    new_state,
                    gr.update(visible=True),
                    gr.update(visible=False),
                    gr.update(visible=False),
                    [],
                    "",
                    "",
                    None,
                    None,
                    "",
                    "",
                    ""
                )
            # ---- Wire up event handlers -----------------------------------
            continue_to_parts.click(
                fn=process_initial_context,
                inputs=[initial_choice, user_context_input, conversation_state],
                outputs=[conversation_state, current_part_display, initial_step, parts_step, conversation_step]
            )
            start_conversation.click(
                fn=start_chat,
                inputs=[part_selection, persona_name, persona_age, persona_style, conversation_state],
                outputs=[conversation_state, current_part_display, initial_step, parts_step, conversation_step, chatbot]
            )
            # Both Enter-in-textbox and the send button submit a message.
            msg_input.submit(
                fn=handle_message,
                inputs=[msg_input, chatbot, conversation_state],
                outputs=[msg_input, chatbot, conversation_state]
            )
            send_btn.click(
                fn=handle_message,
                inputs=[msg_input, chatbot, conversation_state],
                outputs=[msg_input, chatbot, conversation_state]
            )
            clear_btn.click(
                fn=clear_conversation,
                inputs=[conversation_state],
                outputs=[chatbot, conversation_state]
            )
            change_part_btn.click(
                fn=change_part,
                outputs=[conversation_step, parts_step, initial_step]
            )
            restart_btn.click(
                fn=restart_completely,
                outputs=[conversation_state, initial_step, parts_step, conversation_step, chatbot,
                         user_context_input, current_part_display, initial_choice, part_selection,
                         persona_name, persona_age, persona_style]
            )
        return demo
def main():
    """Entry point: build the application and launch the Gradio server.

    Launch options differ between Hugging Face Spaces (fixed port 7860,
    no share link) and local development (probed free port, share link,
    auto-opened browser).
    """
    logger.info("Starting ΧžΧ¨ΧΧ•Χͺ application...")
    try:
        application = MirautrApp()
        demo = application.create_main_interface()

        on_spaces = os.getenv("SPACE_ID") is not None
        logger.info(f"Launching app... HF Spaces: {on_spaces}")

        # Options shared by both environments so behavior stays identical.
        # NOTE(review): "enable_queue" was removed in newer Gradio releases —
        # confirm against the pinned Gradio version.
        options = {
            "show_error": True,
            "show_api": False,  # Disable API docs to avoid schema issues
            "favicon_path": None,
            "auth": None,
            "enable_queue": False,  # Disable queue to prevent schema issues
            "max_threads": 1  # Limit threads for stability
        }

        if on_spaces:
            logger.info("Configuring for HF Spaces deployment")
            options.update({
                "server_name": "0.0.0.0",
                "server_port": 7860,
                "share": False,  # HF Spaces handles public access
                "quiet": True
            })
        else:
            logger.info("Configuring for local development")
            import socket

            # Probe up to 10 consecutive ports starting from the configured
            # default; if none binds, fall back to the default and let
            # launch() surface the error.
            base_port = int(os.getenv("GRADIO_SERVER_PORT", "7861"))
            chosen_port = base_port
            for candidate in range(base_port, base_port + 10):
                try:
                    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
                        probe.bind(('127.0.0.1', candidate))
                        chosen_port = candidate
                        break
                except OSError:
                    continue
            logger.info(f"Using port {chosen_port} for local development")
            options.update({
                "server_name": "127.0.0.1",
                "server_port": chosen_port,
                "share": True,  # Enable share for local testing to avoid localhost issues
                "inbrowser": True,  # Auto-open browser
                "quiet": False
            })

        demo.launch(**options)
    except Exception as e:
        logger.error(f"Failed to start application: {e}")
        raise


if __name__ == "__main__":
    main()