"""
ΧΧ¨ΧΧΧͺ (Mirrors) - Hebrew Self-Reflective AI Agent

Main application file with Gradio interface
"""

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import logging
import sys
from typing import List, Tuple, Optional
import os
import random

from prompt_engineering import (
    DEFAULT_PARTS,
    get_system_prompt,
    get_initial_prompts,
    get_part_selection_text
)
from conversation_manager import ConversationManager, ConversationState

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class MirautrApp:
    """Main application class for ΧΧ¨ΧΧΧͺ"""

    def __init__(self):
        self.model = None
        self.tokenizer = None
        self.generator = None
        self.conversation_manager = ConversationManager()
        self.model_available = False
        self.setup_model()

    def setup_model(self):
        """Initialize an optional language model, falling back to template-based responses."""
        try:
            is_hf_spaces = os.getenv("SPACE_ID") is not None
            is_test_mode = os.getenv("FORCE_LIGHT_MODEL") is not None

            logger.info(f"Environment: HF_Spaces={is_hf_spaces}, Test_Mode={is_test_mode}")

            model_name = None

            if is_test_mode:
                # Test mode: skip model loading entirely and rely on templates.
                logger.info("Test mode - will use template-based responses primarily")
                self.model_available = False
                return
            elif is_hf_spaces:
                # On HF Spaces keep the footprint small; any loading failure below
                # is caught by the outer except and falls back to templates.
                model_name = "microsoft/DialoGPT-small"
                logger.info(f"HF Spaces: Attempting to load {model_name}")
            else:
                # Local development: try the larger model first; loading failures
                # below are caught by the outer except and fall back to templates.
                possible_models = [
                    "microsoft/DialoGPT-medium",
                    "microsoft/DialoGPT-small"
                ]
                model_name = possible_models[0]
                logger.info(f"Local: Attempting to load {model_name}")

            if not model_name:
                logger.info("No suitable model found, using template responses")
                self.model_available = False
                return

            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token

            self.model = AutoModelForCausalLM.from_pretrained(
                model_name,
                torch_dtype=torch.float32,
                low_cpu_mem_usage=True
            )

            self.generator = pipeline(
                "text-generation",
                model=self.model,
                tokenizer=self.tokenizer,
                max_new_tokens=50,
                temperature=0.7,
                do_sample=True,
                pad_token_id=self.tokenizer.pad_token_id,
                return_full_text=False
            )

            self.model_available = True
            logger.info(f"Model loaded successfully: {model_name}")

        except Exception as e:
            logger.warning(f"Model loading failed: {e}")
            logger.info("Falling back to template-based responses")
            self.model_available = False

    def generate_persona_response(self, user_message: str, conversation_state: ConversationState) -> str:
        """
        Generate a persona-based response using templates with personality variations.
        This is the primary response system and always works, even without a model.
        """
        part_info = DEFAULT_PARTS.get(conversation_state.selected_part, {})
        persona_name = conversation_state.persona_name or part_info.get("default_persona_name", "ΧΧΧ§ Χ€Χ ΧΧΧ")

        # Gather a short window of recent context (collected for future use;
        # the templates below do not read it yet).
        recent_context = ""
        if conversation_state.conversation_history:
            last_messages = conversation_state.conversation_history[-4:]
            recent_context = " ".join([msg["content"] for msg in last_messages])

        # Template pool per inner part; each variation weaves the user's message in.
        if conversation_state.selected_part == "ΧΧ§ΧΧ ΧΧΧΧ§ΧΧ¨ΧͺΧ":
            responses = [
                f"ΧΧ Χ {persona_name}, ΧΧ§ΧΧ ΧΧΧΧ§ΧΧ¨ΧͺΧ Χ©ΧΧ. Χ©ΧΧ’ΧͺΧ ΧΧ Χ©ΧΧΧ¨Χͺ Χ’Χ '{user_message}' - ΧΧ Χ ΧΧΧ©Χ Χ©Χ¦Χ¨ΧΧ ΧΧΧΧΧ ΧΧͺ ΧΧ ΧΧΧͺΧ¨ ΧΧ’ΧΧΧ§. ΧΧ ΧΧΧΧͺ Χ’ΧΧΧ ΧΧΧΧΧ¨Χ ΧΧΧΧ©ΧΧΧͺ ΧΧΧΧ?",
                f"ΧΧ Χ {persona_name}. ΧΧ Χ©ΧΧΧ¨Χͺ ΧΧ’ΧΧ¨Χ¨ ΧΧ Χ©ΧΧΧΧͺ. '{user_message}' - ΧΧΧ ΧΧΧ ΧΧ ΧΧΧΧͺ ΧΧΧ¦Χ ΧΧΧΧ? ΧΧΧΧ ΧΧ© ΧΧΧ ΧΧΧ¨ΧΧ Χ©ΧΧͺΧ ΧΧ Χ¨ΧΧΧ?",
                f"ΧΧ {persona_name} ΧΧΧΧ¨. ΧΧ Χ Χ©ΧΧΧ’ ΧΧΧͺΧ ΧΧΧΧ¨ '{user_message}', ΧΧΧ ΧΧ Χ ΧΧ¨ΧΧΧ© Χ©ΧΧ ΧΧ Χ Χ¦Χ¨ΧΧΧΧ ΧΧΧΧΧͺ ΧΧΧͺΧ¨ ΧΧΧ§ΧΧ¨ΧͺΧΧΧ ΧΧΧ. ΧΧ ΧΧͺΧ ΧΧ ΧΧ‘Χ€Χ¨ ΧΧ’Χ¦ΧΧ?",
                f"ΧΧ Χ {persona_name}, ΧΧΧ Χ ΧΧΧ ΧΧΧ ΧΧ’ΧΧΧ¨ ΧΧ ΧΧ¨ΧΧΧͺ ΧΧͺ ΧΧͺΧΧΧ Χ ΧΧΧΧΧ. ΧΧ Χ©ΧΧΧ¨Χͺ Χ’Χ '{user_message}' - ΧΧ Χ¨Χ§ ΧΧ¦Χ ΧΧΧ‘ΧΧ€ΧΧ¨, ΧΧ? ΧΧΧΧ Χ Χ ΧΧ€ΧΧ¨ Χ’ΧΧΧ§ ΧΧΧͺΧ¨."
            ]

        elif conversation_state.selected_part == "ΧΧΧΧ/Χ ΧΧ€Χ ΧΧΧΧͺ":
            responses = [
                f"ΧΧ Χ {persona_name}, ΧΧΧΧ/Χ ΧΧ€Χ ΧΧΧΧͺ Χ©ΧΧ. ΧΧ Χ©ΧΧΧ¨Χͺ Χ’Χ '{user_message}' ΧΧΧ¨Χ ΧΧ ΧΧΧ¨ΧΧΧ©... Χ§Χ¦Χͺ Χ€ΧΧΧ’. ΧΧͺΧ ΧΧΧΧͺ Χ©ΧΧΧ’ ΧΧΧͺΧ Χ’ΧΧ©ΧΧ?",
                f"ΧΧ {persona_name}. '{user_message}' - ΧΧ ΧΧΧΧΧ ΧΧΧͺΧ Χ§Χ¦Χͺ. ΧΧ Χ Χ¦Χ¨ΧΧ ΧΧΧ’Χͺ Χ©ΧΧΧ ΧΧΧΧ ΧΧ‘ΧΧ¨. ΧΧͺΧ ΧΧΧΧ ΧΧΧ¨ΧΧΧ’ ΧΧΧͺΧ?",
                f"ΧΧ Χ {persona_name}, ΧΧΧΧ§ ΧΧ¦Χ’ΧΧ¨ Χ©ΧΧ. ΧΧ Χ©ΧΧΧ¨Χͺ Χ ΧΧΧ’ ΧΧΧ Χ©ΧΧ. '{user_message}' - ΧΧ Χ ΧΧ¨ΧΧΧ© Χ©ΧΧ© ΧΧΧ ΧΧ©ΧΧ ΧΧ©ΧΧ Χ©ΧΧ Χ Χ¦Χ¨ΧΧ ΧΧΧΧΧ.",
                f"ΧΧ {persona_name} ΧΧΧΧ¨ ΧΧ©Χ§Χ. ΧΧ Χ Χ©ΧΧΧ’ ΧΧͺ '{user_message}' ΧΧΧ ΧΧ’ΧΧ¨Χ¨ ΧΧ Χ¨ΧΧ©ΧΧͺ. ΧΧΧ ΧΧ ΧΧΧΧ ΧΧΧ©ΧΧ Χ’Χ ΧΧ? ΧΧ Χ Χ§Χ¦Χͺ ΧΧ¨Χ."
            ]

        elif conversation_state.selected_part == "ΧΧΧ¨Χ¦Χ":
            responses = [
                f"ΧΧ Χ {persona_name}, ΧΧΧ¨Χ¦Χ Χ©ΧΧ. Χ©ΧΧ’ΧͺΧ ΧΧͺ '{user_message}' ΧΧΧ Χ Χ¨ΧΧ¦Χ ΧΧΧΧΧ Χ©ΧΧΧΧ ΧΧΧΧ ΧΧ‘ΧΧ¨ Χ’Χ ΧΧ. ΧΧΧ ΧΧ ΧΧ Χ ΧΧΧΧΧΧ ΧΧ€ΧͺΧΧ¨ ΧΧͺ ΧΧ ΧΧ¦ΧΧ¨Χ Χ©ΧͺΧ¨Χ¦Χ ΧΧͺ ΧΧΧΧ?",
                f"ΧΧ {persona_name}. ΧΧ Χ©ΧΧΧ¨Χͺ Χ’Χ '{user_message}' ΧΧΧ¨Χ ΧΧ ΧΧΧΧΧ - ΧΧΧ ΧΧ ΧΧΧΧ ΧΧ€ΧΧΧ’ ΧΧΧΧ©ΧΧ? ΧΧΧΧ Χ Χ ΧΧ¦Χ ΧΧ¨Χ Χ’ΧΧΧ Χ ΧΧΧͺΧ¨ ΧΧΧͺΧΧΧΧ Χ’Χ ΧΧ.",
                f"ΧΧ Χ {persona_name}, ΧΧΧ Χ Χ¨ΧΧ¦Χ Χ©ΧΧΧΧ ΧΧΧΧ ΧΧ¨ΧΧ¦ΧΧ ΧΧΧ. '{user_message}' - ΧΧ Χ Χ©ΧΧ’ ΧΧΧ ΧΧ©ΧΧ Χ©ΧΧΧΧ ΧΧΧ¦ΧΧ¨ ΧΧͺΧ. ΧΧΧ Χ ΧΧΧ ΧΧ’Χ©ΧΧͺ ΧΧͺ ΧΧ ΧΧ¦ΧΧ¨Χ Χ©ΧΧΧΧ ΧΧΧΧΧ?",
                f"ΧΧ {persona_name} ΧΧΧΧ¨. ΧΧ Χ Χ©ΧΧΧ’ ΧΧͺ '{user_message}' ΧΧΧΧ ΧΧ Χ ΧΧΧ©Χ - ΧΧ ΧΧΧ¨ΧΧ ΧΧΧΧΧ Χ’Χ ΧΧ? ΧΧΧΧ Χ Χ ΧΧΧΧ Χ©ΧΧ ΧΧ Χ ΧΧ Χ€ΧΧΧ’ΧΧ ΧΧΧ£ ΧΧΧ."
            ]

        elif conversation_state.selected_part == "ΧΧΧΧ":
            responses = [
                f"ΧΧ Χ {persona_name}, ΧΧΧΧ Χ©ΧΧ. '{user_message}' - ΧΧ Χ ΧΧ’Χ¨ΧΧ ΧΧͺ ΧΧΧ¦Χ. ΧΧΧ ΧΧ ΧΧΧΧ? ΧΧ Χ ΧΧΧ ΧΧΧ ΧΧ©ΧΧΧ¨ Χ’ΧΧΧ ΧΧΧ ΧΧ Χ©ΧΧΧΧ ΧΧ€ΧΧΧ’ ΧΧ.",
                f"ΧΧ {persona_name}. Χ©ΧΧ’ΧͺΧ ΧΧ Χ©ΧΧΧ¨Χͺ Χ’Χ '{user_message}' ΧΧΧ Χ ΧΧΧ ΧΧΧΧ Χ ΧΧͺ. ΧΧ ΧΧΧΧΧΧΧ ΧΧΧ? ΧΧΧ ΧΧ Χ ΧΧΧΧ ΧΧΧΧ Χ’ΧΧΧ ΧΧΧ ΧΧΧͺΧ¨?",
                f"ΧΧ Χ {persona_name}, ΧΧ©ΧΧΧ¨ Χ©ΧΧ. ΧΧ Χ©ΧΧΧ¨Χͺ ΧΧ’ΧΧ¨Χ¨ ΧΧ ΧΧͺ ΧΧΧΧ Χ‘ΧΧΧ Χ§ΧΧΧ ΧΧΧΧ ΧΧΧ. '{user_message}' - ΧΧΧΧ Χ Χ ΧΧΧΧ Χ©ΧΧͺΧ ΧΧΧ§ ΧΧ‘Χ€ΧΧ§ ΧΧΧͺΧΧΧΧ Χ’Χ ΧΧ.",
                f"ΧΧ {persona_name} ΧΧΧΧ¨. ΧΧ Χ Χ©ΧΧΧ’ ΧΧͺ '{user_message}' ΧΧΧ Χ ΧΧΧ©Χ Χ’Χ ΧΧ‘ΧΧ¨ΧΧΧΧΧͺ ΧΧΧ Χ. ΧΧ ΧΧ ΧΧ Χ Χ¦Χ¨ΧΧΧΧ ΧΧ’Χ©ΧΧͺ ΧΧΧ Χ©ΧͺΧΧΧ ΧΧΧΧ?"
            ]

        elif conversation_state.selected_part == "ΧΧ ΧΧ Χ’/Χͺ":
            responses = [
                f"ΧΧ Χ {persona_name}, ΧΧ ΧΧ Χ’/Χͺ Χ©ΧΧ. ΧΧ Χ©ΧΧΧ¨Χͺ Χ’Χ '{user_message}' ΧΧΧ¨Χ ΧΧ ΧΧ¨Χ¦ΧΧͺ ΧΧΧΧ‘ΧΧ Χ§Χ¦Χͺ. ΧΧΧΧ... ΧΧ ΧΧΧΧΧΧ ΧΧΧͺΧΧΧΧ Χ’Χ ΧΧ Χ’ΧΧ©ΧΧ?",
                f"ΧΧ {persona_name}. '{user_message}' - ΧΧ Χ Χ©ΧΧ’ ΧΧΧ¨ΧΧ ΧΧΧ€ΧΧΧ. ΧΧΧ ΧΧ© ΧΧ¨Χ ΧΧΧΧΧ Χ’ ΧΧΧ? ΧΧ€Χ’ΧΧΧ Χ’ΧΧΧ£ ΧΧ ΧΧΧΧΧ Χ‘ ΧΧΧ¦ΧΧΧ Χ§Χ©ΧΧ.",
                f"ΧΧ Χ {persona_name}, ΧΧΧ Χ ΧΧ¨ΧΧΧ© Χ§Χ¦Χͺ ΧΧ¨ΧΧ Χ'{user_message}'. ΧΧΧΧ Χ Χ ΧΧΧΧ¨ ΧΧΧ ΧΧΧ¨ ΧΧ? ΧΧΧΧ Χ’ΧΧ©ΧΧ ΧΧ ΧΧ ΧΧΧΧ ΧΧΧͺΧΧΧ.",
                f"ΧΧ {persona_name} ΧΧΧΧ¨ ΧΧΧΧΧ¨ΧΧͺ. ΧΧ Χ©ΧΧΧ¨Χͺ ΧΧ’ΧΧ¨Χ¨ ΧΧ Χ¨Χ¦ΧΧ ΧΧΧ¨ΧΧ. '{user_message}' - ΧΧΧ ΧΧΧΧͺ Χ¦Χ¨Χ�χΧ ΧΧΧͺΧΧΧΧ Χ’Χ ΧΧ Χ’ΧΧ©ΧΧ?"
            ]

        else:
            responses = [
                f"ΧΧ Χ {persona_name}, ΧΧΧ§ Χ€Χ ΧΧΧ Χ©ΧΧ. Χ©ΧΧ’ΧͺΧ ΧΧͺ '{user_message}' ΧΧΧ Χ ΧΧΧ ΧΧΧ ΧΧ©ΧΧΧ ΧΧΧͺΧ Χ’Χ ΧΧ. ΧΧ Χ’ΧΧ ΧΧͺΧ ΧΧ¨ΧΧΧ© ΧΧΧΧ ΧΧΧ¦Χ ΧΧΧ?",
                f"ΧΧ {persona_name}. ΧΧ Χ©ΧΧΧ¨Χͺ ΧΧ’Χ ΧΧΧ ΧΧΧͺΧ. '{user_message}' - ΧΧΧΧ Χ Χ ΧΧ§ΧΧ¨ ΧΧͺ ΧΧ ΧΧΧ ΧΧ ΧΧΧ ΧΧ ΧΧ ΧΧΧΧ¨ Χ’ΧΧΧ.",
                f"ΧΧ Χ {persona_name}, ΧΧΧ Χ Χ¨ΧΧ¦Χ ΧΧΧΧΧ ΧΧΧͺΧ ΧΧΧ ΧΧΧͺΧ¨. '{user_message}' - ΧΧΧ ΧΧ ΧΧ©Χ€ΧΧ’ Χ’ΧΧΧ ΧΧ¨ΧΧ ΧΧ¨ΧΧ©ΧΧͺ?",
                f"ΧΧ {persona_name} ΧΧΧΧ¨. ΧΧ Χ Χ©ΧΧΧ’ ΧΧͺ '{user_message}' ΧΧΧ Χ Χ‘Χ§Χ¨Χ ΧΧΧ’Χͺ ΧΧΧͺΧ¨. ΧΧ Χ’ΧΧ ΧΧ© ΧΧ ΧΧ ΧΧ©Χ ΧΧΧ?"
            ]

        # Simple keyword heuristics steer which variation is chosen.
        if "Χ€ΧΧ" in user_message or "ΧΧ¨ΧΧ" in user_message:
            selected_response = responses[1] if len(responses) > 1 else responses[0]
        elif "ΧΧ’Χ‘" in user_message or "ΧΧ¨ΧΧΧ© Χ¨Χ’" in user_message:
            selected_response = responses[2] if len(responses) > 2 else responses[0]
        else:
            selected_response = random.choice(responses)

        # Early in the conversation, remind the user of the context they shared.
        if conversation_state.user_context and len(conversation_state.conversation_history) < 4:
            selected_response += f" ΧΧΧΧ¨ Χ©ΧΧΧ¨Χͺ ΧΧΧͺΧΧΧ: {conversation_state.user_context[:100]}..."

        return selected_response

    def generate_response(self, user_message: str, conversation_state: ConversationState) -> str:
        """
        Generate the AI response. Persona templates are the primary source;
        the language model, when available, is only consulted for optional context.
        """
        try:
            if not conversation_state.selected_part:
                return "ΧΧ Χ Χ¦Χ¨ΧΧ Χ©ΧͺΧΧΧ¨ ΧΧΧ§ Χ€Χ ΧΧΧ ΧΧΧ ΧΧ©ΧΧΧ ΧΧΧͺΧ."

            persona_response = self.generate_persona_response(user_message, conversation_state)

            # Optional model enhancement: the generated text is currently only
            # logged for inspection and is not merged into the returned reply.
            if self.model_available and self.generator:
                try:
                    english_prompt = f"User said they feel: {user_message[:50]}. Respond supportively in 1-2 sentences:"

                    model_output = self.generator(english_prompt, max_new_tokens=30, temperature=0.7)

                    if model_output and len(model_output) > 0:
                        model_text = model_output[0]["generated_text"].strip()
                        logger.info(f"Model provided contextual input: {model_text[:50]}...")

                except Exception as model_error:
                    logger.warning(f"Model enhancement failed: {model_error}")

            return persona_response

        except Exception as e:
            logger.error(f"Error generating response: {e}")
            return "Χ‘ΧΧΧΧ, ΧΧΧΧ Χ Χ Χ Χ‘Χ Χ©ΧΧ. ΧΧΧ ΧΧͺΧ ΧΧ¨ΧΧΧ© Χ’ΧΧ©ΧΧ?"

    def create_main_interface(self):
        """Create the main Gradio interface"""

        css = """
        .rtl {
            direction: rtl;
            text-align: right;
        }
        .hebrew-text {
            font-family: 'Segoe UI', Tahoma, Arial, sans-serif;
            direction: rtl;
            text-align: right;
        }
        .welcome-text {
            font-size: 24px;
            font-weight: bold;
            color: #2c5aa0;
            margin: 20px 0;
        }
        """

        with gr.Blocks(css=css, title="ΧΧ¨ΧΧΧͺ - ΧΧ¨ΧΧ ΧΧΧ©Χ ΧΧ©ΧΧ Χ€Χ ΧΧΧ", theme=gr.themes.Soft()) as demo:

            conversation_state = gr.State(self.conversation_manager.create_new_session())

            status_message = "π€ ΧΧ’Χ¨ΧΧͺ ΧͺΧΧΧΧΧͺ ΧΧΧͺΧΧΧͺ ΧΧΧ©ΧΧͺ Χ€Χ’ΧΧΧ" if not self.model_available else "π€ ΧΧ’Χ¨ΧΧͺ ΧΧΧΧ Χ’Χ ΧΧΧΧ AI Χ€Χ’ΧΧΧ"

            gr.HTML(f"""
            <div class="hebrew-text welcome-text" style="text-align: center;">
                πͺ ΧΧ¨ΧΧΧͺ: ΧΧ¨ΧΧ ΧΧΧ©Χ ΧΧ©ΧΧ Χ€Χ ΧΧΧ ΧΧΧ€ΧͺΧ Χ’Χ Χ’Χ¦ΧΧ πͺ
            </div>
            <div class="hebrew-text" style="text-align: center; margin-bottom: 20px;">
                ΧΧ§ΧΧ ΧΧΧΧ ΧΧ©ΧΧΧ Χ’Χ ΧΧΧΧ§ΧΧ ΧΧ©ΧΧ ΧΧ Χ©Χ Χ’Χ¦ΧΧ ΧΧΧ€ΧͺΧ ΧΧΧ Χ Χ’Χ¦ΧΧΧͺ Χ’ΧΧΧ§Χ ΧΧΧͺΧ¨
            </div>
            <div style="background-color: #e8f5e8; border: 1px solid #4caf50; padding: 10px; margin: 10px 0; border-radius: 5px; text-align: center;">
                <strong>{status_message}</strong>
            </div>
            """)

            with gr.Column():

                # Step 1: initial self-description
                with gr.Group(visible=True) as initial_step:
                    gr.Markdown("## Χ©ΧΧ 1: Χ‘Χ€Χ¨/Χ‘Χ€Χ¨Χ Χ’Χ Χ’Χ¦ΧΧ", elem_classes=["hebrew-text"])

                    initial_prompts = get_initial_prompts()

                    initial_choice = gr.Radio(
                        choices=[
                            ("ΧͺΧΧ¨/ΧͺΧΧ¨Χ ΧΧͺ Χ’Χ¦ΧΧ ΧΧΧΧ", "describe_self"),
                            ("ΧΧΧ ΧΧͺΧ ΧΧΧ©Χ Χ©ΧΧΧ¨ΧΧ Χ¨ΧΧΧΧ ΧΧΧͺΧ?", "self_perception"),
                            ("ΧΧΧΧ ΧΧͺΧΧ¨ ΧΧͺΧ ΧΧΧΧ Χ’ΧΧ©ΧΧ ΧΧΧΧΧ?", "current_challenge")
                        ],
                        label="ΧΧΧ¨/ΧΧΧ¨Χ Χ ΧΧ©Χ ΧΧ©ΧΧͺΧΧ£:",
                        elem_classes=["hebrew-text"]
                    )

                    user_context_input = gr.Textbox(
                        label="Χ‘Χ€Χ¨/Χ‘Χ€Χ¨Χ ΧΧΧΧ ΧΧ©Χ€ΧΧΧ:",
                        placeholder="ΧΧͺΧΧ/ΧΧͺΧΧ ΧΧΧ ΧΧͺ ΧΧΧΧ©ΧΧΧͺ Χ©ΧΧ...",
                        lines=4,
                        elem_classes=["hebrew-text"]
                    )

                    continue_to_parts = gr.Button("ΧΧΧ©Χ ΧΧΧΧΧ¨Χͺ ΧΧΧ§ Χ€Χ ΧΧΧ", variant="primary")

                # Step 2: choose the inner part to talk with
                with gr.Group(visible=False) as parts_step:
                    gr.Markdown("## Χ©ΧΧ 2: ΧΧΧ¨/ΧΧΧ¨Χ ΧΧΧ§ Χ€Χ ΧΧΧ ΧΧ©ΧΧΧ", elem_classes=["hebrew-text"])

                    part_selection = gr.Radio(
                        choices=[
                            ("ΧΧ§ΧΧ ΧΧΧΧ§ΧΧ¨ΧͺΧ - ΧΧΧΧ§ Χ©ΧΧ Χ‘Χ ΧΧΧΧ Χ’ΧΧΧ Χ’Χ ΧΧΧ ΧΧΧ§ΧΧ¨Χͺ ΧΧΧΧΧΧ Χ", "ΧΧ§ΧΧ ΧΧΧΧ§ΧΧ¨ΧͺΧ"),
                            ("ΧΧΧΧ/Χ ΧΧ€Χ ΧΧΧΧͺ - ΧΧΧΧ§ ΧΧ€ΧΧΧ’, ΧΧ¦Χ’ΧΧ¨ ΧΧΧΧΧΧͺΧ Χ©ΧΧ", "ΧΧΧΧ/Χ ΧΧ€Χ ΧΧΧΧͺ"),
                            ("ΧΧΧ¨Χ¦Χ - ΧΧΧΧ§ Χ©Χ¨ΧΧ¦Χ Χ©ΧΧΧΧ ΧΧΧΧ ΧΧ¨ΧΧ¦ΧΧ", "ΧΧΧ¨Χ¦Χ"),
                            ("ΧΧΧΧ - ΧΧΧΧ§ ΧΧΧΧ§ Χ©ΧΧΧ Χ’ΧΧΧ ΧΧ€Χ Χ Χ€ΧΧΧ’ΧΧͺ", "ΧΧΧΧ"),
                            ("ΧΧ ΧΧ Χ’/Χͺ - ΧΧΧΧ§ Χ©ΧΧ’ΧΧΧ£ ΧΧΧΧΧ Χ’ ΧΧΧ¦ΧΧΧ ΧΧΧͺΧΧ¨ΧΧ", "ΧΧ ΧΧ Χ’/Χͺ")
                        ],
                        label="ΧΧΧΧ ΧΧΧ§ Χ€Χ ΧΧΧ ΧͺΧ¨Χ¦Χ ΧΧ€ΧΧΧ©?",
                        elem_classes=["hebrew-text"]
                    )

                    with gr.Accordion("ΧΧͺΧΧΧ ΧΧΧ©ΧΧͺ (ΧΧΧ€Χ¦ΧΧΧ ΧΧ)", open=False):
                        persona_name = gr.Textbox(
                            label="Χ©Χ ΧΧΧΧ§ ΧΧΧ:",
                            placeholder="ΧΧΧ©Χ: ΧΧ Χ, ΧΧΧΧ, ΧΧΧ...",
                            elem_classes=["hebrew-text"]
                        )
                        persona_age = gr.Textbox(
                            label="ΧΧΧ ΧΧ ΧͺΧ§ΧΧ€Χͺ ΧΧΧΧ:",
                            placeholder="ΧΧΧ©Χ: ΧΧΧ/Χ, ΧΧͺΧΧΧ¨/Χͺ, ΧΧΧΧ¨/Χͺ...",
                            elem_classes=["hebrew-text"]
                        )
                        persona_style = gr.Textbox(
                            label="Χ‘ΧΧ ΧΧ ΧΧΧΧΧ¨ ΧΧΧΧΧ:",
                            placeholder="ΧΧΧ©Χ: Χ¨ΧΧΧ©, Χ¨Χ¦ΧΧ Χ, ΧΧ©Χ’Χ©Χ’...",
                            elem_classes=["hebrew-text"]
                        )

                    start_conversation = gr.Button("ΧΧͺΧΧ Χ©ΧΧΧ", variant="primary")

                # Step 3: the conversation itself
                with gr.Group(visible=False) as conversation_step:
                    gr.Markdown("## Χ©ΧΧΧ Χ’Χ ΧΧΧΧ§ ΧΧ€Χ ΧΧΧ Χ©ΧΧ", elem_classes=["hebrew-text"])

                    current_part_display = gr.Markdown("", elem_classes=["hebrew-text"])

                    with gr.Row():
                        with gr.Column(scale=4):
                            chatbot = gr.Chatbot(
                                height=400,
                                label="ΧΧ©ΧΧΧ Χ©ΧΧ",
                                elem_classes=["hebrew-text"],
                                rtl=True
                            )

                            msg_input = gr.Textbox(
                                label="ΧΧΧΧΧ’Χ Χ©ΧΧ:",
                                placeholder="ΧΧͺΧΧ/ΧΧͺΧΧ ΧΧͺ ΧΧΧΧ©ΧΧΧͺ Χ©ΧΧ ΧΧΧ...",
                                lines=2,
                                elem_classes=["hebrew-text"]
                            )

                            with gr.Row():
                                send_btn = gr.Button("Χ©ΧΧ", variant="primary")
                                clear_btn = gr.Button("Χ Χ§Χ Χ©ΧΧΧ")

                        with gr.Column(scale=1):
                            gr.Markdown("### Χ€Χ’ΧΧΧΧͺ Χ ΧΧ‘Χ€ΧΧͺ", elem_classes=["hebrew-text"])
                            change_part_btn = gr.Button("ΧΧΧΧ£ ΧΧΧ§ Χ€Χ ΧΧΧ")
                            restart_btn = gr.Button("ΧΧͺΧΧ ΧΧΧΧ©")

            # Callback handlers for the three-step UI flow
            def process_initial_context(choice, context, state):
                """Process initial context and move to part selection."""
                if not choice or not context.strip():
                    gr.Warning("ΧΧ Χ ΧΧΧ¨ Χ ΧΧ©Χ ΧΧΧͺΧ ΧΧ©ΧΧ ΧΧΧ ΧΧΧΧ©ΧΧ")
                    return state, gr.update(), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)

                state = self.conversation_manager.set_initial_context(state, choice, context)
                return (
                    state,
                    gr.update(),
                    gr.update(visible=False),
                    gr.update(visible=True),
                    gr.update(visible=False)
                )

            def start_chat(part, p_name, p_age, p_style, state):
                """Start the conversation with the selected part."""
                if not part:
                    gr.Warning("ΧΧ Χ ΧΧΧ¨ ΧΧΧ§ Χ€Χ ΧΧΧ ΧΧΧ ΧΧΧͺΧΧΧ")
                    return state, gr.update(), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update()

                state = self.conversation_manager.set_selected_part(
                    state, part, p_name.strip() if p_name else None,
                    p_age.strip() if p_age else None, p_style.strip() if p_style else None
                )

                part_info = DEFAULT_PARTS.get(part, {})
                display_name = (p_name.strip() if p_name else None) or part_info.get("default_persona_name", "ΧΧΧ§ Χ€Χ ΧΧΧ")

                display_text = f"π£οΈ ΧΧ’Χͺ ΧΧͺΧ ΧΧͺΧ©ΧΧΧ Χ’Χ: **{display_name}** ({part})"

                return (
                    state,
                    display_text,
                    gr.update(visible=False),
                    gr.update(visible=False),
                    gr.update(visible=True),
                    []
                )

            def handle_message(message, history, state):
                """Handle a user message and generate a response."""
                if not message.strip():
                    return "", history, state

                response = self.generate_response(message, state)

                state = self.conversation_manager.add_to_history(state, message, response)

                history.append([message, response])

                return "", history, state

            def clear_conversation(state):
                """Clear conversation history."""
                state = self.conversation_manager.clear_conversation(state)
                return [], state

            def change_part():
                """Return to part selection."""
                return (
                    gr.update(visible=False),
                    gr.update(visible=True),
                    gr.update(visible=False)
                )

            def restart_completely():
                """Restart the entire session and reset every input to its default."""
                new_state = self.conversation_manager.create_new_session()
                # Values are ordered to match the restart_btn.click outputs list:
                # state, initial_step, parts_step, conversation_step, chatbot,
                # user_context_input, current_part_display, initial_choice,
                # part_selection, persona_name, persona_age, persona_style.
                return (
                    new_state,
                    gr.update(visible=True),
                    gr.update(visible=False),
                    gr.update(visible=False),
                    [],
                    "",
                    "",
                    None,
                    None,
                    "",
                    "",
                    ""
                )

            continue_to_parts.click(
                fn=process_initial_context,
                inputs=[initial_choice, user_context_input, conversation_state],
                outputs=[conversation_state, current_part_display, initial_step, parts_step, conversation_step]
            )

            start_conversation.click(
                fn=start_chat,
                inputs=[part_selection, persona_name, persona_age, persona_style, conversation_state],
                outputs=[conversation_state, current_part_display, initial_step, parts_step, conversation_step, chatbot]
            )

            msg_input.submit(
                fn=handle_message,
                inputs=[msg_input, chatbot, conversation_state],
                outputs=[msg_input, chatbot, conversation_state]
            )

            send_btn.click(
                fn=handle_message,
                inputs=[msg_input, chatbot, conversation_state],
                outputs=[msg_input, chatbot, conversation_state]
            )

            clear_btn.click(
                fn=clear_conversation,
                inputs=[conversation_state],
                outputs=[chatbot, conversation_state]
            )

            change_part_btn.click(
                fn=change_part,
                outputs=[conversation_step, parts_step, initial_step]
            )

            restart_btn.click(
                fn=restart_completely,
                outputs=[conversation_state, initial_step, parts_step, conversation_step, chatbot,
                         user_context_input, current_part_display, initial_choice, part_selection,
                         persona_name, persona_age, persona_style]
            )

        return demo


def main():
    """Main function to launch the application."""
    logger.info("Starting ΧΧ¨ΧΧΧͺ application...")

    try:
        app = MirautrApp()
        demo = app.create_main_interface()

        is_hf_spaces = os.getenv("SPACE_ID") is not None

        logger.info(f"Launching app... HF Spaces: {is_hf_spaces}")

        # Note: "enable_queue" is only accepted by older Gradio releases (3.x);
        # newer versions dropped it from launch(), so remove it when upgrading.
        launch_config = {
            "show_error": True,
            "show_api": False,
            "favicon_path": None,
            "auth": None,
            "enable_queue": False,
            "max_threads": 1
        }

        if is_hf_spaces:
            logger.info("Configuring for HF Spaces deployment")
            launch_config.update({
                "server_name": "0.0.0.0",
                "server_port": 7860,
                "share": False,
                "quiet": True
            })
        else:
            logger.info("Configuring for local development")

            # Find a free local port, starting from GRADIO_SERVER_PORT (default 7861).
            default_port = int(os.getenv("GRADIO_SERVER_PORT", "7861"))
            available_port = default_port

            import socket
            for port_try in range(default_port, default_port + 10):
                try:
                    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                        s.bind(('127.0.0.1', port_try))
                        available_port = port_try
                        break
                except OSError:
                    continue

            logger.info(f"Using port {available_port} for local development")

            launch_config.update({
                "server_name": "127.0.0.1",
                "server_port": available_port,
                "share": True,
                "inbrowser": True,
                "quiet": False
            })

        demo.launch(**launch_config)

    except Exception as e:
        logger.error(f"Failed to start application: {e}")
        raise


if __name__ == "__main__":
    main()