# NOTE: Hugging Face Spaces status banner ("Spaces: Sleeping") removed — it was
# page-scrape residue, not part of the application source.
"""
EmotionMirror - Emotional Analysis Application
A Streamlit application for analyzing emotions using computer vision.
"""
# Standard library
import logging
import os
import time
import uuid
from datetime import datetime

# Third-party
import cv2
import numpy as np
import pandas as pd
import streamlit as st
from PIL import Image

# Application modules
from config import settings
from agent_framework.agent_manager import AgentManager
from utils.file_utils import allowed_file, save_uploaded_file
from utils.export_utils import get_download_link
from utils.preprocessing_ui import show_preprocessing_ui
from utils.simple_face_labeling import simple_face_detection_and_labeling_ui
from utils.face_validation import validate_image_faces, display_face_validation_result, should_continue_processing
from services.database_service import DatabaseService
from services.image_service import ImageService
from services.face_service import FaceDetectionService
from services.emotion_service import EmotionService as EmotionAnalysisService
# Page renderers (Home / History / About)
from utils.pages.about_page import render_about_page
from utils.pages.home_page import render_home_page
from utils.pages.history_page import render_history_page
# Basic stand-in helpers defined locally so the rest of the code stays unchanged.
def display_image_with_controls(image, caption=None, use_column_width=False, title=None, allow_zoom=False, allow_download=False):
    """Simplified version that only displays the image, without extra controls.

    Uses ``caption`` when provided, otherwise falls back to ``title``.
    ``allow_zoom`` and ``allow_download`` are accepted for API compatibility
    with the full implementation but are ignored here.
    """
    display_caption = caption if caption is not None else title
    # Constrain the width so the image is not rendered too large.
    return st.image(image, caption=display_caption, use_column_width=use_column_width, width=400)
def create_image_tabs(original_image, processed_image, key_prefix="img_tab"):
    """Simplified version that shows the original and processed images in two tabs.

    ``key_prefix`` is accepted for API compatibility but currently unused.
    Returns a dict with the created ``tabs`` so callers can add content later.
    """
    tabs = st.tabs(["Original Image", "Processed Image"])
    with tabs[0]:
        st.image(original_image, caption="Original Image", use_column_width=False, width=400)
    with tabs[1]:
        st.image(processed_image, caption="Processed Image", use_column_width=False, width=400)
    return {"tabs": tabs}
# Configure logging: INFO level with timestamp / logger name / level in each record.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Page configuration — must be the first Streamlit call in the script.
st.set_page_config(
    page_title="EmotionMirror",
    page_icon="📊",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={
        # NOTE(review): placeholder URLs — replace with real help/bug links.
        'Get Help': 'https://www.example.com/help',
        'Report a bug': 'https://www.example.com/bug',
        'About': 'EmotionMirror is an emotion analysis application.'
    }
)
# Apply custom CSS to improve stability and reduce flickering between reruns.
st.markdown("""
<style>
/* Reduce animation and transitions to minimize flickering */
* {
    transition: none !important;
    animation: none !important;
    transform: none !important;
}
/* Make containers more stable */
.stApp {
    transform: translateZ(0);
    backface-visibility: hidden;
    perspective: 1000px;
}
/* Further stabilize the main content area */
.main .block-container {
    transform: translateZ(0);
    will-change: auto;
}
/* Ensure column stability */
[data-testid="column"] {
    transform: translateZ(0);
}
/* Improve scrolling stability */
.main {
    overflow-y: auto;
    overflow-x: hidden;
}
</style>
""", unsafe_allow_html=True)
# Initialize agent manager
def get_agent_manager():
    """Create and return the agent manager.

    NOTE(review): the original docstring called this a singleton, but a new
    AgentManager is constructed on every call/rerun — consider caching it
    (e.g. with ``st.cache_resource``) if construction is expensive.
    """
    return AgentManager()
# Initialize database service
def get_database_service():
    """Create and return the database service.

    NOTE(review): not an actual singleton — a new DatabaseService is built
    on every call; consider caching if it holds connections.
    """
    return DatabaseService()
# Initialize image service for enhanced image handling
def get_image_service():
    """Create and return the image service.

    Part of Step 3 implementation: added for image validation, dimension and
    quality analysis. NOTE(review): not an actual singleton — a new instance
    is created on every call.
    """
    return ImageService()
# Initialize face service for face detection
def get_face_service():
    """Create and return the face detection service.

    NOTE(review): not an actual singleton — a new instance per call.
    """
    return FaceDetectionService()
# Initialize emotion service for emotion analysis
def get_emotion_service():
    """Create and return the emotion analysis service.

    NOTE(review): not an actual singleton — a new instance per call.
    """
    return EmotionAnalysisService()
# Session state initialization (runs once per browser session)
if "session_id" not in st.session_state:
    st.session_state.session_id = str(uuid.uuid4())
    logger.info(f"New session started: {st.session_state.session_id}")
if "upload_history" not in st.session_state:
    st.session_state.upload_history = []
# Persist the advanced-emotion setting across page switches.
if 'use_advanced_emotion' not in st.session_state:
    st.session_state.use_advanced_emotion = settings.USE_ADVANCED_EMOTION
# App title and description
st.title("EmotionMirror")
st.markdown("""
Welcome to EmotionMirror, an application for analyzing emotions using computer vision.
This is a prototype version that demonstrates the basic functionality.
""")
# Sidebar: navigation plus global settings.
with st.sidebar:
    st.title("EmotionMirror")
    st.subheader("Facial Emotion Analysis")

    # Navigation options
    page = st.radio(
        "Navigation",
        ["Home", "Visual Analysis", "History", "About"]
    )
    st.divider()

    # Settings section in sidebar
    st.subheader("Settings")
    # Toggle between basic and advanced (DeepFace) emotion detection.
    use_advanced = st.checkbox(
        "Use Advanced Emotion Detection",
        value=st.session_state.use_advanced_emotion,
        help="When enabled, DeepFace will be used for more accurate emotion detection"
    )
    # Propagate the change to session state and global settings.
    if st.session_state.use_advanced_emotion != use_advanced:
        st.session_state.use_advanced_emotion = use_advanced
        settings.USE_ADVANCED_EMOTION = use_advanced
        # Show a note about the new mode.
        if use_advanced:
            st.info("Advanced detection enabled")
        else:
            st.info("Basic detection enabled")

    # General confidence threshold for detections.
    confidence_threshold = st.slider(
        "Detection Confidence",
        min_value=0.1,
        max_value=1.0,
        value=0.45,
        step=0.05,
        help="Adjust the confidence threshold for detections"
    )
    st.divider()
    st.caption(f"Session ID: {st.session_state.session_id}")
    st.caption("Version: 0.1.3 (Phase 1.3)")
# Home page
if page == "Home":
    render_home_page()
| # Visual Analysis page | |
| elif page == "Visual Analysis": | |
| # Use modular page handler with correct parameters | |
| st.header("Visual Emotion Analysis") | |
| st.markdown(""" | |
| Upload an image to analyze emotions. | |
| For best results, use a clear image of a face with good lighting. | |
| """) | |
| # Initialize services explicitly | |
| agent_mgr = get_agent_manager() | |
| img_service = get_image_service() | |
| database_service = get_database_service() | |
| face_service = get_face_service() | |
| emotion_service = get_emotion_service() | |
| # Initialize the visual agent at the start | |
| visual_agent = agent_mgr.get_agent("VisualAgent") | |
| if not visual_agent: | |
| st.warning("Visual agent not available. The system is initializing or there is a configuration issue.") | |
| logger.error("Failed to get VisualAgent from agent_manager") | |
| # Create numbered sections for clear navigation | |
| st.header("1. Upload an Image") | |
| # Add reset button for clearing current image | |
| if "original_image" in st.session_state: | |
| col1, col2 = st.columns([3, 1]) | |
| with col2: | |
| if st.button("Clear Current Image", key="clear_image"): | |
| # Clear the session state | |
| if "original_image" in st.session_state: | |
| del st.session_state["original_image"] | |
| if "processed_image" in st.session_state: | |
| del st.session_state["processed_image"] | |
| if "current_image_path" in st.session_state: | |
| del st.session_state["current_image_path"] | |
| st.experimental_rerun() | |
| # Create file uploader | |
| uploaded_file = st.file_uploader( | |
| "Choose an image...", | |
| type=["jpg", "jpeg", "png"], | |
| help="Upload a clear image of a face for analysis." | |
| ) | |
| # Display information about detection methods | |
| with st.expander("About the Detection Methods", expanded=False): | |
| st.markdown(""" | |
| ### About the Detection Methods | |
| Currently using: **Advanced Detection** | |
| * **Basic detection** is faster but less accurate. It works by analyzing simple facial features. | |
| * **Advanced detection (DeepFace)** uses deep learning models that are trained on thousands of faces to recognize subtle emotional cues. | |
| You can change the default detection method in the sidebar settings. | |
| """) | |
| # Display image and interface when uploaded | |
| if uploaded_file is not None: | |
| try: | |
| # Process the uploaded file | |
| from PIL import Image | |
| img_array = np.array(Image.open(uploaded_file)) | |
| # Verificar la cantidad de rostros en la imagen apenas se carga | |
| img_bgr = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR) | |
| face_service = get_face_service() | |
| # Use the face validation module to check if there are too many faces | |
| face_validation_result = validate_image_faces(face_service, img_bgr) | |
| display_face_validation_result(face_validation_result, img_array) | |
| # Only continue if we don't have too many faces | |
| if not should_continue_processing(): | |
| # Si hay demasiados rostros, terminamos la ejecución aquí | |
| st.stop() | |
| # 2. Validar dimensiones (como en la versión local) | |
| try: | |
| dimension_result = img_service.validate_image_dimensions(img_array) | |
| # Asegurarnos de que dimension_result contiene las claves necesarias | |
| if not isinstance(dimension_result, dict): | |
| dimension_result = {} | |
| # Asegurarnos de que contiene las claves necesarias | |
| if "width" not in dimension_result: | |
| height, width = img_array.shape[:2] | |
| dimension_result["width"] = width | |
| dimension_result["height"] = height | |
| # Verificar si es óptimo basado en las dimensiones (si no existe ya la clave) | |
| if "is_optimal" not in dimension_result: | |
| width = dimension_result["width"] | |
| height = dimension_result["height"] | |
| dimension_result["is_optimal"] = width >= 640 and height >= 480 | |
| except Exception as e: | |
| st.error(f"Error validating dimensions: {str(e)}") | |
| # Crear un resultado predeterminado si hay un error | |
| height, width = img_array.shape[:2] | |
| dimension_result = { | |
| "width": width, | |
| "height": height, | |
| "is_optimal": width >= 640 and height >= 480 | |
| } | |
| # 3. Evaluar calidad (como en la versión local) | |
| try: | |
| quality_result = img_service.check_image_quality(img_array) | |
| # Asegurarnos de que quality_result contiene las claves necesarias | |
| if not isinstance(quality_result, dict): | |
| quality_result = {} | |
| # Añadir claves faltantes si es necesario | |
| if "score" not in quality_result: | |
| quality_result["score"] = 50 # Valor predeterminado | |
| if "label" not in quality_result: | |
| score = quality_result["score"] | |
| if score >= 70: | |
| quality_result["label"] = "Good" | |
| elif score >= 40: | |
| quality_result["label"] = "Fair" | |
| else: | |
| quality_result["label"] = "Poor" | |
| if "brightness" not in quality_result: | |
| # Calcular brillo si no existe | |
| gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY) | |
| quality_result["brightness"] = np.mean(gray) / 2.55 # Convertir a porcentaje | |
| if "contrast" not in quality_result: | |
| # Calcular contraste si no existe | |
| gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY) | |
| quality_result["contrast"] = np.std(gray) / 2.55 # Convertir a porcentaje | |
| if "sharpness_label" not in quality_result: | |
| quality_result["sharpness_label"] = "Good" # Valor predeterminado | |
| if "recommendations" not in quality_result: | |
| quality_result["recommendations"] = [] | |
| except Exception as e: | |
| st.error(f"Error checking quality: {str(e)}") | |
| # Crear un resultado predeterminado si hay un error | |
| quality_result = { | |
| "score": 50, | |
| "label": "Fair", | |
| "brightness": 50, | |
| "contrast": 30, | |
| "sharpness_label": "Fair", | |
| "recommendations": ["Use a clearer image for better analysis."] | |
| } | |
| # Guardar la imagen en session_state para mantener consistencia | |
| st.session_state.original_image = img_array | |
| # 4. Verificar si se debe usar imagen mejorada | |
| use_improved = "use_improved_image" in st.session_state and st.session_state["use_improved_image"] | |
| img_to_process = img_array # Por defecto usamos la imagen original | |
| if use_improved: | |
| try: | |
| # Convertir a BGR para procesamiento OpenCV | |
| img_bgr = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR) | |
| # Aplicar técnicas avanzadas de preprocesamiento | |
| enhanced_bgr = img_service.enhance_image_for_facial_detection(img_bgr) | |
| # Convertir de vuelta a RGB | |
| img_to_process = cv2.cvtColor(enhanced_bgr, cv2.COLOR_BGR2RGB) | |
| except Exception as e: | |
| st.warning(f"Could not apply advanced image enhancements: {str(e)}") | |
| # Ya tenemos img_to_process = img_array por defecto, no necesitamos asignarlo de nuevo | |
| except Exception as e: | |
| st.error(f"Error processing image: {str(e)}") | |
| # Crear resultados predeterminados si hay un error general | |
| img_array = np.zeros((100, 100, 3), dtype=np.uint8) # Imagen en negro como fallback | |
| img_to_process = img_array # Definir img_to_process para evitar errores posteriores | |
| dimension_result = {"width": 0, "height": 0, "is_optimal": False} | |
| quality_result = {"score": 0, "label": "Unknown", "brightness": 0, "contrast": 0, "sharpness_label": "Unknown", "recommendations": []} | |
| # Add space between sections | |
| st.markdown("<div style='height: 20px;'></div>", unsafe_allow_html=True) | |
| # Display image analysis from validation - EXACTAMENTE como la versión local | |
| st.header("2. Image Analysis") | |
| # Crear dos columnas principales: imagen a la izquierda, datos a la derecha | |
| img_analysis_cols = st.columns([1, 1]) | |
| with img_analysis_cols[0]: | |
| # Mostrar la imagen cargada a la izquierda | |
| st.image(img_array, caption="Uploaded Image", width=400) | |
| with img_analysis_cols[1]: | |
| # Mostrar los datos de análisis a la derecha (siguiendo exactamente la versión local) | |
| # Show dimensions info with validation status | |
| dimensions_str = f"{dimension_result['width']}x{dimension_result['height']} pixels" | |
| is_optimal = dimension_result["is_optimal"] | |
| optimal_icon = "✅" if is_optimal else "⚠️" | |
| optimal_label = "Optimal" if is_optimal else "Not Optimal" | |
| st.markdown(f"**Dimensions**: {dimensions_str} ({optimal_icon} {optimal_label})") | |
| # Show quality score | |
| quality_score = quality_result["score"] | |
| quality_label = quality_result["label"] | |
| # Calculate percentage for progress bar (0-100) | |
| quality_percentage = quality_score / 100 | |
| # Display quality score with color-coded progress bar | |
| st.markdown(f"**Image Quality**: {quality_score}% ({quality_label})") | |
| # Show progress bar for quality | |
| st.progress(quality_percentage, text=None) | |
| # Display factors in a more compact way | |
| st.markdown("### Quality Factors") | |
| factor_cols = st.columns(3) | |
| with factor_cols[0]: | |
| st.markdown("**Sharpness**") | |
| st.markdown(f"**{quality_result['sharpness_label']}**") | |
| with factor_cols[1]: | |
| st.markdown("**Brightness**") | |
| st.markdown(f"**{int(quality_result['brightness'])}%**") | |
| with factor_cols[2]: | |
| st.markdown("**Contrast**") | |
| st.markdown(f"**{int(quality_result['contrast'])}%**") | |
| # Display any recommendations if quality is not good | |
| if quality_result["score"] < 70: | |
| with st.expander("Recommendations for better results", expanded=True): | |
| for recommendation in quality_result["recommendations"]: | |
| st.markdown(f"- {recommendation}") | |
| # Add space between sections | |
| st.markdown("<div style='height: 20px;'></div>", unsafe_allow_html=True) | |
| # NOW display the preprocessing UI | |
| st.header("3. Image Preprocessing") | |
| preprocessing_result = show_preprocessing_ui(img_service, img_array) | |
| if not preprocessing_result.get("success", False): | |
| st.error(f"Error in preprocessing: {preprocessing_result.get('message', 'Unknown error')}") | |
| # Store path in session state for future use | |
| if "current_image_path" not in st.session_state: | |
| # Save the file for reference | |
| save_path = img_service.save_uploaded_image(img_to_process) | |
| st.session_state["current_image_path"] = save_path | |
| # Add some space to improve layout | |
| st.markdown("<div style='height: 20px;'></div>", unsafe_allow_html=True) | |
| # Face detection section | |
| st.header("4. Detect Face and Labeling") | |
| # Encapsulate the face detection UI in an expander | |
| face_detection_container = st.container() | |
| with st.expander("Show Face Detection and Labeling", expanded=False): | |
| with face_detection_container: | |
| # Asegurarse de que existe una imagen para procesar | |
| img_to_use = None | |
| # Usamos una estructura condicional más directa para garantizar que siempre tenemos una imagen | |
| if "improved_image" in st.session_state and st.session_state.get("selected_image_mode") == "improved": | |
| img_to_use = st.session_state["improved_image"] | |
| st.info("Using the improved image for face detection.") | |
| elif "uploaded_image" in st.session_state: | |
| img_to_use = st.session_state["uploaded_image"] | |
| st.info("Using the original image for face detection.") | |
| elif "original_image" in st.session_state: # Agregando esta condición para capturar la imagen original | |
| img_to_use = st.session_state["original_image"] | |
| st.info("Using the original image for face detection.") | |
| # Solo proceder si tenemos una imagen para procesar | |
| if img_to_use is not None: | |
| # Usar nuestro nuevo módulo para la detección y etiquetado facial | |
| face_detection_result = simple_face_detection_and_labeling_ui(img_to_use, face_service) | |
| # Guardar el resultado para la sección de análisis emocional | |
| if face_detection_result.get("proceed_to_analysis", False): | |
| st.session_state["ready_for_emotion_analysis"] = True | |
| st.session_state["faces_for_analysis"] = face_detection_result.get("faces_to_analyze", []) | |
| # Recargar la página para mostrar la sección de análisis | |
| st.experimental_rerun() | |
| else: | |
| st.warning("Please upload an image in the previous section to continue with facial detection.") | |
| # Add a clear separator to prevent section overlap | |
| st.markdown("<div style='height: 40px;'></div>", unsafe_allow_html=True) | |
| # Analysis section - put in a separate container to prevent overlap | |
| emotion_analysis_container = st.container() | |
| with emotion_analysis_container: | |
| st.header("5. Emotion Analysis") | |
| st.info("Image successfully uploaded. Emotion analysis functionality will be available soon.") | |
| # Add a disabled button as placeholder for future functionality | |
| st.button("Analyze Emotions (Coming Soon)", disabled=True, key="analyze_button") | |
| # History page | |
| elif page == "History": | |
| # Inicializar el servicio de base de datos y pasarlo como parámetro | |
| db_service = get_database_service() | |
| render_history_page(db_service) | |
| # About page | |
| elif page == "About": | |
| render_about_page() | |
# Footer
st.markdown("---")
st.markdown(" EmotionMirror | Developed as a prototype application")