"""
EmotionMirror - Emotional Analysis Application
A Streamlit application for analyzing emotions using computer vision.
"""
import os
import time
import uuid
import logging
import streamlit as st
from datetime import datetime
from PIL import Image
import numpy as np
import cv2
import json
import pandas as pd
# Import app modules
from config import settings
from agent_framework.agent_manager import AgentManager
from utils.file_utils import allowed_file, save_uploaded_file
from utils.export_utils import export_to_json, export_to_csv, get_download_link, generate_emotion_summary
from utils.preprocessing_ui import display_preprocessing_comparison, setup_preprocessing_controls, display_processing_status, get_processing_image, show_preprocessing_ui
from services.database_service import DatabaseService
from services.image_service import ImageService
# Configure logging: one module-level logger shared by the whole script.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Page configuration (Streamlit requires this to be the first st.* call).
st.set_page_config(
    page_title="EmotionMirror",
    page_icon="๐",  # NOTE(review): looks like a mojibake'd emoji — confirm the intended icon
    layout="wide",
    initial_sidebar_state="expanded"
)
# Initialize agent manager
@st.cache_resource
def get_agent_manager():
    """Return the process-wide AgentManager singleton.

    `st.cache_resource` guarantees a single instance is created and reused
    across reruns and sessions.
    """
    return AgentManager()
# Initialize database service
@st.cache_resource
def get_database_service():
    """Return the process-wide DatabaseService singleton (cached by Streamlit)."""
    return DatabaseService()
# Initialize image service for enhanced image handling
@st.cache_resource
def get_image_service():
    """Return the process-wide ImageService singleton (cached by Streamlit).

    Part of Step 3 implementation: added for image validation, dimension
    and quality analysis.
    """
    return ImageService()
# Session-state bootstrap: runs on every rerun but only writes missing keys.
if "session_id" not in st.session_state:
    # One UUID per browser session; used to scope database records and exports.
    st.session_state["session_id"] = str(uuid.uuid4())
    logger.info("New session started: %s", st.session_state["session_id"])

if "upload_history" not in st.session_state:
    # In-memory record of files uploaded during this session.
    st.session_state["upload_history"] = []

# Persist the advanced-emotion toggle between pages, seeded from settings.
if "use_advanced_emotion" not in st.session_state:
    st.session_state["use_advanced_emotion"] = settings.USE_ADVANCED_EMOTION
# App title and description
st.title("EmotionMirror")
st.markdown("""
Welcome to EmotionMirror, an application for analyzing emotions using computer vision.
This is a prototype version that demonstrates the basic functionality.
""")

# Sidebar: navigation plus global detection settings. The names `page` and
# `confidence_threshold` defined here are read by the page branches below.
with st.sidebar:
    st.title("EmotionMirror")
    st.subheader("Facial Emotion Analysis")

    # Navigation options
    page = st.radio(
        "Navigation",
        ["Home", "Visual Analysis", "History", "About"]
    )
    st.divider()

    # Settings section in sidebar
    st.subheader("Settings")
    # Toggle between the fast basic detector and DeepFace-based detection.
    use_advanced = st.checkbox(
        "Use Advanced Emotion Detection",
        value=st.session_state.use_advanced_emotion,
        help="When enabled, DeepFace will be used for more accurate emotion detection"
    )
    # Persist the choice in both session state and the settings module so
    # downstream modules see it on this run; notify the user of the switch.
    if st.session_state.use_advanced_emotion != use_advanced:
        st.session_state.use_advanced_emotion = use_advanced
        settings.USE_ADVANCED_EMOTION = use_advanced
        if use_advanced:
            st.info("Advanced detection enabled")
        else:
            st.info("Basic detection enabled")

    # Minimum confidence for detections; forwarded to the agent framework
    # by the Visual Analysis page.
    confidence_threshold = st.slider(
        "Detection Confidence",
        min_value=0.1,
        max_value=1.0,
        value=0.45,
        step=0.05,
        help="Adjust the confidence threshold for detections"
    )
    st.divider()
    st.caption(f"Session ID: {st.session_state.session_id}")
    # FIX: removed the pointless f-string prefix (literal has no placeholders).
    st.caption("Version: 0.1.3 (Phase 1.3)")
# Home page
if page == "Home":
    st.header("EmotionMirror - Emotional Analysis System")

    st.subheader("Features")
    # Feature cards rendered three-across; each entry is (heading, blurb).
    feature_cards = [
        ("### ๐ท Visual Analysis",
         "Upload images to analyze facial expressions and emotions."),
        ("### ๐ Emotion Tracking",
         "Track emotions over time with detailed analytics. (Coming soon)"),
        ("### ๐ง AI Recommendations",
         "Get personalized recommendations based on your emotional state. (Coming soon)"),
    ]
    for column, (heading, blurb) in zip(st.columns(3), feature_cards):
        with column:
            st.markdown(heading)
            st.markdown(blurb)

    st.subheader("Getting Started")
    st.markdown("""
1. Navigate to the **Visual Analysis** page
2. Upload an image containing faces
3. View the analysis results
""")
# Visual Analysis page
elif page == "Visual Analysis":
st.title("Visual Emotion Analysis")
st.markdown("Upload an image to analyze emotions")
# Initialize the image service for improved image handling
# STEP 3: Using the enhanced image service for validation of dimensions and quality
image_service = get_image_service()
# Implement Steps 1, 2 & 3: Interface design, format validation, and dimension/quality analysis
uploaded_file = image_service.setup_image_upload_interface(st)
# Model settings in a cleaner expandable section
with st.expander("Detection Settings"):
# Add explanation text about the detection methods
detection_type = "Advanced Detection" if st.session_state.use_advanced_emotion else "Basic Detection"
st.markdown(f"""
### About the Detection Methods
Currently using: **{detection_type}**
- **Basic detection** is faster but less accurate. It works by analyzing simple facial features.
- **Advanced detection (DeepFace)** uses deep learning models that are trained on thousands of faces to recognize subtle emotional cues.
You can change the default detection method in the sidebar settings.
""")
# Display image and interface when uploaded
if uploaded_file is not None:
# We're removing redundant title and image display since our image service
# already handles this in the two-column layout
# Keep this for processing, but no display
file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
uploaded_file.seek(0) # Reset the file pointer for further processing
# Get image service for processing and validation
image_service = get_image_service()
# Validate image file - includes format, size, and dimensions
validation_result = image_service.validate_image_file(
uploaded_file,
check_content=True,
check_dimensions=True
)
if validation_result["valid"]:
# Process the uploaded image to improve quality if possible
# STEP 4: Add image preprocessing functionality
try:
# Load image for processing
img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
# Use the modular preprocessing UI handler
preprocessing_result = show_preprocessing_ui(image_service, img)
if not preprocessing_result.get("success", False):
st.error(f"Error in preprocessing: {preprocessing_result.get('message', 'Unknown error')}")
except Exception as e:
st.error(f"Error preprocessing image: {str(e)}")
logging.error(f"Preprocessing error: {str(e)}", exc_info=True)
# Continue with original code - now using PIL to open the file
image = Image.open(uploaded_file)
# Add person name field
st.subheader("2. Person Information")
st.markdown("Enter the name of the person in the image")
person_name = st.text_input(
"Person Name",
key="person_name",
help="Enter the name of the person whose emotions you want to analyze"
)
# Process button - disabled if no name entered
process_button = st.button(
"Analyze Image",
disabled=(not person_name or len(person_name.strip()) == 0),
help="You must enter a person name before analyzing"
)
# Show warning if name field is empty
if not person_name or len(person_name.strip()) == 0:
st.warning("Please enter the person's name before analyzing the image")
# Only proceed if button is clicked and name is provided
if process_button:
# Validate person name again for safety
if not person_name or len(person_name.strip()) == 0:
st.error("Person name is required. Please enter a name to proceed.")
else:
with st.spinner("Processing image..."):
# Save the uploaded file
success, message, file_path = save_uploaded_file(
uploaded_file,
settings.UPLOADS_DIR
)
if not success:
st.error(message)
else:
# Add to history
st.session_state.upload_history.append({
"timestamp": datetime.now().isoformat(),
"file_path": file_path,
"file_name": uploaded_file.name
})
# Prepare data for processing
process_data = {
"image_path": file_path,
"person_name": person_name.strip(),
"detection_confidence": confidence_threshold,
"use_advanced_emotion": st.session_state.use_advanced_emotion
}
# STEP 4: Get the appropriate image for processing (original or preprocessed)
if "original_image" in st.session_state:
processing_img = get_processing_image(image_service, st.session_state.original_image)
# Convert to proper format if needed for agent processing
if "using_preprocessed_image" in st.session_state and st.session_state["using_preprocessed_image"]:
# Make sure we're using the preprocessed image for processing
process_data["use_preprocessed_image"] = True
process_data["preprocessed_image_path"] = st.session_state.get("preprocessed_image_path", "")
# Process the image
agent_manager = get_agent_manager()
results = agent_manager.process_visual(process_data)
# Display results
st.subheader("Analysis Results")
if "error" in results:
st.error(results["error"])
else:
# Display faces detected
st.markdown(f"**Faces Detected:** {results['face_count']}")
# Save analysis results to the database
try:
# Clean the person name
clean_name = person_name.strip()
# Save to database with person name as tag
# [April 2025] - Updated tag handling to use person name as the sole tag
# This ensures consistency with the local version and improves filtering
# Previously used generic tags like 'emotion_analysis' and emotion names
db_service = get_database_service()
result_id = db_service.save_analysis_results(
session_id=st.session_state.session_id,
image_path=file_path,
results=results,
tags=[clean_name]
)
# Print debugging information
print(f"[DEBUG] Analysis saved with ID: {result_id}")
print(f"[DEBUG] Person tag: {clean_name}")
st.success(f"Analysis saved to history with tag: {clean_name}")
except Exception as e:
print(f"[DEBUG] Error saving analysis results: {e}")
logger.error(f"Error saving analysis results: {e}")
if results["face_count"] > 0:
st.markdown("### Detected Faces")
# Load image for visualization
img_path = os.path.join(settings.STATIC_DIR, os.path.relpath(file_path, start=settings.STATIC_DIR))
img = cv2.imread(file_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Create a clean copy of the image without annotations
clean_img = img.copy()
# Draw bounding boxes and labels
for i, face in enumerate(results["faces"]):
x1, y1, x2, y2 = face["bbox"]
# Get emotion and corresponding color
emotion = face["emotion"]
confidence = face.get("confidence", 0.55)
# Create color based on emotion - default to green if no matching service
color = (0, 255, 0) # Default color (green)
# Draw rectangle with emotion-based color
cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
# Draw just the face number in a small font at the top-left corner
# to minimize overlap issues
cv2.putText(
img,
f"{i+1}",
(x1 + 5, y1 + 15),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, # Smaller font
color,
2
)
# Display the annotated image
st.image(img, caption="Analysis Results", use_column_width=True)
# Create tabbed interface for emotion details with clear numbering
face_tabs = [f"1. Face {i+1}" for i in range(len(results["faces"]))]
tabs = st.tabs(face_tabs)
# Display detailed emotion information for each face
for i, (tab, face) in enumerate(zip(tabs, results["faces"])):
with tab:
# Create two columns for layout
col1, col2 = st.columns([2, 3])
with col1:
# 1. Display the primary emotion with text representation
emotion = face["emotion"]
emotion_name = emotion.capitalize()
# Use text representation instead of emoji to avoid display issues
emotion_icons = {
'joy': '(Happy)',
'sadness': '(Sad)',
'anger': '(Angry)',
'fear': '(Afraid)',
'surprise': '(Surprised)',
'disgust': '(Disgusted)',
'neutral': '(Neutral)',
'unknown': '(Unknown)'
}
emotion_text = emotion_icons.get(emotion, '')
st.markdown(f"## {emotion_name} {emotion_text}")
st.markdown(f"**Confidence:** {face.get('confidence', 0.0):.2f}")
# Show advanced data if available from DeepFace
if 'age' in face:
st.markdown(f"**Age Estimate:** {face['age']}")
if 'gender' in face:
gender = face['gender']
# Capitalize first letter
gender = gender[0].upper() + gender[1:].lower() if len(gender) > 0 else gender
st.markdown(f"**Gender Estimate:** {gender}")
# Get facial features
features = face.get("features", {})
# Fallback to emotion_features if features is not present
if not features and "emotion_features" in face:
features = face.get("emotion_features", {})
# Create metrics in a row - clearly labeled
st.markdown("### 1. Facial Metrics")
cols = st.columns(3)
with cols[0]:
st.metric("Brightness", f"{features.get('brightness', 0):.1f}")
with cols[1]:
st.metric("Contrast", f"{features.get('contrast', 0):.1f}")
with cols[2]:
st.metric("Symmetry", f"{features.get('symmetry', 0):.1f}")
with col2:
# 2. Create a bar chart of all emotions
# Obtener emociones o usar valores predeterminados
# Asegurar que todas las emociones estรฉn presentes
all_emotions = {
'anger': 0.0,
'disgust': 0.0,
'fear': 0.0,
'joy': 0.0,
'neutral': 0.0,
'sadness': 0.0,
'surprise': 0.0
}
# Actualizar con valores detectados
emotions = face.get('emotions', {'neutral': 1.0})
for emotion, value in emotions.items():
if emotion in all_emotions:
all_emotions[emotion] = value
# Create a title for the chart section
st.markdown("### 2. Emotion Confidence")
# Display the bar chart
import pandas as pd
emotions_df = pd.DataFrame({
'Emotion': list(all_emotions.keys()),
'Confidence': list(all_emotions.values())
})
# First show the chart
st.bar_chart(emotions_df.set_index('Emotion'))
# Then show the exact values in a clean format
values_col1, values_col2, values_col3 = st.columns(3)
# Split the emotion values into 3 columns for better presentation
emotions_list = list(all_emotions.items())
chunk_size = (len(emotions_list) + 2) // 3 # Ceiling division
chunks = [emotions_list[i:i + chunk_size] for i in range(0, len(emotions_list), chunk_size)]
# Add values to each column
for i, col in enumerate([values_col1, values_col2, values_col3]):
if i < len(chunks):
with col:
for emotion, value in chunks[i]:
# Format to 2 decimal places
st.markdown(f"**{emotion.capitalize()}**: {value:.2f}")
# 3. Display features that contributed to the emotion
st.markdown("### 3. Analysis Features")
# Explanation text with numbered sequence
st.markdown("""
### 4. How We Determined This Emotion
1. **Face Detection**: We located the face in the image
2. **Feature Extraction**: We analyzed brightness, contrast, and symmetry
3. **Emotion Classification**: We matched these features to emotional patterns
""")
# Display detailed results in expandable section with improved title and description
with st.expander("Technical Data (For Developers)"):
st.markdown("""
This section displays the raw JSON data from the analysis process.
It's intended for developers and technical users who need access to the exact values and parameters used in the emotion detection process.
**Uses for this data:**
- Transparency on how the system works
- Debugging and development
- Access to precise numerical values
- Export for use in other systems or tools
""")
st.json(results)
else:
st.info("No faces were detected in the image. Please try another image.")
except Exception as e:
logger.error(f"Error processing image: {e}")
st.error(f"Error processing image: {str(e)}")
# History page
elif page == "History":
    st.header("Analysis History")
    st.markdown("View your previous analyses and export results.")

    # Initialize database service
    db_service = get_database_service()

    # Numbered tabs for the three history views
    history_tabs = ["1. Recent Analyses", "2. Statistics", "3. Export Data"]
    tab1, tab2, tab3 = st.tabs(history_tabs)

    with tab1:
        st.subheader("Recent Emotion Analyses")
        # Collect the distinct person tags stored with past analyses.
        # [April 2025] - Person filtering system synchronized with local version
        # to show actual person names instead of emotion tags.
        try:
            all_analyses = db_service.get_history(session_id=st.session_state.session_id, limit=100)
            person_tags = set()
            for analysis in all_analyses:
                if 'tags' in analysis and analysis['tags']:
                    for tag in analysis['tags']:
                        person_tags.add(tag)
            person_tags = sorted(list(person_tags))
            # Offer a person filter only when at least one tag exists.
            selected_person = None
            if person_tags:
                filter_options = ["All People"] + person_tags
                selected_filter = st.selectbox(
                    "Filter by Person",
                    options=filter_options,
                    index=0,
                    help="Select a person to filter the analysis history"
                )
                if selected_filter != "All People":
                    selected_person = selected_filter
                    st.info(f"Showing analyses for: **{selected_filter}**")
        except Exception as e:
            # Fall back to an unfiltered view if tag retrieval fails.
            logger.error(f"Error retrieving tags: {e}")
            person_tags = []
            selected_person = None

        # Get analysis history from database, filtered by person if selected
        analyses = []
        try:
            if selected_person:
                # For simplicity, retrieve recent analyses and filter in Python.
                # In a production app the filter would live in the database query.
                all_analyses = db_service.get_history(session_id=st.session_state.session_id, limit=20)
                analyses = [a for a in all_analyses if 'tags' in a and a['tags'] and selected_person in a['tags']]
            else:
                analyses = db_service.get_history(session_id=st.session_state.session_id, limit=10)
            print(f"[DEBUG] Found {len(analyses)} analyses to display")
            for a in analyses:
                print(f"[DEBUG] Analysis ID: {a.get('id', 'unknown')} Timestamp: {a.get('timestamp', 'unknown')}")
        except Exception as e:
            logger.error(f"Error retrieving analyses: {e}")
            print(f"[DEBUG] Error retrieving analyses: {e}")

        if analyses:
            # One expander per analysis; the most recent one starts open.
            for i, analysis in enumerate(analyses):
                with st.expander(f"Analysis #{analysis['id']} - {analysis['timestamp'][:16]}", expanded=(i==0)):
                    # Two-column layout: thumbnail on the left, details on the right.
                    col1, col2 = st.columns([1, 2])
                    with col1:
                        # Show image if available and path exists
                        image_path = analysis['image_path']
                        if os.path.exists(image_path):
                            try:
                                img = Image.open(image_path)
                                st.image(img, caption=f"Analyzed Image", use_column_width=True)
                            except Exception as e:
                                st.error(f"Unable to load image: {str(e)}")
                        else:
                            st.warning("Image file no longer available")
                    with col2:
                        # Display analysis details
                        st.markdown(f"**Date:** {analysis['timestamp'][:19]}")
                        st.markdown(f"**Faces Detected:** {analysis['face_count']}")
                        # Show person name if available
                        if 'tags' in analysis and analysis['tags']:
                            st.markdown(f"**Person:** {analysis['tags'][0]}")
                        # Results may be stored as a dict or as a JSON string.
                        if isinstance(analysis['results'], dict):
                            results_dict = analysis['results']
                        else:
                            results_dict = json.loads(analysis['results'])
                        using_advanced = False
                        has_advanced_data = False
                        age_data = None
                        gender_data = None
                        # Advanced (DeepFace) metadata is read from the first face only.
                        if 'faces' in results_dict and len(results_dict['faces']) > 0:
                            first_face = results_dict['faces'][0]
                            using_advanced = first_face.get('using_advanced', False)
                            if 'age' in first_face:
                                has_advanced_data = True
                                age_data = first_face['age']
                            if 'gender' in first_face:
                                has_advanced_data = True
                                gender_data = first_face['gender']
                        # Show detection method used
                        if using_advanced:
                            st.markdown("**Detection:** DeepFace", unsafe_allow_html=True)
                        # Show advanced data if available
                        if has_advanced_data:
                            st.markdown("### Advanced Data")
                            if age_data is not None:
                                st.markdown(f"**Age Estimate:** {age_data}")
                            if gender_data is not None:
                                # Capitalize gender label for display
                                if isinstance(gender_data, str) and len(gender_data) > 0:
                                    gender_data = gender_data[0].upper() + gender_data[1:].lower() if len(gender_data) > 0 else gender_data
                                st.markdown(f"**Gender Estimate:** {gender_data}")
                        # Show per-face emotion data
                        if analysis['face_count'] > 0:
                            st.markdown("### 1. Detected Emotions")
                            for j, face in enumerate(results_dict['faces']):
                                # Get emotion data (new key with legacy fallback)
                                emotion = face['emotion'].capitalize()
                                confidence = face.get('emotion_confidence', face.get('confidence', 0))
                                # Show badge for DeepFace if used
                                if face.get('using_advanced', False):
                                    st.markdown(f"**Face {j+1}:** {emotion} DeepFace (Confidence: {confidence:.2f})", unsafe_allow_html=True)
                                else:
                                    st.markdown(f"**Face {j+1}:** {emotion} (Confidence: {confidence:.2f})")
                                # Mini bar chart of the per-face emotion scores
                                if 'emotions' in face:
                                    emotions = face['emotions']
                                    emotion_df = pd.DataFrame({
                                        'Emotion': [k.capitalize() for k in emotions.keys()],
                                        'Score': list(emotions.values())
                                    })
                                    emotion_df = emotion_df.sort_values('Score', ascending=False)
                                    st.bar_chart(emotion_df.set_index('Emotion'), height=150)
                    # Option to delete this analysis from the database
                    if st.button(f"Delete Analysis #{analysis['id']}", key=f"del_{analysis['id']}"):
                        if db_service.delete_record(analysis['id']):
                            st.success("Analysis deleted successfully!")
                            st.experimental_rerun()
                        else:
                            st.error("Failed to delete analysis")
        else:
            st.info("No previous analyses found in the database.")

        # Uploads from this session that have not been analyzed/saved yet
        if st.session_state.upload_history:
            st.markdown("### Recent Uploads (Not yet saved to database)")
            for i, upload in enumerate(st.session_state.upload_history):
                st.markdown(f"**{i+1}. {upload['file_name']}**")
                st.markdown(f"* Timestamp: {upload['timestamp']}")
    with tab2:
        st.subheader("Emotion Analytics")
        # Build the person filter (same tag-gathering pattern as the other tabs).
        try:
            all_analyses = db_service.get_history(session_id=st.session_state.session_id, limit=100)
            person_tags = set()
            for analysis in all_analyses:
                if 'tags' in analysis and analysis['tags']:
                    for tag in analysis['tags']:
                        person_tags.add(tag)
            person_tags = sorted(list(person_tags))
            # Create a filter dropdown if there are person tags
            selected_person = None
            if person_tags:
                filter_options = ["All People"] + person_tags
                selected_filter = st.selectbox(
                    "1. Select Person to Analyze",
                    options=filter_options,
                    index=0,
                    help="Select a person to view their emotion statistics"
                )
                if selected_filter != "All People":
                    selected_person = selected_filter
        except Exception as e:
            logger.error(f"Error retrieving tags for statistics: {e}")
            person_tags = []
            selected_person = None

        # NOTE(review): selected_person is collected above but never passed to
        # get_emotion_stats, so the person filter currently has no effect on
        # these statistics — confirm whether the DB API supports tag filtering.
        emotion_stats = db_service.get_emotion_stats(session_id=st.session_state.session_id)
        if emotion_stats:
            # Chart of emotion frequencies across all stored analyses
            st.markdown("### 2. Emotion Distribution")
            stats_df = pd.DataFrame({
                'Emotion': list(emotion_stats.keys()),
                'Frequency': list(emotion_stats.values())
            })
            # Sort by frequency for better visualization
            stats_df = stats_df.sort_values('Frequency', ascending=False)
            st.bar_chart(stats_df.set_index('Emotion'))
            # Exact percentage values, laid out three metrics per row
            st.markdown("### 3. Detailed Percentages")
            cols = st.columns(3)
            for i, (emotion, frequency) in enumerate(zip(stats_df['Emotion'], stats_df['Frequency'])):
                col_idx = i % 3
                with cols[col_idx]:
                    st.metric(
                        emotion.capitalize(),
                        f"{frequency:.1%}",
                        delta=None
                    )
            # Add explanation of statistics
            st.markdown("---")
            st.markdown("### 4. Understanding These Statistics")
            st.markdown("""
The data above shows the distribution of emotions detected across all analyses:
1. **Emotion Distribution** - The bar chart visualizes the relative frequency of each emotion
2. **Detailed Percentages** - Exact percentage values for each emotion detected
3. **Sample Size** - Based on all faces detected in the selected analyses
""")
        else:
            st.info("No emotion data available for analysis yet.")
    with tab3:
        st.subheader("Export Analysis Data")
        # Optional person filter applied to the exported records.
        selected_person_tag = None
        try:
            all_analyses = db_service.get_history(session_id=st.session_state.session_id, limit=100)
            person_tags = set()
            for analysis in all_analyses:
                if 'tags' in analysis and analysis['tags']:
                    for tag in analysis['tags']:
                        person_tags.add(tag)
            person_tags = sorted(list(person_tags))
            # Create a person filter dropdown if person tags exist
            if person_tags:
                person_options = ["All People"] + person_tags
                person_filter = st.selectbox(
                    "1. Select Person to Export",
                    options=person_options,
                    index=0,
                    help="Filter export data to include only analyses for a specific person"
                )
                if person_filter != "All People":
                    selected_person_tag = person_filter
                    st.info(f"Export will only include analyses for: {selected_person_tag}")
        except Exception as e:
            logger.error(f"Error retrieving person tags for export: {e}")

        # Export options: format, scope, and record limit
        export_type = st.radio(
            "2. Select Export Format",
            ["JSON", "CSV"],
            horizontal=True
        )
        export_scope = st.radio(
            "3. Select Data Scope",
            ["Current Session", "All Sessions"],
            horizontal=True
        )
        # None means "all sessions" for the database export query below
        session_id = st.session_state.session_id if export_scope == "Current Session" else None
        # Option to limit number of records
        record_limit = st.slider(
            "4. Maximum Records to Export",
            min_value=1,
            max_value=100,
            value=20,
            step=1
        )

        # Generate export button
        if st.button("Generate Export"):
            with st.spinner("Preparing export..."):
                # Get export data from the database
                export_data = db_service.export_data(
                    session_id=session_id,
                    limit=record_limit
                )
                # Apply the person filter in Python and keep metadata in sync.
                if selected_person_tag and 'analyses' in export_data:
                    filtered_analyses = []
                    for analysis in export_data['analyses']:
                        if 'tags' in analysis and analysis['tags'] and selected_person_tag in analysis['tags']:
                            filtered_analyses.append(analysis)
                    export_data['analyses'] = filtered_analyses
                    export_data['metadata']['record_count'] = len(filtered_analyses)
                    export_data['metadata']['person_filter'] = selected_person_tag
                if not export_data or not export_data.get('analyses'):
                    st.warning("No data available to export")
                else:
                    st.success(f"Export generated with {len(export_data.get('analyses', []))} analyses")
                    # Offer a download in the chosen format
                    if export_type == "JSON":
                        json_str = export_to_json(export_data)
                        filename = f"emotion_analysis_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
                        st.download_button(
                            label="Download JSON",
                            data=json_str,
                            file_name=filename,
                            mime="application/json"
                        )
                    else:  # CSV
                        csv_str = export_to_csv(export_data)
                        filename = f"emotion_analysis_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
                        st.download_button(
                            label="Download CSV",
                            data=csv_str,
                            file_name=filename,
                            mime="text/csv"
                        )
                    # Summary statistics for the exported data
                    summary = generate_emotion_summary(export_data)
                    st.markdown("### Export Summary")
                    st.markdown(f"**Total Analyses:** {summary['total_analyses']}")
                    st.markdown(f"**Total Faces Analyzed:** {summary['total_faces']}")
                    if summary['total_faces'] > 0:
                        # Display emotion distribution for the export
                        st.markdown("**Emotion Distribution:**")
                        emotions_df = pd.DataFrame({
                            'Emotion': list(summary['emotion_percentages'].keys()),
                            'Percentage': list(summary['emotion_percentages'].values())
                        })
                        # Sort by percentage
                        emotions_df = emotions_df.sort_values('Percentage', ascending=False)
                        st.bar_chart(emotions_df.set_index('Emotion'))

        # Static explanation of the export formats
        st.markdown("---")
        st.markdown("### 5. About Export Formats")
        st.markdown("""
The exported data includes analysis details in your chosen format:
1. **JSON Format** - Complete data structure with all details, ideal for further processing
2. **CSV Format** - Tabular data format, can be opened in spreadsheet software
3. **Data Contents** - Each export includes timestamps, emotion labels, confidence values, and facial features
4. **Person Information** - If you specified a person name, it's included with each analysis
""")
# About page
elif page == "About":
    st.header("About EmotionMirror")
    # Static project description; no interactive elements on this page.
    st.markdown("""
## EmotionMirror: Emotional Analysis System
EmotionMirror is an application that uses computer vision and artificial intelligence
to analyze emotions from facial expressions and body language.
### Technology Stack
* **Streamlit**: For the user interface
* **YOLOv8**: For object detection, pose estimation, and facial analysis
* **Agent Framework**: A custom multi-agent system for coordinated analysis
### Future Features
* Enhanced emotion recognition with Hume.ai integration
* Temporal emotion tracking and pattern analysis
* Personalized recommendations based on emotional states
* Guided emotional wellness sessions
### Privacy
* All image processing is done locally
* We don't store your images after processing
* No personal data is shared with third parties
""")

# Footer (rendered on every page, after all page branches)
st.markdown("---")
st.markdown(" EmotionMirror | Developed as a prototype application")