Spaces:
Sleeping
Sleeping
Commit
·
f7e620e
0
Parent(s):
Initial commit for EmotionMirror final project
Browse files- .gitattributes +35 -0
- .gitignore +47 -0
- .streamlit/config.toml +17 -0
- Dockerfile +36 -0
- README-HF.md +173 -0
- README-PREPROCESSING.md +58 -0
- README.md +222 -0
- agent_framework/__init__.py +4 -0
- agent_framework/agent_manager.py +91 -0
- agent_framework/base_agent.py +58 -0
- agent_framework/visual_agent.py +194 -0
- app.py +843 -0
- app.py.backup +927 -0
- config/__init__.py +3 -0
- config/settings.py +102 -0
- database/__init__.py +6 -0
- database/db_manager.py +463 -0
- download_haarcascade.py +49 -0
- finalproject/Dockerfile +36 -0
- finalproject/app.py +236 -0
- finalproject/packages.txt +7 -0
- finalproject/requirements.txt +9 -0
- packages.txt +7 -0
- requirements.txt +15 -0
- services/__init__.py +37 -0
- services/database_service.py +397 -0
- services/deepface_emotion_service.py +269 -0
- services/emotion_service.py +381 -0
- services/image_service.py +828 -0
- services/model_service.py +113 -0
- utils/__init__.py +4 -0
- utils/download_models.py +100 -0
- utils/export_utils.py +201 -0
- utils/file_utils.py +69 -0
- utils/image_visualization.py +283 -0
- utils/page_handlers.py +290 -0
- utils/preprocessing_ui.py +336 -0
.gitattributes
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Ignorar modelos grandes
|
| 2 |
+
*.pt
|
| 3 |
+
|
| 4 |
+
# Entornos virtuales
|
| 5 |
+
venv/
|
| 6 |
+
.venv/
|
| 7 |
+
ENV/
|
| 8 |
+
|
| 9 |
+
# Python
|
| 10 |
+
__pycache__/
|
| 11 |
+
*.py[cod]
|
| 12 |
+
*$py.class
|
| 13 |
+
*.so
|
| 14 |
+
.Python
|
| 15 |
+
|
| 16 |
+
# Archivos de caché y bytecode
|
| 17 |
+
.cache/
|
| 18 |
+
.pytest_cache/
|
| 19 |
+
|
| 20 |
+
# Archivos de distribución
|
| 21 |
+
dist/
|
| 22 |
+
build/
|
| 23 |
+
*.egg-info/
|
| 24 |
+
|
| 25 |
+
# Archivos de registro y bases de datos
|
| 26 |
+
*.log
|
| 27 |
+
*.db
|
| 28 |
+
*.sqlite
|
| 29 |
+
*.sqlite3
|
| 30 |
+
|
| 31 |
+
# Archivos de imagen y uploads (se deben crear en tiempo de ejecución)
|
| 32 |
+
/static/uploads/*
|
| 33 |
+
/static/results/*
|
| 34 |
+
!/static/uploads/.gitkeep
|
| 35 |
+
!/static/results/.gitkeep
|
| 36 |
+
|
| 37 |
+
# Directorios de modelos (muy pesados para git)
|
| 38 |
+
/models/*
|
| 39 |
+
!/models/.gitkeep
|
| 40 |
+
|
| 41 |
+
# Archivos de configuración locales
|
| 42 |
+
.env
|
| 43 |
+
.env.local
|
| 44 |
+
|
| 45 |
+
# Archivos del sistema operativo
|
| 46 |
+
.DS_Store
|
| 47 |
+
Thumbs.db
|
.streamlit/config.toml
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[theme]
|
| 2 |
+
primaryColor = "#FF4B4B"
|
| 3 |
+
backgroundColor = "#0E1117"
|
| 4 |
+
secondaryBackgroundColor = "#262730"
|
| 5 |
+
textColor = "#FAFAFA"
|
| 6 |
+
font = "sans serif"
|
| 7 |
+
|
| 8 |
+
[server]
|
| 9 |
+
port = 8501
|
| 10 |
+
headless = true
|
| 11 |
+
enableCORS = false
|
| 12 |
+
enableXsrfProtection = false
|
| 13 |
+
maxUploadSize = 10
|
| 14 |
+
|
| 15 |
+
[browser]
|
| 16 |
+
serverAddress = "0.0.0.0"
|
| 17 |
+
gatherUsageStats = false
|
Dockerfile
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.10-slim
|
| 2 |
+
|
| 3 |
+
WORKDIR /app
|
| 4 |
+
|
| 5 |
+
# Instalar dependencias esenciales del sistema para OpenCV
|
| 6 |
+
RUN apt-get update && apt-get install -y \
|
| 7 |
+
libgl1-mesa-glx \
|
| 8 |
+
libglib2.0-0 \
|
| 9 |
+
libgomp1 \
|
| 10 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 11 |
+
|
| 12 |
+
# Configurar variables de entorno para Matplotlib y Ultralytics
|
| 13 |
+
ENV MPLCONFIGDIR=/tmp/matplotlib
|
| 14 |
+
ENV YOLO_CONFIG_DIR=/tmp/ultralytics
|
| 15 |
+
|
| 16 |
+
# Crear directorios necesarios
|
| 17 |
+
RUN mkdir -p /app/static /app/models /app/database
|
| 18 |
+
|
| 19 |
+
# Copiar requirements.txt primero para aprovechar caché de Docker
|
| 20 |
+
COPY requirements.txt .
|
| 21 |
+
|
| 22 |
+
# Instalar dependencias
|
| 23 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 24 |
+
|
| 25 |
+
# Copiar el resto de archivos
|
| 26 |
+
COPY . .
|
| 27 |
+
|
| 28 |
+
# Crear directorios necesarios para la aplicación
|
| 29 |
+
RUN mkdir -p /app/static/uploads /app/static/images /app/database \
|
| 30 |
+
&& chmod -R 777 /app /tmp
|
| 31 |
+
|
| 32 |
+
# Exponer el puerto que utiliza Streamlit por defecto
|
| 33 |
+
EXPOSE 8501
|
| 34 |
+
|
| 35 |
+
# Comando para ejecutar la aplicación
|
| 36 |
+
CMD ["streamlit", "run", "app.py", "--server.headless", "true", "--server.port=8501", "--server.address=0.0.0.0"]
|
README-HF.md
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# EmotionMirror - Facial Emotion Analysis with Computer Vision
|
| 2 |
+
|
| 3 |
+
EmotionMirror is an emotional analysis application based on computer vision that uses artificial intelligence models to detect and analyze emotions from facial expressions.
|
| 4 |
+
|
| 5 |
+
## Key Features
|
| 6 |
+
|
| 7 |
+
- **Facial Analysis**: Detection of faces and recognition of basic emotions
|
| 8 |
+
- **Results Visualization**: Clear presentation of data with numbered and sequential text
|
| 9 |
+
- **Intuitive Interface**: Minimalist design with well-defined sections
|
| 10 |
+
- **Group Analysis**: Process group photos with up to 5 people
|
| 11 |
+
- **Mandatory Labeling**: All faces in group photos must be labeled with names
|
| 12 |
+
- **Comprehensive History**: View both group analyses and individual analyses from group photos
|
| 13 |
+
- **Enhanced Visualization**: View individual analyses with highlighted faces in their original group context
|
| 14 |
+
- **Detailed Facial Metrics**: Shows brightness, contrast, and symmetry metrics for each face
|
| 15 |
+
- **Demographic Analysis**: Includes age and gender estimation for each individual
|
| 16 |
+
- **Precise Emotion Values**: Displays numerical confidence values for all detected emotions
|
| 17 |
+
|
| 18 |
+
## Technical Architecture
|
| 19 |
+
|
| 20 |
+
EmotionMirror uses a modular architecture to improve maintainability and separation of concerns:
|
| 21 |
+
|
| 22 |
+
### Module Structure
|
| 23 |
+
|
| 24 |
+
```
|
| 25 |
+
EmotionMirror/
|
| 26 |
+
├── app.py # Streamlined main application entry point
|
| 27 |
+
├── controllers/ # UI and business logic controllers
|
| 28 |
+
│ ├── group_analysis_controller.py
|
| 29 |
+
│ ├── ui_controller.py # Main UI controller
|
| 30 |
+
├── services/ # Specialized service modules
|
| 31 |
+
│ ├── history_service.py # History management
|
| 32 |
+
│ ├── history_display_service.py # History visualization
|
| 33 |
+
│ ├── visual_analysis_service.py # Analysis visualization
|
| 34 |
+
│ ├── emotion_service.py
|
| 35 |
+
│ ├── group_analysis_service.py
|
| 36 |
+
│ └── ...
|
| 37 |
+
```
|
| 38 |
+
|
| 39 |
+
### Implementation Principles
|
| 40 |
+
|
| 41 |
+
The application follows these key principles:
|
| 42 |
+
|
| 43 |
+
- **Modular Services**: Each service focuses on one specific functionality
|
| 44 |
+
- **Separation of Concerns**: Clear separation between UI, business logic, and data management
|
| 45 |
+
- **Error Isolation**: Errors in one module don't affect others
|
| 46 |
+
- **Code Reusability**: Common functions are centralized in appropriate service modules
|
| 47 |
+
|
| 48 |
+
## Recent Enhancements (April 2025)
|
| 49 |
+
|
| 50 |
+
### 1. Code Architecture Modularization
|
| 51 |
+
|
| 52 |
+
The application has been refactored into a modular architecture:
|
| 53 |
+
|
| 54 |
+
- **Modular Services Architecture**: Implementation split into specialized service modules
|
| 55 |
+
- **Organized Hierarchy**: Clear structure with controllers and services
|
| 56 |
+
- **Enhanced Maintainability**: Smaller, focused files instead of one large application file
|
| 57 |
+
- **Easier Debugging**: Isolated components simplify troubleshooting
|
| 58 |
+
|
| 59 |
+
### 2. UI Improvements and Error Fixes
|
| 60 |
+
|
| 61 |
+
- **Nested Columns Error Fix**: Resolved StreamlitAPIException by flattening UI components
|
| 62 |
+
- **Group Analysis Improvements**:
|
| 63 |
+
- Added summary section showing overall group emotion distribution
|
| 64 |
+
- Improved visual separation between individual analyses
|
| 65 |
+
- Smaller, more compact face images for better overview
|
| 66 |
+
- Added handling for false positives and untagged faces
|
| 67 |
+
- **Sequential Layout**: Hierarchical display with clearly marked sections
|
| 68 |
+
|
| 69 |
+
### 3. History Visualization Enhancements
|
| 70 |
+
|
| 71 |
+
- **Individual Face Extracts**: Each person's face is now extracted and displayed with their analysis
|
| 72 |
+
- **Two-Column Layout**: Images and analysis data clearly separated
|
| 73 |
+
- **Visual Separators**: Clear dividers between different people in group analyses
|
| 74 |
+
- **Group Summaries**: Added group-level emotion distribution charts
|
| 75 |
+
|
| 76 |
+
### 4. Visual Analysis Improvements
|
| 77 |
+
|
| 78 |
+
- **Equivalent Visualization**: Visual Analysis shows exactly the same level of detail as History
|
| 79 |
+
- **Structured Analysis Display**: Clearly numbered sections (1. Facial Metrics, 2. Emotion Confidence, 3. Analysis Features)
|
| 80 |
+
- **Detailed Metrics**: Enhanced display of brightness, contrast, symmetry, age, and gender
|
| 81 |
+
- **Emotion Values**: Precise numerical confidence values for all emotions
|
| 82 |
+
|
| 83 |
+
### 5. Bug Fixes & Technical Improvements
|
| 84 |
+
|
| 85 |
+
- **AttributeError Fix**: Resolved 'ContextMember' object has no attribute 'face_data' error
|
| 86 |
+
- **Error Handling**: Added graceful fallbacks when face extraction fails
|
| 87 |
+
- **Data Handling**: Enhanced robustness when handling various data formats
|
| 88 |
+
|
| 89 |
+
## Technologies
|
| 90 |
+
|
| 91 |
+
- Streamlit for the user interface
|
| 92 |
+
- YOLOv8 for facial detection and pose analysis
|
| 93 |
+
- Custom agent framework for analysis coordination
|
| 94 |
+
- DeepFace for advanced emotion detection (when enabled)
|
| 95 |
+
- OpenCV for image visualization and face highlighting
|
| 96 |
+
|
| 97 |
+
## Usage
|
| 98 |
+
|
| 99 |
+
1. Navigate to the "Visual Analysis" page
|
| 100 |
+
2. Upload an image containing faces
|
| 101 |
+
3. Click on "Detect Faces in Image"
|
| 102 |
+
4. For single face images:
|
| 103 |
+
- Enter the person's name
|
| 104 |
+
- Click "Analyze Individual"
|
| 105 |
+
5. For group images (2-5 faces):
|
| 106 |
+
- Select which faces to analyze using the checkboxes
|
| 107 |
+
- Provide names for each selected face (mandatory)
|
| 108 |
+
- Enter a group name
|
| 109 |
+
- Click "Save Group Analysis"
|
| 110 |
+
6. View analysis results in the History page, which includes:
|
| 111 |
+
- Individual analyses (both from single photos and group photos)
|
| 112 |
+
- Individual analyses from groups show the full image with the specific person highlighted
|
| 113 |
+
- Group analyses with aggregate statistics
|
| 114 |
+
- Detailed facial metrics, demographic information, and emotion confidence values for each analysis
|
| 115 |
+
|
| 116 |
+
## Advanced Features
|
| 117 |
+
|
| 118 |
+
### Context-Aware History Display
|
| 119 |
+
|
| 120 |
+
When viewing individual analyses that originated from group photos:
|
| 121 |
+
- The original group image is shown with the specific person highlighted
|
| 122 |
+
- A green bounding box marks the analyzed individual
|
| 123 |
+
- The person's name appears as a label over their face
|
| 124 |
+
- A caption indicates which group the analysis belongs to
|
| 125 |
+
|
| 126 |
+
### Enhanced Group Analysis Visualization
|
| 127 |
+
|
| 128 |
+
When viewing group analyses in the History section:
|
| 129 |
+
- Each person's face is extracted and displayed in its own section
|
| 130 |
+
- A two-column layout separates face images from analysis data
|
| 131 |
+
- Clear visual separators distinguish between different people
|
| 132 |
+
- Each analysis includes detailed metrics and emotion values in a consistent format
|
| 133 |
+
|
| 134 |
+
### Comprehensive Analysis Metrics
|
| 135 |
+
|
| 136 |
+
Each analysis (both individual and group) now includes:
|
| 137 |
+
|
| 138 |
+
1. **Facial Metrics**:
|
| 139 |
+
- Brightness: Average brightness level of the facial region
|
| 140 |
+
- Contrast: Contrast level of the facial area
|
| 141 |
+
- Symmetry: Score (0-1) indicating facial symmetry
|
| 142 |
+
|
| 143 |
+
2. **Demographic Estimation**:
|
| 144 |
+
- Age: Approximate age of the detected individual
|
| 145 |
+
- Gender: Estimated gender of the detected person
|
| 146 |
+
|
| 147 |
+
3. **Detailed Emotion Breakdown**:
|
| 148 |
+
- Bar chart visualization showing all emotion confidence levels
|
| 149 |
+
- Precise numerical values for each emotion (anger, disgust, fear, joy, neutral, sadness, surprise)
|
| 150 |
+
- Grid layout for easy comparison between emotions
|
| 151 |
+
|
| 152 |
+
4. **Structured Analysis Display**:
|
| 153 |
+
- Clearly numbered sections for easy navigation
|
| 154 |
+
- Consistent formatting across both individual and group analyses
|
| 155 |
+
- Tab-based interface for group analyses to easily view each individual
|
| 156 |
+
|
| 157 |
+
## Version Notes
|
| 158 |
+
|
| 159 |
+
Current version: 0.1.3 (Phase 1.3 - April 2025)
|
| 160 |
+
- Basic facial detection implementation
|
| 161 |
+
- Functional Streamlit user interface
|
| 162 |
+
- Basic emotion classification
|
| 163 |
+
- Group analysis with up to 5 people
|
| 164 |
+
- Mandatory face labeling system
|
| 165 |
+
- Improved history display showing both group and individual analyses
|
| 166 |
+
- Enhanced visualization showing highlighted individuals in original group photos
|
| 167 |
+
- Added detailed facial metrics (brightness, contrast, symmetry)
|
| 168 |
+
- Implemented demographic analysis (age, gender)
|
| 169 |
+
- Added precise numerical emotion confidence values
|
| 170 |
+
- Ensured consistent UI formatting with clearly numbered sections
|
| 171 |
+
- All text elements standardized in English for international accessibility
|
| 172 |
+
- Improved group analysis visualization with individual face images
|
| 173 |
+
- Fixed AttributeError in ContextMember object handling
|
README-PREPROCESSING.md
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Image Preprocessing Module (Step 4)
|
| 2 |
+
|
| 3 |
+
## Overview
|
| 4 |
+
The image preprocessing module enhances facial detection and emotion analysis by improving image quality before processing. This feature allows users to:
|
| 5 |
+
|
| 6 |
+
1. View a side-by-side comparison of original vs. processed images
|
| 7 |
+
2. Choose whether to use the original or improved image for analysis
|
| 8 |
+
3. Understand the technical benefits of preprocessing for facial analysis
|
| 9 |
+
|
| 10 |
+
## Key Components
|
| 11 |
+
|
| 12 |
+
### 1. Image Service (`services/image_service.py`)
|
| 13 |
+
The core preprocessing functionality is implemented in the ImageService class:
|
| 14 |
+
|
| 15 |
+
- **`preprocess_image()`**: Applies multiple image enhancements and returns both original and processed images
|
| 16 |
+
- **`resize_image()`**: Resizes images to optimal dimensions while maintaining aspect ratio
|
| 17 |
+
- **`adjust_brightness()`**: Increases or decreases image brightness based on analysis
|
| 18 |
+
- **`adjust_contrast()`**: Adapts image contrast for better facial feature visibility
|
| 19 |
+
- **`save_processed_image()`**: Saves processed images to temporary storage
|
| 20 |
+
|
| 21 |
+
### 2. Preprocessing UI (`utils/preprocessing_ui.py`)
|
| 22 |
+
A modular UI component that handles the preprocessing interface:
|
| 23 |
+
|
| 24 |
+
- **`display_preprocessing_comparison()`**: Shows side-by-side comparison of original vs. processed images
|
| 25 |
+
- **`setup_preprocessing_controls()`**: Provides buttons for selecting which image to use
|
| 26 |
+
- **`display_processing_status()`**: Shows status indicators for the current image selection
|
| 27 |
+
- **`get_processing_image()`**: Retrieves the appropriate image based on user selection
|
| 28 |
+
|
| 29 |
+
### 3. Visual Agent Integration (`agent_framework/visual_agent.py`)
|
| 30 |
+
The VisualAgent has been extended to support preprocessed images:
|
| 31 |
+
|
| 32 |
+
- Updated `process()` method to handle preprocessed image paths
|
| 33 |
+
- Added support for switching between original and processed images
|
| 34 |
+
|
| 35 |
+
## Technical Benefits for Facial Analysis
|
| 36 |
+
|
| 37 |
+
The preprocessing steps provide several benefits for facial detection and emotion analysis:
|
| 38 |
+
|
| 39 |
+
1. **Balanced contrast**: Enhances visibility of facial features while reducing shadows and highlights
|
| 40 |
+
2. **Optimal brightness**: Ensures facial features are clearly distinguishable without over-exposure
|
| 41 |
+
3. **Proper sizing**: Maintains ideal dimensions for detection algorithms to recognize facial landmarks
|
| 42 |
+
|
| 43 |
+
These improvements result in:
|
| 44 |
+
- More accurate emotion classification
|
| 45 |
+
- Better feature extraction (eyes, mouth, eyebrows)
|
| 46 |
+
- Reduced noise and artifacts
|
| 47 |
+
- More consistent performance across different lighting conditions
|
| 48 |
+
|
| 49 |
+
## Usage Flow
|
| 50 |
+
|
| 51 |
+
1. User uploads an image
|
| 52 |
+
2. System analyzes and applies appropriate preprocessing
|
| 53 |
+
3. User is shown a comparison of original vs. improved images
|
| 54 |
+
4. User selects which image to use for analysis
|
| 55 |
+
5. Analysis proceeds with the selected image
|
| 56 |
+
6. Status indicators keep the user informed about which image is being used
|
| 57 |
+
|
| 58 |
+
The preprocessing is adaptive, applying only the enhancements needed for each specific image.
|
README.md
ADDED
|
@@ -0,0 +1,222 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: EmotionMirror
|
| 3 |
+
emoji: 📊
|
| 4 |
+
colorFrom: indigo
|
| 5 |
+
colorTo: purple
|
| 6 |
+
sdk: streamlit
|
| 7 |
+
sdk_version: 1.26.0
|
| 8 |
+
app_file: app.py
|
| 9 |
+
pinned: false
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
# EmotionMirror
|
| 13 |
+
|
| 14 |
+
EmotionMirror is an emotional analysis application that uses computer vision and AI to analyze facial expressions and body language.
|
| 15 |
+
|
| 16 |
+
## Features
|
| 17 |
+
|
| 18 |
+
- **Visual Analysis**: Detect faces and analyze expressions
|
| 19 |
+
- **Emotion Tracking**: Track emotions over time (coming soon)
|
| 20 |
+
- **Multi-Agent System**: Coordinated analysis with specialized agents
|
| 21 |
+
- **Group Analysis**: Process images with multiple faces (up to 5), with mandatory face labeling
|
| 22 |
+
- **Comprehensive History**: View both group analyses and individual analyses from group photos
|
| 23 |
+
- **Hugging Face Integration**: Designed for deployment on Hugging Face Spaces
|
| 24 |
+
|
| 25 |
+
## Project Structure
|
| 26 |
+
|
| 27 |
+
```
|
| 28 |
+
EmotionMirror/
|
| 29 |
+
├── app.py # Main Streamlit application
|
| 30 |
+
├── config/ # Configuration
|
| 31 |
+
│ ├── __init__.py
|
| 32 |
+
│ └── settings.py # Centralized settings
|
| 33 |
+
├── agent_framework/ # Agent implementation
|
| 34 |
+
│ ├── __init__.py
|
| 35 |
+
│ ├── base_agent.py # Base agent class
|
| 36 |
+
│ ├── visual_agent.py # Visual processing agent
|
| 37 |
+
│ └── agent_manager.py # Agent coordinator
|
| 38 |
+
├── services/ # Modular services
|
| 39 |
+
│ ├── __init__.py
|
| 40 |
+
│ └── model_service.py # Model management service
|
| 41 |
+
├── utils/ # Utilities
|
| 42 |
+
│ ├── __init__.py
|
| 43 |
+
│ └── file_utils.py # File handling utilities
|
| 44 |
+
├── models/ # Pre-trained models
|
| 45 |
+
└── static/ # Static assets
|
| 46 |
+
├── uploads/ # Uploaded images
|
| 47 |
+
└── results/ # Processing results
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
## Requirements
|
| 51 |
+
|
| 52 |
+
- Python 3.9+
|
| 53 |
+
- Streamlit
|
| 54 |
+
- Ultralytics YOLO
|
| 55 |
+
- OpenCV
|
| 56 |
+
- Torch
|
| 57 |
+
- See requirements.txt for complete list
|
| 58 |
+
|
| 59 |
+
## Installation
|
| 60 |
+
|
| 61 |
+
1. Clone the repository
|
| 62 |
+
2. Install dependencies:
|
| 63 |
+
```
|
| 64 |
+
pip install -r requirements.txt
|
| 65 |
+
```
|
| 66 |
+
3. Run the application:
|
| 67 |
+
```
|
| 68 |
+
streamlit run app.py
|
| 69 |
+
```
|
| 70 |
+
|
| 71 |
+
## Deployment to Hugging Face Spaces
|
| 72 |
+
|
| 73 |
+
This application is designed to be deployed on Hugging Face Spaces. To deploy:
|
| 74 |
+
|
| 75 |
+
1. Push this repository to GitHub
|
| 76 |
+
2. Create a new Space on Hugging Face
|
| 77 |
+
3. Connect your GitHub repository
|
| 78 |
+
4. Set the Space SDK to "Streamlit"
|
| 79 |
+
5. The application will automatically deploy
|
| 80 |
+
|
| 81 |
+
## Development Roadmap
|
| 82 |
+
|
| 83 |
+
- **Phase 1**: Basic visual analysis with YOLO
|
| 84 |
+
- **Phase 2**: Database integration for emotion tracking
|
| 85 |
+
- **Phase 3**: Hume.ai integration and advanced multi-agent system
|
| 86 |
+
|
| 87 |
+
## Recent Changes to Analysis History
|
| 88 |
+
|
| 89 |
+
### Group Analysis Update (April 2025)
|
| 90 |
+
|
| 91 |
+
The Group Analysis and History functionality have been enhanced with several important improvements:
|
| 92 |
+
|
| 93 |
+
#### 1. Group Size Limitation
|
| 94 |
+
- **Updated Limit**: Group analysis is now limited to a maximum of 5 faces per image
|
| 95 |
+
- **Improved Performance**: This change improves processing speed and UI responsiveness
|
| 96 |
+
- **Clear User Feedback**: Warning messages clearly inform users when images exceed the limit
|
| 97 |
+
|
| 98 |
+
#### 2. Mandatory Face Labeling
|
| 99 |
+
- **Required Fields**: All selected faces in group analysis now require name labels
|
| 100 |
+
- **Validation System**: Prevents saving analyses with incomplete labeling
|
| 101 |
+
- **Visual Indicators**: Warning messages appear next to empty name fields
|
| 102 |
+
- **Enhanced UI**: Improved face cropping for clearer identification during labeling
|
| 103 |
+
|
| 104 |
+
#### 3. Comprehensive Analysis History
|
| 105 |
+
- **Dual Analysis Storage**: Group photos now generate both:
|
| 106 |
+
- One group analysis with aggregate statistics
|
| 107 |
+
- Individual analyses for each labeled person in the photo
|
| 108 |
+
- **Enhanced Filtering**: Filter history by person name across all analyses
|
| 109 |
+
- **Improved Navigation**: Clear separation between individual and group analyses
|
| 110 |
+
|
| 111 |
+
### Synchronization Update (April 2025)
|
| 112 |
+
|
| 113 |
+
The Analysis History functionality has been completely synchronized between the local and Hugging Face versions to ensure consistent behavior. Below are the key improvements:
|
| 114 |
+
|
| 115 |
+
#### 1. Tab Structure Enhancement
|
| 116 |
+
- Added the "Statistics" tab between "Recent Analyses" and "Export Data"
|
| 117 |
+
- Implemented clear sequential numbering (1, 2, 3...) for all sections
|
| 118 |
+
|
| 119 |
+
#### 2. Person Filtering System
|
| 120 |
+
- Fixed the person tag system to properly save and display person names
|
| 121 |
+
- Implemented dropdown filtering by person name across all History tabs
|
| 122 |
+
- Corrected tag saving to use actual person names instead of emotion labels
|
| 123 |
+
|
| 124 |
+
#### 3. Export Functionality Expansion
|
| 125 |
+
- Added "Current Session" vs "All Sessions" filtering options
|
| 126 |
+
- Implemented adjustable record limit for exports
|
| 127 |
+
- Enhanced person filtering for export data
|
| 128 |
+
|
| 129 |
+
#### 4. Data Visualization Improvements
|
| 130 |
+
- Improved record display with expandable sections
|
| 131 |
+
- Enhanced chart visualization for emotion distributions
|
| 132 |
+
- Added detailed display of advanced data (age, gender) when available
|
| 133 |
+
|
| 134 |
+
### UI Architecture Improvements (Late April 2025)
|
| 135 |
+
|
| 136 |
+
Several significant technical improvements have been implemented to enhance the robustness and maintainability of the application:
|
| 137 |
+
|
| 138 |
+
#### 1. Modular Architecture
|
| 139 |
+
- **Complete Refactoring**: Application has been reorganized into controllers, services, and utility modules
|
| 140 |
+
- **Dependency Injection**: Services now receive configuration rather than hardcoding it
|
| 141 |
+
- **Separation of Concerns**: UI logic, business logic, and data access have been separated
|
| 142 |
+
|
| 143 |
+
#### 2. Streamlit Column Nesting Fix
|
| 144 |
+
- **Problem Solved**: Fixed the Streamlit limitation that only allows one level of column nesting
|
| 145 |
+
- **Enhanced UI Components**: Redesigned UI elements using tabs and flat layouts instead of nested columns
|
| 146 |
+
- **Tab-Based Navigation**: Implemented tabs for content organization to avoid excessive column usage
|
| 147 |
+
|
| 148 |
+
#### 3. False Positive Detection
|
| 149 |
+
- **Detection Validation**: Added checks to warn users about potential false positives in face detection
|
| 150 |
+
- **Confidence Thresholds**: Clear warnings when detected faces have low confidence scores
|
| 151 |
+
- **Size Validation**: Alerts for unusually small or large detected face regions
|
| 152 |
+
|
| 153 |
+
#### 4. Static Asset Management
|
| 154 |
+
- **Robust Directory Creation**: Automatic creation of required directories like "static/" and "static/results/"
|
| 155 |
+
- **Image Fallbacks**: Graceful handling of missing images with base64 backups
|
| 156 |
+
- **Platform Compatibility**: Consistent path handling across both local and Hugging Face environments
|
| 157 |
+
|
| 158 |
+
### Technical Details
|
| 159 |
+
|
| 160 |
+
The Hugging Face version previously handled tags differently from the local version. This has been corrected to ensure complete consistency:
|
| 161 |
+
|
| 162 |
+
- **Old Method (Fixed)**: Saved generic tags like 'emotion_analysis' and emotion names
|
| 163 |
+
- **New Method**: Uses the person's name as the sole tag, identical to local version
|
| 164 |
+
|
| 165 |
+
This ensures that filters now show actual person names rather than emotion categories, making it easier to track analyses by individual.
|
| 166 |
+
|
| 167 |
+
### Additional Debugging
|
| 168 |
+
|
| 169 |
+
The Hugging Face version includes extra debugging statements to help troubleshoot database operations, especially for tag handling and record retrieval, which are particularly important given the platform's permission restrictions.
|
| 170 |
+
|
| 171 |
+
## License
|
| 172 |
+
|
| 173 |
+
This project is licensed under the MIT License - see the LICENSE file for details.
|
| 174 |
+
|
| 175 |
+
## Additional Information
|
| 176 |
+
|
| 177 |
+
### EmotionMirror - Facial Emotion Analysis with Computer Vision
|
| 178 |
+
|
| 179 |
+
EmotionMirror is an emotional analysis application based on computer vision that uses artificial intelligence models to detect and analyze emotions from facial expressions.
|
| 180 |
+
|
| 181 |
+
### Features
|
| 182 |
+
|
| 183 |
+
- **Facial Analysis**: Detection of faces and recognition of basic emotions
|
| 184 |
+
- **Results Visualization**: Clear presentation of data with numbered and sequential text
|
| 185 |
+
- **Intuitive Interface**: Minimalist design with well-defined sections
|
| 186 |
+
- **Group Analysis**: Process group photos with up to 5 people
|
| 187 |
+
- **Mandatory Labeling**: All faces in group photos must be labeled with names
|
| 188 |
+
- **Comprehensive History**: View both group analyses and individual analyses from group photos
|
| 189 |
+
|
| 190 |
+
### Technologies
|
| 191 |
+
|
| 192 |
+
- Streamlit for the user interface
|
| 193 |
+
- YOLOv8 for facial detection and pose analysis
|
| 194 |
+
- Custom agent framework for analysis coordination
|
| 195 |
+
- DeepFace for advanced emotion detection (when enabled)
|
| 196 |
+
|
| 197 |
+
### Usage
|
| 198 |
+
|
| 199 |
+
1. Navigate to the "Visual Analysis" page
|
| 200 |
+
2. Upload an image containing faces
|
| 201 |
+
3. Click on "Detect Faces in Image"
|
| 202 |
+
4. For single face images:
|
| 203 |
+
- Enter the person's name
|
| 204 |
+
- Click "Analyze Individual"
|
| 205 |
+
5. For group images (2-5 faces):
|
| 206 |
+
- Select which faces to analyze using the checkboxes
|
| 207 |
+
- Provide names for each selected face (mandatory)
|
| 208 |
+
- Enter a group name
|
| 209 |
+
- Click "Save Group Analysis"
|
| 210 |
+
6. View analysis results in the History page, which includes:
|
| 211 |
+
- Individual analyses (both from single photos and group photos)
|
| 212 |
+
- Group analyses with aggregate statistics
|
| 213 |
+
|
| 214 |
+
### Version Notes
|
| 215 |
+
|
| 216 |
+
Current version: 0.1.3 (Phase 1.3 - April 2025)
|
| 217 |
+
- Basic facial detection implementation
|
| 218 |
+
- Functional Streamlit user interface
|
| 219 |
+
- Basic emotion classification
|
| 220 |
+
- Group analysis with up to 5 people
|
| 221 |
+
- Mandatory face labeling system
|
| 222 |
+
- Improved history display showing both group and individual analyses
|
agent_framework/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Agent framework package for EmotionMirror application.
|
| 3 |
+
Contains agent implementations for visual processing and emotion analysis.
|
| 4 |
+
"""
|
agent_framework/agent_manager.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Agent manager for EmotionMirror application.
|
| 3 |
+
Coordinates communication between different agents.
|
| 4 |
+
"""
|
| 5 |
+
import logging
|
| 6 |
+
from typing import Dict, Any, Optional
|
| 7 |
+
|
| 8 |
+
from agent_framework.base_agent import BaseAgent
|
| 9 |
+
from agent_framework.visual_agent import VisualAgent
|
| 10 |
+
|
| 11 |
+
# Configure logging
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
class AgentManager:
    """Singleton coordinator for the application's agents.

    Keeps a registry of agent instances keyed by name and exposes
    convenience entry points (e.g. ``process_visual``) that dispatch
    requests to the appropriate agent.
    """

    _instance = None  # class-level cache holding the singleton instance

    def __new__(cls):
        """Create the singleton on first call; return the cached instance after."""
        if cls._instance is None:
            cls._instance = super(AgentManager, cls).__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        """Set up the registry and register default agents (runs only once)."""
        if self._initialized:
            return

        self._initialized = True
        logger.info("Initializing AgentManager")

        # Registry: agent name -> agent instance
        self.agents = {}

        self._initialize_agents()

    def _initialize_agents(self) -> None:
        """Register the agents available in this phase of the project."""
        # In Phase 1, we only have the VisualAgent
        self.register_agent(VisualAgent())

    def register_agent(self, agent: BaseAgent) -> None:
        """
        Add an agent to the registry, keyed by its ``name`` attribute.

        Args:
            agent: Agent instance to register
        """
        self.agents[agent.name] = agent
        logger.info(f"Agent registered: {agent.name}")

    def get_agent(self, agent_name: str) -> Optional[BaseAgent]:
        """
        Look up a registered agent by name.

        Args:
            agent_name: Name of the agent

        Returns:
            Agent instance, or None when no agent has that name
        """
        return self.agents.get(agent_name)

    def process_visual(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Dispatch visual data to the VisualAgent.

        Args:
            data: Input data for processing

        Returns:
            Processing results, or an error dictionary when the
            VisualAgent is not registered.
        """
        visual_agent = self.get_agent("VisualAgent")
        if visual_agent is None:
            logger.error("VisualAgent not found")
            return {"error": "VisualAgent not available"}
        return visual_agent.process(data)

    def list_agents(self) -> Dict[str, str]:
        """
        Summarize all registered agents.

        Returns:
            Dictionary of agent names and descriptions
        """
        return {agent_name: registered.description
                for agent_name, registered in self.agents.items()}
|
agent_framework/base_agent.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Base agent class for EmotionMirror application.
|
| 3 |
+
Provides the foundation for all agent implementations.
|
| 4 |
+
"""
|
| 5 |
+
from abc import ABC, abstractmethod
|
| 6 |
+
import logging
|
| 7 |
+
from typing import Dict, Any
|
| 8 |
+
|
| 9 |
+
class BaseAgent(ABC):
    """Abstract base class shared by every EmotionMirror agent.

    Subclasses must implement :meth:`process`; logging helpers and the
    standard string representation are provided here.
    """

    def __init__(self, name: str, description: str = ""):
        """
        Initialize the base agent.

        Args:
            name: Name of the agent (also used as the logger suffix)
            description: Description of the agent's purpose
        """
        self.name = name
        self.description = description
        # Each agent gets a dedicated child logger, e.g. "agent.VisualAgent"
        self.logger = logging.getLogger(f"agent.{name}")

    @abstractmethod
    def process(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Process data and return a result.

        Args:
            data: Input data to process

        Returns:
            Dictionary with processing results
        """
        pass

    def log_activity(self, message: str, level: str = 'info') -> None:
        """
        Emit a log record tagged with this agent's name.

        Args:
            message: Log message
            level: Log level (debug, info, warning, error, critical);
                   unrecognized values fall back to info
        """
        emit = {
            'debug': self.logger.debug,
            'info': self.logger.info,
            'warning': self.logger.warning,
            'error': self.logger.error,
            'critical': self.logger.critical,
        }.get(level.lower(), self.logger.info)
        emit(f"[{self.name}] {message}")

    def __str__(self) -> str:
        """Human-readable summary of the agent."""
        return f"Agent({self.name}): {self.description}"
|
agent_framework/visual_agent.py
ADDED
|
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Visual agent for EmotionMirror application.
|
| 3 |
+
Handles image processing and facial analysis.
|
| 4 |
+
"""
|
| 5 |
+
import cv2
|
| 6 |
+
import numpy as np
|
| 7 |
+
import logging
|
| 8 |
+
from typing import Dict, Any, List, Tuple
|
| 9 |
+
|
| 10 |
+
from agent_framework.base_agent import BaseAgent
|
| 11 |
+
from services.model_service import ModelService
|
| 12 |
+
from services import get_emotion_service
|
| 13 |
+
|
| 14 |
+
class VisualAgent(BaseAgent):
    """Agent for visual processing and emotion analysis.

    Runs two YOLO-style models (object detection and pose estimation) on an
    input image, crops detected persons, and sends each crop to the emotion
    service for classification.
    """

    def __init__(self):
        """Initialize the visual agent and its supporting services."""
        super().__init__(name="VisualAgent", description="Processes images to detect faces and emotions")
        self.model_service = ModelService()
        self.emotion_service = get_emotion_service()
        # Models are loaded lazily on first use; see _ensure_models_loaded()
        self.detection_model = None
        self.pose_model = None

    def _ensure_models_loaded(self) -> bool:
        """
        Ensure that required models are loaded.

        Returns:
            True if models are loaded successfully, False otherwise
        """
        try:
            if self.detection_model is None:
                self.log_activity("Loading detection model")
                self.detection_model = self.model_service.load_model('detection')

            if self.pose_model is None:
                self.log_activity("Loading pose model")
                self.pose_model = self.model_service.load_model('pose')

            return self.detection_model is not None and self.pose_model is not None
        except Exception as e:
            self.log_activity(f"Error loading models: {str(e)}", "error")
            return False

    def process(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Process an image to detect faces and basic expressions.

        Args:
            data: Dictionary with:
                - 'image_path': Path to the image
                - 'image': (Optional) numpy array of the image
                - 'confidence': Detection confidence threshold
                - 'use_preprocessed_image': (Optional) Whether to use preprocessed image
                - 'preprocessed_image_path': (Optional) Path to preprocessed image

        Returns:
            Dictionary with visual analysis results, or {'error': ...} on failure
        """
        # Ensure models are loaded
        if not self._ensure_models_loaded():
            return {"error": "Failed to load required models"}

        # Get image data
        image_path = data.get('image_path')
        image = data.get('image')
        confidence = data.get('confidence', 0.25)
        detection_confidence = data.get('detection_confidence', confidence)  # Support new parameter name

        # STEP 4: Handle preprocessed image
        use_preprocessed = data.get('use_preprocessed_image', False)
        preprocessed_path = data.get('preprocessed_image_path', None)

        if image_path is None and image is None:
            return {'error': 'Image or image path is required'}

        # Load image if path is provided
        # NOTE: when both 'image' and 'image_path' are given, the path wins
        if image_path is not None:
            try:
                # STEP 4: Choose between original and preprocessed image
                if use_preprocessed and preprocessed_path:
                    self.log_activity(f"Using preprocessed image from: {preprocessed_path}")
                    image = cv2.imread(preprocessed_path)
                    if image is None:
                        self.log_activity("Preprocessed image not found, falling back to original", "warning")
                        image = cv2.imread(image_path)
                else:
                    image = cv2.imread(image_path)

                if image is None:
                    return {'error': 'Failed to read image from provided path'}
            except Exception as e:
                self.log_activity(f"Error reading image: {e}", 'error')
                return {'error': f'Error reading image: {str(e)}'}

        # Process with detection model (for faces)
        self.log_activity("Running detection model")
        detection_results = self.detection_model(image, conf=detection_confidence)

        # Process with pose model (for body language)
        self.log_activity("Running pose model")
        pose_results = self.pose_model(image, conf=detection_confidence)

        # Extract faces (person detections)
        faces = self._extract_faces(detection_results, image)

        # Extract poses
        poses = self._extract_poses(pose_results)

        # Return combined results
        return {
            'faces': faces,
            'poses': poses,
            'face_count': len(faces),
            'timestamp': data.get('timestamp'),
            'used_preprocessed_image': use_preprocessed and preprocessed_path is not None
        }

    def _extract_faces(self, results: List, image: np.ndarray) -> List[Dict[str, Any]]:
        """
        Extract face information from detection results.

        Args:
            results: Detection model results
            image: Original image

        Returns:
            List of face data dictionaries
        """
        faces = []

        for r in results:
            boxes = r.boxes
            for box in boxes:
                # Filter only for persons (class 0 in COCO)
                if int(box.cls[0]) == 0:  # 'person' in COCO dataset
                    x1, y1, x2, y2 = map(int, box.xyxy[0])

                    # Extract face region
                    # NOTE(review): this actually crops the whole person bounding
                    # box, not just the face — the emotion service receives the
                    # full-body crop. Confirm this is intentional.
                    face_img = image[y1:y2, x1:x2]

                    # Analyze emotion of the face
                    emotion_data = self.emotion_service.analyze_emotion(face_img)

                    # Check if advanced service was used
                    using_advanced = False
                    if hasattr(self.emotion_service, 'is_advanced_service_active'):
                        using_advanced = self.emotion_service.is_advanced_service_active()

                    # Basic face data
                    face_data = {
                        'bbox': [x1, y1, x2, y2],
                        'confidence': float(box.conf[0]),
                        'emotion': emotion_data['emotion'],
                        'emotion_confidence': emotion_data['confidence'],
                        'emotions': emotion_data['emotions'],
                        'features': emotion_data['features'],  # same key name as used by emotion_service
                        'emotion_features': emotion_data['features'],  # kept for backward compatibility
                        'using_advanced': using_advanced  # whether the advanced (DeepFace) service was used
                    }

                    # Add advanced data if available (from DeepFace)
                    if 'age' in emotion_data['features']:
                        face_data['age'] = emotion_data['features']['age']
                    if 'gender' in emotion_data['features']:
                        face_data['gender'] = emotion_data['features']['gender']

                    faces.append(face_data)

        return faces

    def _extract_poses(self, results: List) -> List[Dict[str, Any]]:
        """
        Extract pose information from pose model results.

        Args:
            results: Pose model results

        Returns:
            List of pose data dictionaries
        """
        poses = []

        for r in results:
            # Some result objects may lack keypoints (e.g. no persons detected)
            if hasattr(r, 'keypoints') and r.keypoints is not None:
                for i, keypoints in enumerate(r.keypoints.data):
                    pose_data = {
                        'keypoints': keypoints.tolist(),
                        'person_idx': i
                    }
                    poses.append(pose_data)

        return poses
|
app.py
ADDED
|
@@ -0,0 +1,843 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
EmotionMirror - Emotional Analysis Application
|
| 3 |
+
|
| 4 |
+
A Streamlit application for analyzing emotions using computer vision.
|
| 5 |
+
"""
|
| 6 |
+
import os
|
| 7 |
+
import time
|
| 8 |
+
import uuid
|
| 9 |
+
import logging
|
| 10 |
+
import streamlit as st
|
| 11 |
+
from datetime import datetime
|
| 12 |
+
from PIL import Image
|
| 13 |
+
import numpy as np
|
| 14 |
+
import cv2
|
| 15 |
+
import json
|
| 16 |
+
import pandas as pd
|
| 17 |
+
|
| 18 |
+
# Import app modules
|
| 19 |
+
from config import settings
|
| 20 |
+
from agent_framework.agent_manager import AgentManager
|
| 21 |
+
from utils.file_utils import allowed_file, save_uploaded_file
|
| 22 |
+
from utils.export_utils import export_to_json, export_to_csv, get_download_link, generate_emotion_summary
|
| 23 |
+
from utils.preprocessing_ui import display_preprocessing_comparison, setup_preprocessing_controls, display_processing_status, get_processing_image, show_preprocessing_ui
|
| 24 |
+
from services.database_service import DatabaseService
|
| 25 |
+
from services.image_service import ImageService
|
| 26 |
+
# Import the new image visualization module (from root directory)
|
| 27 |
+
import sys
|
| 28 |
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
| 29 |
+
from image_visualization import display_image_with_controls, create_image_tabs
|
| 30 |
+
|
| 31 |
+
# Configure logging
|
| 32 |
+
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Page configuration — must be the first Streamlit call in the script
st.set_page_config(
    page_title="EmotionMirror",
    page_icon="📊",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={
        'Get Help': 'https://www.example.com/help',
        'Report a bug': 'https://www.example.com/bug',
        'About': 'EmotionMirror is an emotion analysis application.'
    }
)

# Apply custom CSS to improve stability and reduce flickering
st.markdown("""
<style>
    /* Reduce animation and transitions to minimize flickering */
    * {
        transition: none !important;
        animation: none !important;
        transform: none !important;
    }

    /* Make containers more stable */
    .stApp {
        transform: translateZ(0);
        backface-visibility: hidden;
        perspective: 1000px;
    }

    /* Further stabilize the main content area */
    .main .block-container {
        transform: translateZ(0);
        will-change: auto;
    }

    /* Ensure column stability */
    [data-testid="column"] {
        transform: translateZ(0);
    }

    /* Improve scrolling stability */
    .main {
        overflow-y: auto;
        overflow-x: hidden;
    }
</style>
""", unsafe_allow_html=True)
|
| 86 |
+
|
| 87 |
+
# Initialize agent manager
@st.cache_resource
def get_agent_manager():
    """Get or create the agent manager singleton.

    Cached with st.cache_resource so the model-holding AgentManager is
    constructed once per server process, not on every Streamlit rerun.
    """
    return AgentManager()
|
| 92 |
+
|
| 93 |
+
# Initialize database service
@st.cache_resource
def get_database_service():
    """Get or create the database service singleton.

    Cached with st.cache_resource so a single DatabaseService (and its
    underlying connection state) is shared across reruns and sessions.
    """
    return DatabaseService()
|
| 98 |
+
|
| 99 |
+
# Initialize image service for enhanced image handling
@st.cache_resource
def get_image_service():
    """
    Get or create the image service singleton.

    Part of Step 3 implementation: Added for image validation, dimension and
    quality analysis. Cached so the service is built once per process.
    """
    return ImageService()
|
| 107 |
+
|
| 108 |
+
# Session state initialization: create a per-session UUID exactly once.
if "session_id" not in st.session_state:
    st.session_state.session_id = str(uuid.uuid4())
    logger.info(f"New session started: {st.session_state.session_id}")

# Track uploads made during this session (not yet persisted to the database).
if "upload_history" not in st.session_state:
    st.session_state.upload_history = []

# Store the advanced emotion setting in session state to persist between pages.
if 'use_advanced_emotion' not in st.session_state:
    st.session_state.use_advanced_emotion = settings.USE_ADVANCED_EMOTION

# App title and description
st.title("EmotionMirror")
st.markdown("""
Welcome to EmotionMirror, an application for analyzing emotions using computer vision.

This is a prototype version that demonstrates the basic functionality.
""")
# Sidebar: navigation, detection settings, and session info.
with st.sidebar:
    st.title("EmotionMirror")
    st.subheader("Facial Emotion Analysis")

    # Navigation options
    page = st.radio(
        "Navigation",
        ["Home", "Visual Analysis", "History", "About"]
    )

    st.divider()

    # Settings section in sidebar
    st.subheader("Settings")

    # Toggle between basic (fast) and advanced (DeepFace) emotion detection.
    use_advanced = st.checkbox(
        "Use Advanced Emotion Detection",
        value=st.session_state.use_advanced_emotion,
        help="When enabled, DeepFace will be used for more accurate emotion detection"
    )

    # Propagate a change to both session state and the global settings object
    # so other pages pick up the new mode on the next rerun.
    if st.session_state.use_advanced_emotion != use_advanced:
        st.session_state.use_advanced_emotion = use_advanced
        settings.USE_ADVANCED_EMOTION = use_advanced

        # Show a note about the newly selected mode.
        # NOTE(review): original indentation was ambiguous — this notice is shown
        # only when the setting changes; confirm against the intended UX.
        if use_advanced:
            st.info("Advanced detection enabled")
        else:
            st.info("Basic detection enabled")

    # General confidence threshold for detections.
    confidence_threshold = st.slider(
        "Detection Confidence",
        min_value=0.1,
        max_value=1.0,
        value=0.45,
        step=0.05,
        help="Adjust the confidence threshold for detections"
    )

    st.divider()
    st.caption(f"Session ID: {st.session_state.session_id}")
    # Fixed: was an f-string with no placeholders (F541).
    st.caption("Version: 0.1.3 (Phase 1.3)")
# Home page: feature overview and quick-start instructions.
if page == "Home":
    st.header("EmotionMirror - Emotional Analysis System")

    st.subheader("Features")
    col1, col2, col3 = st.columns(3)

    # Three feature cards, one per column.
    with col1:
        st.markdown("### 📷 Visual Analysis")
        st.markdown("Upload images to analyze facial expressions and emotions.")

    with col2:
        st.markdown("### 📊 Emotion Tracking")
        st.markdown("Track emotions over time with detailed analytics. (Coming soon)")

    with col3:
        st.markdown("### 🧠 AI Recommendations")
        st.markdown("Get personalized recommendations based on your emotional state. (Coming soon)")

    st.subheader("Getting Started")
    st.markdown("""
    1. Navigate to the **Visual Analysis** page
    2. Upload an image containing faces
    3. View the analysis results
    """)
# Visual Analysis page
|
| 203 |
+
elif page == "Visual Analysis":
|
| 204 |
+
# Use modular page handler with correct parameters
|
| 205 |
+
st.header("Visual Emotion Analysis")
|
| 206 |
+
st.markdown("""
|
| 207 |
+
Upload an image to analyze emotions.
|
| 208 |
+
For best results, use a clear image of a face with good lighting.
|
| 209 |
+
""")
|
| 210 |
+
|
| 211 |
+
# Initialize services explicitly
|
| 212 |
+
agent_mgr = get_agent_manager()
|
| 213 |
+
img_service = get_image_service()
|
| 214 |
+
database_service = get_database_service()
|
| 215 |
+
|
| 216 |
+
# Initialize the visual agent at the start
|
| 217 |
+
visual_agent = agent_mgr.get_agent("VisualAgent")
|
| 218 |
+
if not visual_agent:
|
| 219 |
+
st.warning("Visual agent not available. The system is initializing or there is a configuration issue.")
|
| 220 |
+
logger.error("Failed to get VisualAgent from agent_manager")
|
| 221 |
+
|
| 222 |
+
# Create numbered sections for clear navigation
|
| 223 |
+
st.subheader("1. Upload an Image")
|
| 224 |
+
|
| 225 |
+
# Add reset button for clearing current image
|
| 226 |
+
if "original_image" in st.session_state:
|
| 227 |
+
col1, col2 = st.columns([3, 1])
|
| 228 |
+
with col2:
|
| 229 |
+
if st.button("Clear Current Image", key="clear_image"):
|
| 230 |
+
# Clear the session state
|
| 231 |
+
if "original_image" in st.session_state:
|
| 232 |
+
del st.session_state["original_image"]
|
| 233 |
+
if "processed_image" in st.session_state:
|
| 234 |
+
del st.session_state["processed_image"]
|
| 235 |
+
if "current_image_path" in st.session_state:
|
| 236 |
+
del st.session_state["current_image_path"]
|
| 237 |
+
st.experimental_rerun()
|
| 238 |
+
|
| 239 |
+
# Create file uploader
|
| 240 |
+
uploaded_file = st.file_uploader(
|
| 241 |
+
"Choose an image...",
|
| 242 |
+
type=["jpg", "jpeg", "png"],
|
| 243 |
+
help="Upload a clear image of a face for analysis."
|
| 244 |
+
)
|
| 245 |
+
|
| 246 |
+
# Display information about detection methods
|
| 247 |
+
with st.expander("About the Detection Methods", expanded=False):
|
| 248 |
+
st.markdown("""
|
| 249 |
+
### About the Detection Methods
|
| 250 |
+
|
| 251 |
+
Currently using: **Advanced Detection**
|
| 252 |
+
|
| 253 |
+
* **Basic detection** is faster but less accurate. It works by analyzing simple facial features.
|
| 254 |
+
* **Advanced detection (DeepFace)** uses deep learning models that are trained on thousands of faces to recognize subtle emotional cues.
|
| 255 |
+
|
| 256 |
+
You can change the default detection method in the sidebar settings.
|
| 257 |
+
""")
|
| 258 |
+
|
| 259 |
+
# Display image and interface when uploaded
|
| 260 |
+
if uploaded_file is not None:
|
| 261 |
+
try:
|
| 262 |
+
# Process the uploaded file
|
| 263 |
+
file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
|
| 264 |
+
uploaded_file.seek(0) # Reset the file pointer for further processing
|
| 265 |
+
|
| 266 |
+
# Validate image file - includes format, size, and dimensions
|
| 267 |
+
validation_result = img_service.validate_image_file(
|
| 268 |
+
uploaded_file,
|
| 269 |
+
check_content=True,
|
| 270 |
+
check_dimensions=True
|
| 271 |
+
)
|
| 272 |
+
|
| 273 |
+
# If the image is valid, process it
|
| 274 |
+
if validation_result["valid"]:
|
| 275 |
+
# Process the uploaded image to improve quality if possible
|
| 276 |
+
try:
|
| 277 |
+
# Load image for processing with OpenCV (this will be in BGR format)
|
| 278 |
+
img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
|
| 279 |
+
|
| 280 |
+
# Convert from BGR to RGB for preprocessing display
|
| 281 |
+
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
| 282 |
+
|
| 283 |
+
# Store original RGB image for display
|
| 284 |
+
if "original_image" not in st.session_state:
|
| 285 |
+
st.session_state.original_image = img_rgb.copy()
|
| 286 |
+
|
| 287 |
+
# Determine which image to process based on preprocessing settings
|
| 288 |
+
use_improved = st.session_state.get("use_improved_image", False)
|
| 289 |
+
|
| 290 |
+
# Choose image for processing
|
| 291 |
+
img_to_process = get_processing_image(img_rgb, img_service, use_improved)
|
| 292 |
+
|
| 293 |
+
# STEP 5: Display the image with visualization controls
|
| 294 |
+
st.subheader("2. Image Visualization")
|
| 295 |
+
st.markdown("""
|
| 296 |
+
View the uploaded image with basic controls. You can:
|
| 297 |
+
- Zoom in/out to see details
|
| 298 |
+
- Reset view to original size
|
| 299 |
+
- Download the current image
|
| 300 |
+
""")
|
| 301 |
+
|
| 302 |
+
# Check if we have both original and processed images
|
| 303 |
+
if "processed_image" in st.session_state:
|
| 304 |
+
# Create tabs to display original and processed images
|
| 305 |
+
image_tabs_result = create_image_tabs(
|
| 306 |
+
st.session_state.original_image,
|
| 307 |
+
st.session_state.processed_image
|
| 308 |
+
)
|
| 309 |
+
else:
|
| 310 |
+
# Display just the original image
|
| 311 |
+
display_result = display_image_with_controls(
|
| 312 |
+
st.session_state.original_image,
|
| 313 |
+
title="Uploaded Image",
|
| 314 |
+
allow_zoom=True,
|
| 315 |
+
allow_download=True
|
| 316 |
+
)
|
| 317 |
+
|
| 318 |
+
# Store the processed image if available for comparison
|
| 319 |
+
if use_improved and img_to_process is not None:
|
| 320 |
+
st.session_state.processed_image = img_to_process
|
| 321 |
+
|
| 322 |
+
# Add space between sections
|
| 323 |
+
st.markdown("<div style='height: 20px;'></div>", unsafe_allow_html=True)
|
| 324 |
+
|
| 325 |
+
# Display image analysis from validation
|
| 326 |
+
st.subheader("3. Image Analysis")
|
| 327 |
+
analysis_expander = st.expander("View Image Analysis Details", expanded=False)
|
| 328 |
+
|
| 329 |
+
with analysis_expander:
|
| 330 |
+
# Show image metadata
|
| 331 |
+
if "image_metadata" in validation_result:
|
| 332 |
+
metadata = validation_result["image_metadata"]
|
| 333 |
+
st.markdown("#### Image Details")
|
| 334 |
+
|
| 335 |
+
# Create columns for metadata display
|
| 336 |
+
meta_cols = st.columns(3)
|
| 337 |
+
|
| 338 |
+
with meta_cols[0]:
|
| 339 |
+
st.markdown("**Dimensions**")
|
| 340 |
+
width = metadata.get("width", 0)
|
| 341 |
+
height = metadata.get("height", 0)
|
| 342 |
+
st.write(f"{width} × {height}")
|
| 343 |
+
|
| 344 |
+
with meta_cols[1]:
|
| 345 |
+
st.markdown("**Format**")
|
| 346 |
+
img_format = metadata.get("format", "Unknown")
|
| 347 |
+
st.write(f"{img_format}")
|
| 348 |
+
|
| 349 |
+
with meta_cols[2]:
|
| 350 |
+
st.markdown("**Size**")
|
| 351 |
+
size_kb = metadata.get("size_kb", 0)
|
| 352 |
+
st.write(f"{size_kb:.1f} KB")
|
| 353 |
+
|
| 354 |
+
# Quality metrics if available
|
| 355 |
+
if "quality" in metadata:
|
| 356 |
+
st.markdown("#### Quality Metrics")
|
| 357 |
+
quality = metadata["quality"]
|
| 358 |
+
|
| 359 |
+
# Create columns for quality metrics
|
| 360 |
+
metric_cols = st.columns(3)
|
| 361 |
+
|
| 362 |
+
with metric_cols[0]:
|
| 363 |
+
st.markdown("**Sharpness**")
|
| 364 |
+
sharpness = int(quality.get("sharpness", 0) * 100)
|
| 365 |
+
st.write(f"{sharpness}%")
|
| 366 |
+
|
| 367 |
+
with metric_cols[1]:
|
| 368 |
+
st.markdown("**Brightness**")
|
| 369 |
+
brightness = int(quality.get("brightness", 0) * 100)
|
| 370 |
+
st.write(f"{brightness}%")
|
| 371 |
+
|
| 372 |
+
with metric_cols[2]:
|
| 373 |
+
st.markdown("**Contrast**")
|
| 374 |
+
contrast = int(quality.get("contrast", 0.26) * 100)
|
| 375 |
+
st.write(f"{contrast}%")
|
| 376 |
+
|
| 377 |
+
# Add space between sections
|
| 378 |
+
st.markdown("<div style='height: 20px;'></div>", unsafe_allow_html=True)
|
| 379 |
+
|
| 380 |
+
# NOW display the preprocessing UI
|
| 381 |
+
st.subheader("4. Image Preprocessing")
|
| 382 |
+
preprocessing_result = show_preprocessing_ui(img_service, img_rgb)
|
| 383 |
+
|
| 384 |
+
if not preprocessing_result.get("success", False):
|
| 385 |
+
st.error(f"Error in preprocessing: {preprocessing_result.get('message', 'Unknown error')}")
|
| 386 |
+
|
| 387 |
+
# Store path in session state for future use
|
| 388 |
+
if "current_image_path" not in st.session_state:
|
| 389 |
+
# Save the file for reference
|
| 390 |
+
save_path = img_service.save_uploaded_image(img_to_process)
|
| 391 |
+
st.session_state["current_image_path"] = save_path
|
| 392 |
+
|
| 393 |
+
# Add some space to improve layout
|
| 394 |
+
st.markdown("<div style='height: 20px;'></div>", unsafe_allow_html=True)
|
| 395 |
+
|
| 396 |
+
# Analysis section
|
| 397 |
+
st.subheader("5. Emotion Analysis")
|
| 398 |
+
st.info("Image successfully uploaded. Emotion analysis functionality will be available soon.")
|
| 399 |
+
|
| 400 |
+
# Add a disabled button as placeholder for future functionality
|
| 401 |
+
st.button("Analyze Emotions (Coming Soon)", disabled=True, key="analyze_button")
|
| 402 |
+
|
| 403 |
+
except Exception as e:
|
| 404 |
+
logger.error(f"Error processing image: {e}", exc_info=True)
|
| 405 |
+
st.error(f"Error processing image: {str(e)}")
|
| 406 |
+
else:
|
| 407 |
+
# Show validation issues
|
| 408 |
+
st.error("Image validation failed:")
|
| 409 |
+
for issue in validation_result["issues"]:
|
| 410 |
+
st.warning(issue)
|
| 411 |
+
|
| 412 |
+
except Exception as e:
|
| 413 |
+
logger.error(f"Error in Visual Analysis: {e}", exc_info=True)
|
| 414 |
+
st.error(f"Error processing image: {str(e)}")
|
| 415 |
+
|
| 416 |
+
# History page
|
| 417 |
+
elif page == "History":
|
| 418 |
+
st.header("Analysis History")
|
| 419 |
+
st.markdown("View your previous analyses and export results.")
|
| 420 |
+
|
| 421 |
+
# Initialize database service
|
| 422 |
+
db_service = get_database_service()
|
| 423 |
+
|
| 424 |
+
# Create tabs for different views - Using numbered sequence for clear navigation
|
| 425 |
+
history_tabs = ["1. Recent Analyses", "2. Statistics", "3. Export Data"]
|
| 426 |
+
tab1, tab2, tab3 = st.tabs(history_tabs)
|
| 427 |
+
|
| 428 |
+
with tab1:
|
| 429 |
+
st.subheader("Recent Emotion Analyses")
|
| 430 |
+
|
| 431 |
+
# Get available person tags from the database
|
| 432 |
+
# [April 2025] - Person filtering system synchronized with local version
|
| 433 |
+
# to show actual person names instead of emotion tags
|
| 434 |
+
try:
|
| 435 |
+
all_analyses = db_service.get_history(session_id=st.session_state.session_id, limit=100)
|
| 436 |
+
person_tags = set()
|
| 437 |
+
for analysis in all_analyses:
|
| 438 |
+
if 'tags' in analysis and analysis['tags']:
|
| 439 |
+
for tag in analysis['tags']:
|
| 440 |
+
person_tags.add(tag)
|
| 441 |
+
|
| 442 |
+
person_tags = sorted(list(person_tags))
|
| 443 |
+
|
| 444 |
+
# Create a filter dropdown if there are person tags
|
| 445 |
+
selected_person = None
|
| 446 |
+
if person_tags:
|
| 447 |
+
filter_options = ["All People"] + person_tags
|
| 448 |
+
selected_filter = st.selectbox(
|
| 449 |
+
"Filter by Person",
|
| 450 |
+
options=filter_options,
|
| 451 |
+
index=0,
|
| 452 |
+
help="Select a person to filter the analysis history"
|
| 453 |
+
)
|
| 454 |
+
|
| 455 |
+
if selected_filter != "All People":
|
| 456 |
+
selected_person = selected_filter
|
| 457 |
+
st.info(f"Showing analyses for: **{selected_filter}**")
|
| 458 |
+
except Exception as e:
|
| 459 |
+
logger.error(f"Error retrieving tags: {e}")
|
| 460 |
+
person_tags = []
|
| 461 |
+
selected_person = None
|
| 462 |
+
|
| 463 |
+
# Get analysis history from database, filtered by person if selected
|
| 464 |
+
analyses = []
|
| 465 |
+
try:
|
| 466 |
+
# Get analyses, filtered by tag if a person is selected
|
| 467 |
+
if selected_person:
|
| 468 |
+
# For simplicity, we'll retrieve all analyses and filter them here
|
| 469 |
+
# In a production app, we'd want to filter in the database query
|
| 470 |
+
all_analyses = db_service.get_history(session_id=st.session_state.session_id, limit=20)
|
| 471 |
+
analyses = [a for a in all_analyses if 'tags' in a and a['tags'] and selected_person in a['tags']]
|
| 472 |
+
else:
|
| 473 |
+
analyses = db_service.get_history(session_id=st.session_state.session_id, limit=10)
|
| 474 |
+
|
| 475 |
+
print(f"[DEBUG] Found {len(analyses)} analyses to display")
|
| 476 |
+
for a in analyses:
|
| 477 |
+
print(f"[DEBUG] Analysis ID: {a.get('id', 'unknown')} Timestamp: {a.get('timestamp', 'unknown')}")
|
| 478 |
+
except Exception as e:
|
| 479 |
+
logger.error(f"Error retrieving analyses: {e}")
|
| 480 |
+
print(f"[DEBUG] Error retrieving analyses: {e}")
|
| 481 |
+
|
| 482 |
+
if analyses:
|
| 483 |
+
for i, analysis in enumerate(analyses):
|
| 484 |
+
with st.expander(f"Analysis #{analysis['id']} - {analysis['timestamp'][:16]}", expanded=(i==0)):
|
| 485 |
+
# Create columns for layout
|
| 486 |
+
col1, col2 = st.columns([1, 2])
|
| 487 |
+
|
| 488 |
+
with col1:
|
| 489 |
+
# Show image if available and path exists
|
| 490 |
+
image_path = analysis['image_path']
|
| 491 |
+
if os.path.exists(image_path):
|
| 492 |
+
try:
|
| 493 |
+
img = Image.open(image_path)
|
| 494 |
+
st.image(img, caption=f"Analyzed Image", use_column_width=True)
|
| 495 |
+
except Exception as e:
|
| 496 |
+
st.error(f"Unable to load image: {str(e)}")
|
| 497 |
+
else:
|
| 498 |
+
st.warning("Image file no longer available")
|
| 499 |
+
|
| 500 |
+
with col2:
|
| 501 |
+
# Display analysis details
|
| 502 |
+
st.markdown(f"**Date:** {analysis['timestamp'][:19]}")
|
| 503 |
+
st.markdown(f"**Faces Detected:** {analysis['face_count']}")
|
| 504 |
+
|
| 505 |
+
# Show person name if available
|
| 506 |
+
if 'tags' in analysis and analysis['tags']:
|
| 507 |
+
st.markdown(f"**Person:** {analysis['tags'][0]}")
|
| 508 |
+
|
| 509 |
+
# Check if advanced detection was used
|
| 510 |
+
# Comprobar si los resultados ya son un diccionario o una cadena JSON
|
| 511 |
+
if isinstance(analysis['results'], dict):
|
| 512 |
+
results_dict = analysis['results']
|
| 513 |
+
else:
|
| 514 |
+
results_dict = json.loads(analysis['results'])
|
| 515 |
+
|
| 516 |
+
using_advanced = False
|
| 517 |
+
has_advanced_data = False
|
| 518 |
+
age_data = None
|
| 519 |
+
gender_data = None
|
| 520 |
+
|
| 521 |
+
# Check for advanced detection usage and data
|
| 522 |
+
if 'faces' in results_dict and len(results_dict['faces']) > 0:
|
| 523 |
+
first_face = results_dict['faces'][0]
|
| 524 |
+
using_advanced = first_face.get('using_advanced', False)
|
| 525 |
+
|
| 526 |
+
# Check for advanced data
|
| 527 |
+
if 'age' in first_face:
|
| 528 |
+
has_advanced_data = True
|
| 529 |
+
age_data = first_face['age']
|
| 530 |
+
if 'gender' in first_face:
|
| 531 |
+
has_advanced_data = True
|
| 532 |
+
gender_data = first_face['gender']
|
| 533 |
+
|
| 534 |
+
# Show detection method used
|
| 535 |
+
if using_advanced:
|
| 536 |
+
st.markdown("**Detection:** <span style='background-color:#4CAF50;color:white;padding:2px 6px;border-radius:3px;font-size:0.7em;'>DeepFace</span>", unsafe_allow_html=True)
|
| 537 |
+
|
| 538 |
+
# Show advanced data if available
|
| 539 |
+
if has_advanced_data:
|
| 540 |
+
st.markdown("### Advanced Data")
|
| 541 |
+
if age_data is not None:
|
| 542 |
+
st.markdown(f"**Age Estimate:** {age_data}")
|
| 543 |
+
if gender_data is not None:
|
| 544 |
+
# Capitalize gender
|
| 545 |
+
if isinstance(gender_data, str) and len(gender_data) > 0:
|
| 546 |
+
gender_data = gender_data[0].upper() + gender_data[1:].lower() if len(gender_data) > 0 else gender_data
|
| 547 |
+
st.markdown(f"**Gender Estimate:** {gender_data}")
|
| 548 |
+
|
| 549 |
+
# Show emotion data
|
| 550 |
+
if analysis['face_count'] > 0:
|
| 551 |
+
st.markdown("### 1. Detected Emotions")
|
| 552 |
+
|
| 553 |
+
# Display each face
|
| 554 |
+
for j, face in enumerate(results_dict['faces']):
|
| 555 |
+
# Get emotion data
|
| 556 |
+
emotion = face['emotion'].capitalize()
|
| 557 |
+
confidence = face.get('emotion_confidence', face.get('confidence', 0))
|
| 558 |
+
|
| 559 |
+
# Show badge for DeepFace if used
|
| 560 |
+
if face.get('using_advanced', False):
|
| 561 |
+
st.markdown(f"**Face {j+1}:** {emotion} <span style='background-color:#4CAF50;color:white;padding:2px 6px;border-radius:3px;font-size:0.7em;'>DeepFace</span> (Confidence: {confidence:.2f})", unsafe_allow_html=True)
|
| 562 |
+
else:
|
| 563 |
+
st.markdown(f"**Face {j+1}:** {emotion} (Confidence: {confidence:.2f})")
|
| 564 |
+
|
| 565 |
+
# Create emotion dataframe for visualization
|
| 566 |
+
if 'emotions' in face:
|
| 567 |
+
emotions = face['emotions']
|
| 568 |
+
emotion_df = pd.DataFrame({
|
| 569 |
+
'Emotion': [k.capitalize() for k in emotions.keys()],
|
| 570 |
+
'Score': list(emotions.values())
|
| 571 |
+
})
|
| 572 |
+
emotion_df = emotion_df.sort_values('Score', ascending=False)
|
| 573 |
+
|
| 574 |
+
# Display mini chart
|
| 575 |
+
st.bar_chart(emotion_df.set_index('Emotion'), height=150)
|
| 576 |
+
|
| 577 |
+
# Option to delete analysis
|
| 578 |
+
if st.button(f"Delete Analysis #{analysis['id']}", key=f"del_{analysis['id']}"):
|
| 579 |
+
if db_service.delete_record(analysis['id']):
|
| 580 |
+
st.success("Analysis deleted successfully!")
|
| 581 |
+
st.experimental_rerun()
|
| 582 |
+
else:
|
| 583 |
+
st.error("Failed to delete analysis")
|
| 584 |
+
else:
|
| 585 |
+
st.info("No previous analyses found in the database.")
|
| 586 |
+
|
| 587 |
+
# Display session upload history if available
|
| 588 |
+
if st.session_state.upload_history:
|
| 589 |
+
st.markdown("### Recent Uploads (Not yet saved to database)")
|
| 590 |
+
for i, upload in enumerate(st.session_state.upload_history):
|
| 591 |
+
st.markdown(f"**{i+1}. {upload['file_name']}**")
|
| 592 |
+
st.markdown(f"* Timestamp: {upload['timestamp']}")
|
| 593 |
+
|
| 594 |
+
with tab2:
|
| 595 |
+
st.subheader("Emotion Analytics")
|
| 596 |
+
|
| 597 |
+
# Add filter by person to statistics view
|
| 598 |
+
try:
|
| 599 |
+
all_analyses = db_service.get_history(session_id=st.session_state.session_id, limit=100)
|
| 600 |
+
person_tags = set()
|
| 601 |
+
for analysis in all_analyses:
|
| 602 |
+
if 'tags' in analysis and analysis['tags']:
|
| 603 |
+
for tag in analysis['tags']:
|
| 604 |
+
person_tags.add(tag)
|
| 605 |
+
|
| 606 |
+
person_tags = sorted(list(person_tags))
|
| 607 |
+
|
| 608 |
+
# Create a filter dropdown if there are person tags
|
| 609 |
+
selected_person = None
|
| 610 |
+
if person_tags:
|
| 611 |
+
filter_options = ["All People"] + person_tags
|
| 612 |
+
selected_filter = st.selectbox(
|
| 613 |
+
"1. Select Person to Analyze",
|
| 614 |
+
options=filter_options,
|
| 615 |
+
index=0,
|
| 616 |
+
help="Select a person to view their emotion statistics"
|
| 617 |
+
)
|
| 618 |
+
|
| 619 |
+
if selected_filter != "All People":
|
| 620 |
+
selected_person = selected_filter
|
| 621 |
+
except Exception as e:
|
| 622 |
+
logger.error(f"Error retrieving tags for statistics: {e}")
|
| 623 |
+
person_tags = []
|
| 624 |
+
selected_person = None
|
| 625 |
+
|
| 626 |
+
# Get emotion statistics
|
| 627 |
+
emotion_stats = db_service.get_emotion_stats(session_id=st.session_state.session_id)
|
| 628 |
+
|
| 629 |
+
if emotion_stats:
|
| 630 |
+
# Display chart of emotion frequencies
|
| 631 |
+
st.markdown("### 2. Emotion Distribution")
|
| 632 |
+
|
| 633 |
+
# Create dataframe for visualization
|
| 634 |
+
stats_df = pd.DataFrame({
|
| 635 |
+
'Emotion': list(emotion_stats.keys()),
|
| 636 |
+
'Frequency': list(emotion_stats.values())
|
| 637 |
+
})
|
| 638 |
+
|
| 639 |
+
# Sort by frequency for better visualization
|
| 640 |
+
stats_df = stats_df.sort_values('Frequency', ascending=False)
|
| 641 |
+
|
| 642 |
+
# Display chart
|
| 643 |
+
st.bar_chart(stats_df.set_index('Emotion'))
|
| 644 |
+
|
| 645 |
+
# Show numeric values below the chart
|
| 646 |
+
st.markdown("### 3. Detailed Percentages")
|
| 647 |
+
|
| 648 |
+
# Create columns for better presentation
|
| 649 |
+
cols = st.columns(3)
|
| 650 |
+
for i, (emotion, frequency) in enumerate(zip(stats_df['Emotion'], stats_df['Frequency'])):
|
| 651 |
+
col_idx = i % 3
|
| 652 |
+
with cols[col_idx]:
|
| 653 |
+
st.metric(
|
| 654 |
+
emotion.capitalize(),
|
| 655 |
+
f"{frequency:.1%}",
|
| 656 |
+
delta=None
|
| 657 |
+
)
|
| 658 |
+
|
| 659 |
+
# Add explanation of statistics
|
| 660 |
+
st.markdown("---")
|
| 661 |
+
st.markdown("### 4. Understanding These Statistics")
|
| 662 |
+
st.markdown("""
|
| 663 |
+
The data above shows the distribution of emotions detected across all analyses:
|
| 664 |
+
|
| 665 |
+
1. **Emotion Distribution** - The bar chart visualizes the relative frequency of each emotion
|
| 666 |
+
2. **Detailed Percentages** - Exact percentage values for each emotion detected
|
| 667 |
+
3. **Sample Size** - Based on all faces detected in the selected analyses
|
| 668 |
+
""")
|
| 669 |
+
else:
|
| 670 |
+
st.info("No emotion data available for analysis yet.")
|
| 671 |
+
|
| 672 |
+
with tab3:
|
| 673 |
+
st.subheader("Export Analysis Data")
|
| 674 |
+
|
| 675 |
+
# Add filter by person for export
|
| 676 |
+
selected_person_tag = None
|
| 677 |
+
try:
|
| 678 |
+
all_analyses = db_service.get_history(session_id=st.session_state.session_id, limit=100)
|
| 679 |
+
person_tags = set()
|
| 680 |
+
for analysis in all_analyses:
|
| 681 |
+
if 'tags' in analysis and analysis['tags']:
|
| 682 |
+
for tag in analysis['tags']:
|
| 683 |
+
person_tags.add(tag)
|
| 684 |
+
|
| 685 |
+
person_tags = sorted(list(person_tags))
|
| 686 |
+
|
| 687 |
+
# Create a person filter dropdown if person tags exist
|
| 688 |
+
if person_tags:
|
| 689 |
+
person_options = ["All People"] + person_tags
|
| 690 |
+
person_filter = st.selectbox(
|
| 691 |
+
"1. Select Person to Export",
|
| 692 |
+
options=person_options,
|
| 693 |
+
index=0,
|
| 694 |
+
help="Filter export data to include only analyses for a specific person"
|
| 695 |
+
)
|
| 696 |
+
|
| 697 |
+
if person_filter != "All People":
|
| 698 |
+
selected_person_tag = person_filter
|
| 699 |
+
st.info(f"Export will only include analyses for: {selected_person_tag}")
|
| 700 |
+
except Exception as e:
|
| 701 |
+
logger.error(f"Error retrieving person tags for export: {e}")
|
| 702 |
+
|
| 703 |
+
# Options for export format
|
| 704 |
+
export_type = st.radio(
|
| 705 |
+
"2. Select Export Format",
|
| 706 |
+
["JSON", "CSV"],
|
| 707 |
+
horizontal=True
|
| 708 |
+
)
|
| 709 |
+
|
| 710 |
+
export_scope = st.radio(
|
| 711 |
+
"3. Select Data Scope",
|
| 712 |
+
["Current Session", "All Sessions"],
|
| 713 |
+
horizontal=True
|
| 714 |
+
)
|
| 715 |
+
|
| 716 |
+
session_id = st.session_state.session_id if export_scope == "Current Session" else None
|
| 717 |
+
|
| 718 |
+
# Option to limit number of records
|
| 719 |
+
record_limit = st.slider(
|
| 720 |
+
"4. Maximum Records to Export",
|
| 721 |
+
min_value=1,
|
| 722 |
+
max_value=100,
|
| 723 |
+
value=20,
|
| 724 |
+
step=1
|
| 725 |
+
)
|
| 726 |
+
|
| 727 |
+
# Generate export button
|
| 728 |
+
if st.button("Generate Export"):
|
| 729 |
+
with st.spinner("Preparing export..."):
|
| 730 |
+
# Get export data
|
| 731 |
+
export_data = db_service.export_data(
|
| 732 |
+
session_id=session_id,
|
| 733 |
+
limit=record_limit
|
| 734 |
+
)
|
| 735 |
+
|
| 736 |
+
# Filter data by person if selected
|
| 737 |
+
if selected_person_tag and 'analyses' in export_data:
|
| 738 |
+
filtered_analyses = []
|
| 739 |
+
for analysis in export_data['analyses']:
|
| 740 |
+
if 'tags' in analysis and analysis['tags'] and selected_person_tag in analysis['tags']:
|
| 741 |
+
filtered_analyses.append(analysis)
|
| 742 |
+
|
| 743 |
+
export_data['analyses'] = filtered_analyses
|
| 744 |
+
export_data['metadata']['record_count'] = len(filtered_analyses)
|
| 745 |
+
export_data['metadata']['person_filter'] = selected_person_tag
|
| 746 |
+
|
| 747 |
+
if not export_data or not export_data.get('analyses'):
|
| 748 |
+
st.warning("No data available to export")
|
| 749 |
+
else:
|
| 750 |
+
st.success(f"Export generated with {len(export_data.get('analyses', []))} analyses")
|
| 751 |
+
|
| 752 |
+
# Create download options based on export type
|
| 753 |
+
if export_type == "JSON":
|
| 754 |
+
json_str = export_to_json(export_data)
|
| 755 |
+
filename = f"emotion_analysis_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
|
| 756 |
+
|
| 757 |
+
# Create download button
|
| 758 |
+
st.download_button(
|
| 759 |
+
label="Download JSON",
|
| 760 |
+
data=json_str,
|
| 761 |
+
file_name=filename,
|
| 762 |
+
mime="application/json"
|
| 763 |
+
)
|
| 764 |
+
else: # CSV
|
| 765 |
+
csv_str = export_to_csv(export_data)
|
| 766 |
+
filename = f"emotion_analysis_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
|
| 767 |
+
|
| 768 |
+
# Create download button
|
| 769 |
+
st.download_button(
|
| 770 |
+
label="Download CSV",
|
| 771 |
+
data=csv_str,
|
| 772 |
+
file_name=filename,
|
| 773 |
+
mime="text/csv"
|
| 774 |
+
)
|
| 775 |
+
|
| 776 |
+
# Show summary statistics
|
| 777 |
+
summary = generate_emotion_summary(export_data)
|
| 778 |
+
|
| 779 |
+
st.markdown("### Export Summary")
|
| 780 |
+
st.markdown(f"**Total Analyses:** {summary['total_analyses']}")
|
| 781 |
+
st.markdown(f"**Total Faces Analyzed:** {summary['total_faces']}")
|
| 782 |
+
|
| 783 |
+
if summary['total_faces'] > 0:
|
| 784 |
+
# Display emotion distribution
|
| 785 |
+
st.markdown("**Emotion Distribution:**")
|
| 786 |
+
|
| 787 |
+
# Create dataframe for visualization
|
| 788 |
+
emotions_df = pd.DataFrame({
|
| 789 |
+
'Emotion': list(summary['emotion_percentages'].keys()),
|
| 790 |
+
'Percentage': list(summary['emotion_percentages'].values())
|
| 791 |
+
})
|
| 792 |
+
|
| 793 |
+
# Sort by percentage
|
| 794 |
+
emotions_df = emotions_df.sort_values('Percentage', ascending=False)
|
| 795 |
+
|
| 796 |
+
# Display chart
|
| 797 |
+
st.bar_chart(emotions_df.set_index('Emotion'))
|
| 798 |
+
|
| 799 |
+
# Add explanation of export formats below visualization
|
| 800 |
+
st.markdown("---")
|
| 801 |
+
st.markdown("### 5. About Export Formats")
|
| 802 |
+
st.markdown("""
|
| 803 |
+
The exported data includes analysis details in your chosen format:
|
| 804 |
+
|
| 805 |
+
1. **JSON Format** - Complete data structure with all details, ideal for further processing
|
| 806 |
+
2. **CSV Format** - Tabular data format, can be opened in spreadsheet software
|
| 807 |
+
3. **Data Contents** - Each export includes timestamps, emotion labels, confidence values, and facial features
|
| 808 |
+
4. **Person Information** - If you specified a person name, it's included with each analysis
|
| 809 |
+
""")
|
| 810 |
+
|
| 811 |
+
# About page
|
| 812 |
+
elif page == "About":
|
| 813 |
+
st.header("About EmotionMirror")
|
| 814 |
+
|
| 815 |
+
st.markdown("""
|
| 816 |
+
## EmotionMirror: Emotional Analysis System
|
| 817 |
+
|
| 818 |
+
EmotionMirror is an application that uses computer vision and artificial intelligence
|
| 819 |
+
to analyze emotions from facial expressions and body language.
|
| 820 |
+
|
| 821 |
+
### Technology Stack
|
| 822 |
+
|
| 823 |
+
* **Streamlit**: For the user interface
|
| 824 |
+
* **YOLOv8**: For object detection, pose estimation, and facial analysis
|
| 825 |
+
* **Agent Framework**: A custom multi-agent system for coordinated analysis
|
| 826 |
+
|
| 827 |
+
### Future Features
|
| 828 |
+
|
| 829 |
+
* Enhanced emotion recognition with Hume.ai integration
|
| 830 |
+
* Temporal emotion tracking and pattern analysis
|
| 831 |
+
* Personalized recommendations based on emotional states
|
| 832 |
+
* Guided emotional wellness sessions
|
| 833 |
+
|
| 834 |
+
### Privacy
|
| 835 |
+
|
| 836 |
+
* All image processing is done locally
|
| 837 |
+
* We don't store your images after processing
|
| 838 |
+
* No personal data is shared with third parties
|
| 839 |
+
""")
|
| 840 |
+
|
| 841 |
+
# Footer
|
| 842 |
+
st.markdown("---")
|
| 843 |
+
st.markdown(" EmotionMirror | Developed as a prototype application")
|
app.py.backup
ADDED
|
@@ -0,0 +1,927 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
EmotionMirror - Emotional Analysis Application
|
| 3 |
+
|
| 4 |
+
A Streamlit application for analyzing emotions using computer vision.
|
| 5 |
+
"""
|
| 6 |
+
import os
|
| 7 |
+
import time
|
| 8 |
+
import uuid
|
| 9 |
+
import logging
|
| 10 |
+
import streamlit as st
|
| 11 |
+
from datetime import datetime
|
| 12 |
+
from PIL import Image
|
| 13 |
+
import numpy as np
|
| 14 |
+
import cv2
|
| 15 |
+
import json
|
| 16 |
+
import pandas as pd
|
| 17 |
+
|
| 18 |
+
# Import app modules
|
| 19 |
+
from config import settings
|
| 20 |
+
from agent_framework.agent_manager import AgentManager
|
| 21 |
+
from utils.file_utils import allowed_file, save_uploaded_file
|
| 22 |
+
from utils.export_utils import export_to_json, export_to_csv, get_download_link, generate_emotion_summary
|
| 23 |
+
from utils.preprocessing_ui import display_preprocessing_comparison, setup_preprocessing_controls, display_processing_status, get_processing_image, show_preprocessing_ui
|
| 24 |
+
from services.database_service import DatabaseService
|
| 25 |
+
from services.image_service import ImageService
|
| 26 |
+
|
| 27 |
+
# Configure logging
|
| 28 |
+
logging.basicConfig(
|
| 29 |
+
level=logging.INFO,
|
| 30 |
+
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
| 31 |
+
)
|
| 32 |
+
logger = logging.getLogger(__name__)
|
| 33 |
+
|
| 34 |
+
# Page configuration
|
| 35 |
+
st.set_page_config(
|
| 36 |
+
page_title="EmotionMirror",
|
| 37 |
+
page_icon="📊",
|
| 38 |
+
layout="wide",
|
| 39 |
+
initial_sidebar_state="expanded"
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
# Initialize agent manager
|
| 43 |
+
@st.cache_resource
|
| 44 |
+
def get_agent_manager():
|
| 45 |
+
"""Get or create the agent manager singleton"""
|
| 46 |
+
return AgentManager()
|
| 47 |
+
|
| 48 |
+
# Initialize database service
|
| 49 |
+
@st.cache_resource
|
| 50 |
+
def get_database_service():
|
| 51 |
+
"""Get or create the database service singleton"""
|
| 52 |
+
return DatabaseService()
|
| 53 |
+
|
| 54 |
+
# Initialize image service for enhanced image handling
|
| 55 |
+
@st.cache_resource
|
| 56 |
+
def get_image_service():
|
| 57 |
+
"""
|
| 58 |
+
Get or create the image service singleton.
|
| 59 |
+
Part of Step 3 implementation: Added for image validation, dimension and quality analysis.
|
| 60 |
+
"""
|
| 61 |
+
return ImageService()
|
| 62 |
+
|
| 63 |
+
# Session state initialization
|
| 64 |
+
if "session_id" not in st.session_state:
|
| 65 |
+
st.session_state.session_id = str(uuid.uuid4())
|
| 66 |
+
logger.info(f"New session started: {st.session_state.session_id}")
|
| 67 |
+
|
| 68 |
+
if "upload_history" not in st.session_state:
|
| 69 |
+
st.session_state.upload_history = []
|
| 70 |
+
|
| 71 |
+
# Store the advanced emotion setting in session state to persist between pages
|
| 72 |
+
if 'use_advanced_emotion' not in st.session_state:
|
| 73 |
+
st.session_state.use_advanced_emotion = settings.USE_ADVANCED_EMOTION
|
| 74 |
+
|
| 75 |
+
# App title and description
|
| 76 |
+
st.title("EmotionMirror")
|
| 77 |
+
st.markdown("""
|
| 78 |
+
Welcome to EmotionMirror, an application for analyzing emotions using computer vision.
|
| 79 |
+
|
| 80 |
+
This is a prototype version that demonstrates the basic functionality.
|
| 81 |
+
""")
|
| 82 |
+
|
| 83 |
+
# Sidebar
|
| 84 |
+
with st.sidebar:
|
| 85 |
+
st.title("EmotionMirror")
|
| 86 |
+
st.subheader("Facial Emotion Analysis")
|
| 87 |
+
|
| 88 |
+
# Navigation options
|
| 89 |
+
page = st.radio(
|
| 90 |
+
"Navigation",
|
| 91 |
+
["Home", "Visual Analysis", "History", "About"]
|
| 92 |
+
)
|
| 93 |
+
|
| 94 |
+
st.divider()
|
| 95 |
+
|
| 96 |
+
# Settings section in sidebar
|
| 97 |
+
st.subheader("Settings")
|
| 98 |
+
|
| 99 |
+
# Add option to switch between basic and advanced emotion detection
|
| 100 |
+
use_advanced = st.checkbox(
|
| 101 |
+
"Use Advanced Emotion Detection",
|
| 102 |
+
value=st.session_state.use_advanced_emotion,
|
| 103 |
+
help="When enabled, DeepFace will be used for more accurate emotion detection"
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
# Update the setting if changed
|
| 107 |
+
if st.session_state.use_advanced_emotion != use_advanced:
|
| 108 |
+
st.session_state.use_advanced_emotion = use_advanced
|
| 109 |
+
settings.USE_ADVANCED_EMOTION = use_advanced
|
| 110 |
+
|
| 111 |
+
# Show a note about reloading
|
| 112 |
+
if use_advanced:
|
| 113 |
+
st.info("Advanced detection enabled")
|
| 114 |
+
else:
|
| 115 |
+
st.info("Basic detection enabled")
|
| 116 |
+
|
| 117 |
+
# General confidence threshold
|
| 118 |
+
confidence_threshold = st.slider(
|
| 119 |
+
"Detection Confidence",
|
| 120 |
+
min_value=0.1,
|
| 121 |
+
max_value=1.0,
|
| 122 |
+
value=0.45,
|
| 123 |
+
step=0.05,
|
| 124 |
+
help="Adjust the confidence threshold for detections"
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
st.divider()
|
| 128 |
+
st.caption(f"Session ID: {st.session_state.session_id}")
|
| 129 |
+
st.caption(f"Version: 0.1.3 (Phase 1.3)")
|
| 130 |
+
|
| 131 |
+
# Home page
|
| 132 |
+
if page == "Home":
|
| 133 |
+
st.header("EmotionMirror - Emotional Analysis System")
|
| 134 |
+
|
| 135 |
+
st.subheader("Features")
|
| 136 |
+
col1, col2, col3 = st.columns(3)
|
| 137 |
+
|
| 138 |
+
with col1:
|
| 139 |
+
st.markdown("### 📷 Visual Analysis")
|
| 140 |
+
st.markdown("Upload images to analyze facial expressions and emotions.")
|
| 141 |
+
|
| 142 |
+
with col2:
|
| 143 |
+
st.markdown("### 📊 Emotion Tracking")
|
| 144 |
+
st.markdown("Track emotions over time with detailed analytics. (Coming soon)")
|
| 145 |
+
|
| 146 |
+
with col3:
|
| 147 |
+
st.markdown("### 🧠 AI Recommendations")
|
| 148 |
+
st.markdown("Get personalized recommendations based on your emotional state. (Coming soon)")
|
| 149 |
+
|
| 150 |
+
st.subheader("Getting Started")
|
| 151 |
+
st.markdown("""
|
| 152 |
+
1. Navigate to the **Visual Analysis** page
|
| 153 |
+
2. Upload an image containing faces
|
| 154 |
+
3. View the analysis results
|
| 155 |
+
""")
|
| 156 |
+
|
| 157 |
+
# Visual Analysis page
|
| 158 |
+
elif page == "Visual Analysis":
|
| 159 |
+
st.title("Visual Emotion Analysis")
|
| 160 |
+
st.markdown("Upload an image to analyze emotions")
|
| 161 |
+
|
| 162 |
+
# Initialize the image service for improved image handling
|
| 163 |
+
# STEP 3: Using the enhanced image service for validation of dimensions and quality
|
| 164 |
+
image_service = get_image_service()
|
| 165 |
+
|
| 166 |
+
# Implement Steps 1, 2 & 3: Interface design, format validation, and dimension/quality analysis
|
| 167 |
+
uploaded_file = image_service.setup_image_upload_interface(st)
|
| 168 |
+
|
| 169 |
+
# Model settings in a cleaner expandable section
|
| 170 |
+
with st.expander("Detection Settings"):
|
| 171 |
+
# Add explanation text about the detection methods
|
| 172 |
+
detection_type = "Advanced Detection" if st.session_state.use_advanced_emotion else "Basic Detection"
|
| 173 |
+
st.markdown(f"""
|
| 174 |
+
### About the Detection Methods
|
| 175 |
+
|
| 176 |
+
Currently using: **{detection_type}**
|
| 177 |
+
|
| 178 |
+
- **Basic detection** is faster but less accurate. It works by analyzing simple facial features.
|
| 179 |
+
- **Advanced detection (DeepFace)** uses deep learning models that are trained on thousands of faces to recognize subtle emotional cues.
|
| 180 |
+
|
| 181 |
+
You can change the default detection method in the sidebar settings.
|
| 182 |
+
""")
|
| 183 |
+
|
| 184 |
+
# Display image and interface when uploaded
|
| 185 |
+
if uploaded_file is not None:
|
| 186 |
+
# We're removing redundant title and image display since our image service
|
| 187 |
+
# already handles this in the two-column layout
|
| 188 |
+
|
| 189 |
+
# Keep this for processing, but no display
|
| 190 |
+
file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
|
| 191 |
+
uploaded_file.seek(0) # Reset the file pointer for further processing
|
| 192 |
+
|
| 193 |
+
# Get image service for processing and validation
|
| 194 |
+
image_service = get_image_service()
|
| 195 |
+
|
| 196 |
+
# Validate image file - includes format, size, and dimensions
|
| 197 |
+
validation_result = image_service.validate_image_file(
|
| 198 |
+
uploaded_file,
|
| 199 |
+
check_content=True,
|
| 200 |
+
check_dimensions=True
|
| 201 |
+
)
|
| 202 |
+
|
| 203 |
+
if validation_result["valid"]:
|
| 204 |
+
# Process the uploaded image to improve quality if possible
|
| 205 |
+
# STEP 4: Add image preprocessing functionality
|
| 206 |
+
try:
|
| 207 |
+
# Load image for processing
|
| 208 |
+
img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
|
| 209 |
+
|
| 210 |
+
# Use the modular preprocessing UI handler
|
| 211 |
+
preprocessing_result = show_preprocessing_ui(image_service, img)
|
| 212 |
+
|
| 213 |
+
if not preprocessing_result.get("success", False):
|
| 214 |
+
st.error(f"Error in preprocessing: {preprocessing_result.get('message', 'Unknown error')}")
|
| 215 |
+
|
| 216 |
+
except Exception as e:
|
| 217 |
+
st.error(f"Error preprocessing image: {str(e)}")
|
| 218 |
+
logging.error(f"Preprocessing error: {str(e)}", exc_info=True)
|
| 219 |
+
|
| 220 |
+
# Continue with original code - now using PIL to open the file
|
| 221 |
+
image = Image.open(uploaded_file)
|
| 222 |
+
|
| 223 |
+
# Add person name field
|
| 224 |
+
st.subheader("2. Person Information")
|
| 225 |
+
st.markdown("Enter the name of the person in the image")
|
| 226 |
+
|
| 227 |
+
person_name = st.text_input(
|
| 228 |
+
"Person Name",
|
| 229 |
+
key="person_name",
|
| 230 |
+
help="Enter the name of the person whose emotions you want to analyze"
|
| 231 |
+
)
|
| 232 |
+
|
| 233 |
+
# Process button - disabled if no name entered
|
| 234 |
+
process_button = st.button(
|
| 235 |
+
"Analyze Image",
|
| 236 |
+
disabled=(not person_name or len(person_name.strip()) == 0),
|
| 237 |
+
help="You must enter a person name before analyzing"
|
| 238 |
+
)
|
| 239 |
+
|
| 240 |
+
# Show warning if name field is empty
|
| 241 |
+
if not person_name or len(person_name.strip()) == 0:
|
| 242 |
+
st.warning("Please enter the person's name before analyzing the image")
|
| 243 |
+
|
| 244 |
+
# Only proceed if button is clicked and name is provided
|
| 245 |
+
if process_button:
|
| 246 |
+
# Validate person name again for safety
|
| 247 |
+
if not person_name or len(person_name.strip()) == 0:
|
| 248 |
+
st.error("Person name is required. Please enter a name to proceed.")
|
| 249 |
+
else:
|
| 250 |
+
with st.spinner("Processing image..."):
|
| 251 |
+
# Save the uploaded file
|
| 252 |
+
success, message, file_path = save_uploaded_file(
|
| 253 |
+
uploaded_file,
|
| 254 |
+
settings.UPLOADS_DIR
|
| 255 |
+
)
|
| 256 |
+
|
| 257 |
+
if not success:
|
| 258 |
+
st.error(message)
|
| 259 |
+
else:
|
| 260 |
+
# Add to history
|
| 261 |
+
st.session_state.upload_history.append({
|
| 262 |
+
"timestamp": datetime.now().isoformat(),
|
| 263 |
+
"file_path": file_path,
|
| 264 |
+
"file_name": uploaded_file.name
|
| 265 |
+
})
|
| 266 |
+
|
| 267 |
+
# Prepare data for processing
|
| 268 |
+
process_data = {
|
| 269 |
+
"image_path": file_path,
|
| 270 |
+
"person_name": person_name.strip(),
|
| 271 |
+
"detection_confidence": confidence_threshold,
|
| 272 |
+
"use_advanced_emotion": st.session_state.use_advanced_emotion
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
# STEP 4: Get the appropriate image for processing (original or preprocessed)
|
| 276 |
+
if "original_image" in st.session_state:
|
| 277 |
+
processing_img = get_processing_image(image_service, st.session_state.original_image)
|
| 278 |
+
|
| 279 |
+
# Convert to proper format if needed for agent processing
|
| 280 |
+
if "using_preprocessed_image" in st.session_state and st.session_state["using_preprocessed_image"]:
|
| 281 |
+
# Make sure we're using the preprocessed image for processing
|
| 282 |
+
process_data["use_preprocessed_image"] = True
|
| 283 |
+
process_data["preprocessed_image_path"] = st.session_state.get("preprocessed_image_path", "")
|
| 284 |
+
|
| 285 |
+
# Process the image
|
| 286 |
+
agent_manager = get_agent_manager()
|
| 287 |
+
results = agent_manager.process_visual(process_data)
|
| 288 |
+
|
| 289 |
+
# Display results
|
| 290 |
+
st.subheader("Analysis Results")
|
| 291 |
+
|
| 292 |
+
if "error" in results:
|
| 293 |
+
st.error(results["error"])
|
| 294 |
+
else:
|
| 295 |
+
# Display faces detected
|
| 296 |
+
st.markdown(f"**Faces Detected:** {results['face_count']}")
|
| 297 |
+
|
| 298 |
+
# Save analysis results to the database
|
| 299 |
+
try:
|
| 300 |
+
# Clean the person name
|
| 301 |
+
clean_name = person_name.strip()
|
| 302 |
+
|
| 303 |
+
# Save to database with person name as tag
|
| 304 |
+
# [April 2025] - Updated tag handling to use person name as the sole tag
|
| 305 |
+
# This ensures consistency with the local version and improves filtering
|
| 306 |
+
# Previously used generic tags like 'emotion_analysis' and emotion names
|
| 307 |
+
db_service = get_database_service()
|
| 308 |
+
result_id = db_service.save_analysis_results(
|
| 309 |
+
session_id=st.session_state.session_id,
|
| 310 |
+
image_path=file_path,
|
| 311 |
+
results=results,
|
| 312 |
+
tags=[clean_name]
|
| 313 |
+
)
|
| 314 |
+
|
| 315 |
+
# Print debugging information
|
| 316 |
+
print(f"[DEBUG] Analysis saved with ID: {result_id}")
|
| 317 |
+
print(f"[DEBUG] Person tag: {clean_name}")
|
| 318 |
+
|
| 319 |
+
st.success(f"Analysis saved to history with tag: {clean_name}")
|
| 320 |
+
except Exception as e:
|
| 321 |
+
print(f"[DEBUG] Error saving analysis results: {e}")
|
| 322 |
+
logger.error(f"Error saving analysis results: {e}")
|
| 323 |
+
|
| 324 |
+
if results["face_count"] > 0:
|
| 325 |
+
st.markdown("### Detected Faces")
|
| 326 |
+
|
| 327 |
+
# Load image for visualization
|
| 328 |
+
img_path = os.path.join(settings.STATIC_DIR, os.path.relpath(file_path, start=settings.STATIC_DIR))
|
| 329 |
+
img = cv2.imread(file_path)
|
| 330 |
+
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
| 331 |
+
|
| 332 |
+
# Create a clean copy of the image without annotations
|
| 333 |
+
clean_img = img.copy()
|
| 334 |
+
|
| 335 |
+
# Draw bounding boxes and labels
|
| 336 |
+
for i, face in enumerate(results["faces"]):
|
| 337 |
+
x1, y1, x2, y2 = face["bbox"]
|
| 338 |
+
|
| 339 |
+
# Get emotion and corresponding color
|
| 340 |
+
emotion = face["emotion"]
|
| 341 |
+
confidence = face.get("confidence", 0.55)
|
| 342 |
+
|
| 343 |
+
# Create color based on emotion - default to green if no matching service
|
| 344 |
+
color = (0, 255, 0) # Default color (green)
|
| 345 |
+
|
| 346 |
+
# Draw rectangle with emotion-based color
|
| 347 |
+
cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
|
| 348 |
+
|
| 349 |
+
# Draw just the face number in a small font at the top-left corner
|
| 350 |
+
# to minimize overlap issues
|
| 351 |
+
cv2.putText(
|
| 352 |
+
img,
|
| 353 |
+
f"{i+1}",
|
| 354 |
+
(x1 + 5, y1 + 15),
|
| 355 |
+
cv2.FONT_HERSHEY_SIMPLEX,
|
| 356 |
+
0.5, # Smaller font
|
| 357 |
+
color,
|
| 358 |
+
2
|
| 359 |
+
)
|
| 360 |
+
|
| 361 |
+
# Display the annotated image
|
| 362 |
+
st.image(img, caption="Analysis Results", use_column_width=True)
|
| 363 |
+
|
| 364 |
+
# Create tabbed interface for emotion details with clear numbering
|
| 365 |
+
face_tabs = [f"1. Face {i+1}" for i in range(len(results["faces"]))]
|
| 366 |
+
tabs = st.tabs(face_tabs)
|
| 367 |
+
|
| 368 |
+
# Display detailed emotion information for each face
|
| 369 |
+
for i, (tab, face) in enumerate(zip(tabs, results["faces"])):
|
| 370 |
+
with tab:
|
| 371 |
+
# Create two columns for layout
|
| 372 |
+
col1, col2 = st.columns([2, 3])
|
| 373 |
+
|
| 374 |
+
with col1:
|
| 375 |
+
# 1. Display the primary emotion with text representation
|
| 376 |
+
emotion = face["emotion"]
|
| 377 |
+
emotion_name = emotion.capitalize()
|
| 378 |
+
|
| 379 |
+
# Use text representation instead of emoji to avoid display issues
|
| 380 |
+
emotion_icons = {
|
| 381 |
+
'joy': '(Happy)',
|
| 382 |
+
'sadness': '(Sad)',
|
| 383 |
+
'anger': '(Angry)',
|
| 384 |
+
'fear': '(Afraid)',
|
| 385 |
+
'surprise': '(Surprised)',
|
| 386 |
+
'disgust': '(Disgusted)',
|
| 387 |
+
'neutral': '(Neutral)',
|
| 388 |
+
'unknown': '(Unknown)'
|
| 389 |
+
}
|
| 390 |
+
|
| 391 |
+
emotion_text = emotion_icons.get(emotion, '')
|
| 392 |
+
st.markdown(f"## {emotion_name} {emotion_text}")
|
| 393 |
+
st.markdown(f"**Confidence:** {face.get('confidence', 0.0):.2f}")
|
| 394 |
+
|
| 395 |
+
# Show advanced data if available from DeepFace
|
| 396 |
+
if 'age' in face:
|
| 397 |
+
st.markdown(f"**Age Estimate:** {face['age']}")
|
| 398 |
+
if 'gender' in face:
|
| 399 |
+
gender = face['gender']
|
| 400 |
+
# Capitalize first letter
|
| 401 |
+
gender = gender[0].upper() + gender[1:].lower() if len(gender) > 0 else gender
|
| 402 |
+
st.markdown(f"**Gender Estimate:** {gender}")
|
| 403 |
+
|
| 404 |
+
# Get facial features
|
| 405 |
+
features = face.get("features", {})
|
| 406 |
+
# Fallback to emotion_features if features is not present
|
| 407 |
+
if not features and "emotion_features" in face:
|
| 408 |
+
features = face.get("emotion_features", {})
|
| 409 |
+
|
| 410 |
+
# Create metrics in a row - clearly labeled
|
| 411 |
+
st.markdown("### 1. Facial Metrics")
|
| 412 |
+
cols = st.columns(3)
|
| 413 |
+
with cols[0]:
|
| 414 |
+
st.metric("Brightness", f"{features.get('brightness', 0):.1f}")
|
| 415 |
+
with cols[1]:
|
| 416 |
+
st.metric("Contrast", f"{features.get('contrast', 0):.1f}")
|
| 417 |
+
with cols[2]:
|
| 418 |
+
st.metric("Symmetry", f"{features.get('symmetry', 0):.1f}")
|
| 419 |
+
|
| 420 |
+
with col2:
|
| 421 |
+
# 2. Create a bar chart of all emotions
|
| 422 |
+
# Obtener emociones o usar valores predeterminados
|
| 423 |
+
# Asegurar que todas las emociones estén presentes
|
| 424 |
+
all_emotions = {
|
| 425 |
+
'anger': 0.0,
|
| 426 |
+
'disgust': 0.0,
|
| 427 |
+
'fear': 0.0,
|
| 428 |
+
'joy': 0.0,
|
| 429 |
+
'neutral': 0.0,
|
| 430 |
+
'sadness': 0.0,
|
| 431 |
+
'surprise': 0.0
|
| 432 |
+
}
|
| 433 |
+
|
| 434 |
+
# Actualizar con valores detectados
|
| 435 |
+
emotions = face.get('emotions', {'neutral': 1.0})
|
| 436 |
+
for emotion, value in emotions.items():
|
| 437 |
+
if emotion in all_emotions:
|
| 438 |
+
all_emotions[emotion] = value
|
| 439 |
+
|
| 440 |
+
# Create a title for the chart section
|
| 441 |
+
st.markdown("### 2. Emotion Confidence")
|
| 442 |
+
|
| 443 |
+
# Display the bar chart
|
| 444 |
+
import pandas as pd
|
| 445 |
+
emotions_df = pd.DataFrame({
|
| 446 |
+
'Emotion': list(all_emotions.keys()),
|
| 447 |
+
'Confidence': list(all_emotions.values())
|
| 448 |
+
})
|
| 449 |
+
|
| 450 |
+
# First show the chart
|
| 451 |
+
st.bar_chart(emotions_df.set_index('Emotion'))
|
| 452 |
+
|
| 453 |
+
# Then show the exact values in a clean format
|
| 454 |
+
values_col1, values_col2, values_col3 = st.columns(3)
|
| 455 |
+
|
| 456 |
+
# Split the emotion values into 3 columns for better presentation
|
| 457 |
+
emotions_list = list(all_emotions.items())
|
| 458 |
+
chunk_size = (len(emotions_list) + 2) // 3 # Ceiling division
|
| 459 |
+
chunks = [emotions_list[i:i + chunk_size] for i in range(0, len(emotions_list), chunk_size)]
|
| 460 |
+
|
| 461 |
+
# Add values to each column
|
| 462 |
+
for i, col in enumerate([values_col1, values_col2, values_col3]):
|
| 463 |
+
if i < len(chunks):
|
| 464 |
+
with col:
|
| 465 |
+
for emotion, value in chunks[i]:
|
| 466 |
+
# Format to 2 decimal places
|
| 467 |
+
st.markdown(f"**{emotion.capitalize()}**: {value:.2f}")
|
| 468 |
+
|
| 469 |
+
# 3. Display features that contributed to the emotion
|
| 470 |
+
st.markdown("### 3. Analysis Features")
|
| 471 |
+
|
| 472 |
+
# Explanation text with numbered sequence
|
| 473 |
+
st.markdown("""
|
| 474 |
+
### 4. How We Determined This Emotion
|
| 475 |
+
|
| 476 |
+
1. **Face Detection**: We located the face in the image
|
| 477 |
+
2. **Feature Extraction**: We analyzed brightness, contrast, and symmetry
|
| 478 |
+
3. **Emotion Classification**: We matched these features to emotional patterns
|
| 479 |
+
""")
|
| 480 |
+
|
| 481 |
+
# Display detailed results in expandable section with improved title and description
|
| 482 |
+
with st.expander("Technical Data (For Developers)"):
|
| 483 |
+
st.markdown("""
|
| 484 |
+
This section displays the raw JSON data from the analysis process.
|
| 485 |
+
It's intended for developers and technical users who need access to the exact values and parameters used in the emotion detection process.
|
| 486 |
+
|
| 487 |
+
**Uses for this data:**
|
| 488 |
+
- Transparency on how the system works
|
| 489 |
+
- Debugging and development
|
| 490 |
+
- Access to precise numerical values
|
| 491 |
+
- Export for use in other systems or tools
|
| 492 |
+
""")
|
| 493 |
+
st.json(results)
|
| 494 |
+
else:
|
| 495 |
+
st.info("No faces were detected in the image. Please try another image.")
|
| 496 |
+
except Exception as e:
|
| 497 |
+
logger.error(f"Error processing image: {e}")
|
| 498 |
+
st.error(f"Error processing image: {str(e)}")
|
| 499 |
+
|
| 500 |
+
# History page
|
| 501 |
+
elif page == "History":
|
| 502 |
+
st.header("Analysis History")
|
| 503 |
+
st.markdown("View your previous analyses and export results.")
|
| 504 |
+
|
| 505 |
+
# Initialize database service
|
| 506 |
+
db_service = get_database_service()
|
| 507 |
+
|
| 508 |
+
# Create tabs for different views - Using numbered sequence for clear navigation
|
| 509 |
+
history_tabs = ["1. Recent Analyses", "2. Statistics", "3. Export Data"]
|
| 510 |
+
tab1, tab2, tab3 = st.tabs(history_tabs)
|
| 511 |
+
|
| 512 |
+
with tab1:
|
| 513 |
+
st.subheader("Recent Emotion Analyses")
|
| 514 |
+
|
| 515 |
+
# Get available person tags from the database
|
| 516 |
+
# [April 2025] - Person filtering system synchronized with local version
|
| 517 |
+
# to show actual person names instead of emotion tags
|
| 518 |
+
try:
|
| 519 |
+
all_analyses = db_service.get_history(session_id=st.session_state.session_id, limit=100)
|
| 520 |
+
person_tags = set()
|
| 521 |
+
for analysis in all_analyses:
|
| 522 |
+
if 'tags' in analysis and analysis['tags']:
|
| 523 |
+
for tag in analysis['tags']:
|
| 524 |
+
person_tags.add(tag)
|
| 525 |
+
|
| 526 |
+
person_tags = sorted(list(person_tags))
|
| 527 |
+
|
| 528 |
+
# Create a filter dropdown if there are person tags
|
| 529 |
+
selected_person = None
|
| 530 |
+
if person_tags:
|
| 531 |
+
filter_options = ["All People"] + person_tags
|
| 532 |
+
selected_filter = st.selectbox(
|
| 533 |
+
"Filter by Person",
|
| 534 |
+
options=filter_options,
|
| 535 |
+
index=0,
|
| 536 |
+
help="Select a person to filter the analysis history"
|
| 537 |
+
)
|
| 538 |
+
|
| 539 |
+
if selected_filter != "All People":
|
| 540 |
+
selected_person = selected_filter
|
| 541 |
+
st.info(f"Showing analyses for: **{selected_filter}**")
|
| 542 |
+
except Exception as e:
|
| 543 |
+
logger.error(f"Error retrieving tags: {e}")
|
| 544 |
+
person_tags = []
|
| 545 |
+
selected_person = None
|
| 546 |
+
|
| 547 |
+
# Get analysis history from database, filtered by person if selected
|
| 548 |
+
analyses = []
|
| 549 |
+
try:
|
| 550 |
+
# Get analyses, filtered by tag if a person is selected
|
| 551 |
+
if selected_person:
|
| 552 |
+
# For simplicity, we'll retrieve all analyses and filter them here
|
| 553 |
+
# In a production app, we'd want to filter in the database query
|
| 554 |
+
all_analyses = db_service.get_history(session_id=st.session_state.session_id, limit=20)
|
| 555 |
+
analyses = [a for a in all_analyses if 'tags' in a and a['tags'] and selected_person in a['tags']]
|
| 556 |
+
else:
|
| 557 |
+
analyses = db_service.get_history(session_id=st.session_state.session_id, limit=10)
|
| 558 |
+
|
| 559 |
+
print(f"[DEBUG] Found {len(analyses)} analyses to display")
|
| 560 |
+
for a in analyses:
|
| 561 |
+
print(f"[DEBUG] Analysis ID: {a.get('id', 'unknown')} Timestamp: {a.get('timestamp', 'unknown')}")
|
| 562 |
+
except Exception as e:
|
| 563 |
+
logger.error(f"Error retrieving analyses: {e}")
|
| 564 |
+
print(f"[DEBUG] Error retrieving analyses: {e}")
|
| 565 |
+
|
| 566 |
+
if analyses:
|
| 567 |
+
for i, analysis in enumerate(analyses):
|
| 568 |
+
with st.expander(f"Analysis #{analysis['id']} - {analysis['timestamp'][:16]}", expanded=(i==0)):
|
| 569 |
+
# Create columns for layout
|
| 570 |
+
col1, col2 = st.columns([1, 2])
|
| 571 |
+
|
| 572 |
+
with col1:
|
| 573 |
+
# Show image if available and path exists
|
| 574 |
+
image_path = analysis['image_path']
|
| 575 |
+
if os.path.exists(image_path):
|
| 576 |
+
try:
|
| 577 |
+
img = Image.open(image_path)
|
| 578 |
+
st.image(img, caption=f"Analyzed Image", use_column_width=True)
|
| 579 |
+
except Exception as e:
|
| 580 |
+
st.error(f"Unable to load image: {str(e)}")
|
| 581 |
+
else:
|
| 582 |
+
st.warning("Image file no longer available")
|
| 583 |
+
|
| 584 |
+
with col2:
|
| 585 |
+
# Display analysis details
|
| 586 |
+
st.markdown(f"**Date:** {analysis['timestamp'][:19]}")
|
| 587 |
+
st.markdown(f"**Faces Detected:** {analysis['face_count']}")
|
| 588 |
+
|
| 589 |
+
# Show person name if available
|
| 590 |
+
if 'tags' in analysis and analysis['tags']:
|
| 591 |
+
st.markdown(f"**Person:** {analysis['tags'][0]}")
|
| 592 |
+
|
| 593 |
+
# Check if advanced detection was used
|
| 594 |
+
# Comprobar si los resultados ya son un diccionario o una cadena JSON
|
| 595 |
+
if isinstance(analysis['results'], dict):
|
| 596 |
+
results_dict = analysis['results']
|
| 597 |
+
else:
|
| 598 |
+
results_dict = json.loads(analysis['results'])
|
| 599 |
+
|
| 600 |
+
using_advanced = False
|
| 601 |
+
has_advanced_data = False
|
| 602 |
+
age_data = None
|
| 603 |
+
gender_data = None
|
| 604 |
+
|
| 605 |
+
# Check for advanced detection usage and data
|
| 606 |
+
if 'faces' in results_dict and len(results_dict['faces']) > 0:
|
| 607 |
+
first_face = results_dict['faces'][0]
|
| 608 |
+
using_advanced = first_face.get('using_advanced', False)
|
| 609 |
+
|
| 610 |
+
# Check for advanced data
|
| 611 |
+
if 'age' in first_face:
|
| 612 |
+
has_advanced_data = True
|
| 613 |
+
age_data = first_face['age']
|
| 614 |
+
if 'gender' in first_face:
|
| 615 |
+
has_advanced_data = True
|
| 616 |
+
gender_data = first_face['gender']
|
| 617 |
+
|
| 618 |
+
# Show detection method used
|
| 619 |
+
if using_advanced:
|
| 620 |
+
st.markdown("**Detection:** <span style='background-color:#4CAF50;color:white;padding:2px 6px;border-radius:3px;font-size:0.7em;'>DeepFace</span>", unsafe_allow_html=True)
|
| 621 |
+
|
| 622 |
+
# Show advanced data if available
|
| 623 |
+
if has_advanced_data:
|
| 624 |
+
st.markdown("### Advanced Data")
|
| 625 |
+
if age_data is not None:
|
| 626 |
+
st.markdown(f"**Age Estimate:** {age_data}")
|
| 627 |
+
if gender_data is not None:
|
| 628 |
+
# Capitalize gender
|
| 629 |
+
if isinstance(gender_data, str) and len(gender_data) > 0:
|
| 630 |
+
gender_data = gender_data[0].upper() + gender_data[1:].lower() if len(gender_data) > 0 else gender_data
|
| 631 |
+
st.markdown(f"**Gender Estimate:** {gender_data}")
|
| 632 |
+
|
| 633 |
+
# Show emotion data
|
| 634 |
+
if analysis['face_count'] > 0:
|
| 635 |
+
st.markdown("### 1. Detected Emotions")
|
| 636 |
+
|
| 637 |
+
# Display each face
|
| 638 |
+
for j, face in enumerate(results_dict['faces']):
|
| 639 |
+
# Get emotion data
|
| 640 |
+
emotion = face['emotion'].capitalize()
|
| 641 |
+
confidence = face.get('emotion_confidence', face.get('confidence', 0))
|
| 642 |
+
|
| 643 |
+
# Show badge for DeepFace if used
|
| 644 |
+
if face.get('using_advanced', False):
|
| 645 |
+
st.markdown(f"**Face {j+1}:** {emotion} <span style='background-color:#4CAF50;color:white;padding:2px 6px;border-radius:3px;font-size:0.7em;'>DeepFace</span> (Confidence: {confidence:.2f})", unsafe_allow_html=True)
|
| 646 |
+
else:
|
| 647 |
+
st.markdown(f"**Face {j+1}:** {emotion} (Confidence: {confidence:.2f})")
|
| 648 |
+
|
| 649 |
+
# Create emotion dataframe for visualization
|
| 650 |
+
if 'emotions' in face:
|
| 651 |
+
emotions = face['emotions']
|
| 652 |
+
emotion_df = pd.DataFrame({
|
| 653 |
+
'Emotion': [k.capitalize() for k in emotions.keys()],
|
| 654 |
+
'Score': list(emotions.values())
|
| 655 |
+
})
|
| 656 |
+
emotion_df = emotion_df.sort_values('Score', ascending=False)
|
| 657 |
+
|
| 658 |
+
# Display mini chart
|
| 659 |
+
st.bar_chart(emotion_df.set_index('Emotion'), height=150)
|
| 660 |
+
|
| 661 |
+
# Option to delete analysis
|
| 662 |
+
if st.button(f"Delete Analysis #{analysis['id']}", key=f"del_{analysis['id']}"):
|
| 663 |
+
if db_service.delete_record(analysis['id']):
|
| 664 |
+
st.success("Analysis deleted successfully!")
|
| 665 |
+
st.experimental_rerun()
|
| 666 |
+
else:
|
| 667 |
+
st.error("Failed to delete analysis")
|
| 668 |
+
else:
|
| 669 |
+
st.info("No previous analyses found in the database.")
|
| 670 |
+
|
| 671 |
+
# Display session upload history if available
|
| 672 |
+
if st.session_state.upload_history:
|
| 673 |
+
st.markdown("### Recent Uploads (Not yet saved to database)")
|
| 674 |
+
for i, upload in enumerate(st.session_state.upload_history):
|
| 675 |
+
st.markdown(f"**{i+1}. {upload['file_name']}**")
|
| 676 |
+
st.markdown(f"* Timestamp: {upload['timestamp']}")
|
| 677 |
+
|
| 678 |
+
with tab2:
|
| 679 |
+
st.subheader("Emotion Analytics")
|
| 680 |
+
|
| 681 |
+
# Add filter by person to statistics view
|
| 682 |
+
try:
|
| 683 |
+
all_analyses = db_service.get_history(session_id=st.session_state.session_id, limit=100)
|
| 684 |
+
person_tags = set()
|
| 685 |
+
for analysis in all_analyses:
|
| 686 |
+
if 'tags' in analysis and analysis['tags']:
|
| 687 |
+
for tag in analysis['tags']:
|
| 688 |
+
person_tags.add(tag)
|
| 689 |
+
|
| 690 |
+
person_tags = sorted(list(person_tags))
|
| 691 |
+
|
| 692 |
+
# Create a filter dropdown if there are person tags
|
| 693 |
+
selected_person = None
|
| 694 |
+
if person_tags:
|
| 695 |
+
filter_options = ["All People"] + person_tags
|
| 696 |
+
selected_filter = st.selectbox(
|
| 697 |
+
"1. Select Person to Analyze",
|
| 698 |
+
options=filter_options,
|
| 699 |
+
index=0,
|
| 700 |
+
help="Select a person to view their emotion statistics"
|
| 701 |
+
)
|
| 702 |
+
|
| 703 |
+
if selected_filter != "All People":
|
| 704 |
+
selected_person = selected_filter
|
| 705 |
+
except Exception as e:
|
| 706 |
+
logger.error(f"Error retrieving tags for statistics: {e}")
|
| 707 |
+
person_tags = []
|
| 708 |
+
selected_person = None
|
| 709 |
+
|
| 710 |
+
# Get emotion statistics
|
| 711 |
+
emotion_stats = db_service.get_emotion_stats(session_id=st.session_state.session_id)
|
| 712 |
+
|
| 713 |
+
if emotion_stats:
|
| 714 |
+
# Display chart of emotion frequencies
|
| 715 |
+
st.markdown("### 2. Emotion Distribution")
|
| 716 |
+
|
| 717 |
+
# Create dataframe for visualization
|
| 718 |
+
stats_df = pd.DataFrame({
|
| 719 |
+
'Emotion': list(emotion_stats.keys()),
|
| 720 |
+
'Frequency': list(emotion_stats.values())
|
| 721 |
+
})
|
| 722 |
+
|
| 723 |
+
# Sort by frequency for better visualization
|
| 724 |
+
stats_df = stats_df.sort_values('Frequency', ascending=False)
|
| 725 |
+
|
| 726 |
+
# Display chart
|
| 727 |
+
st.bar_chart(stats_df.set_index('Emotion'))
|
| 728 |
+
|
| 729 |
+
# Show numeric values below the chart
|
| 730 |
+
st.markdown("### 3. Detailed Percentages")
|
| 731 |
+
|
| 732 |
+
# Create columns for better presentation
|
| 733 |
+
cols = st.columns(3)
|
| 734 |
+
for i, (emotion, frequency) in enumerate(zip(stats_df['Emotion'], stats_df['Frequency'])):
|
| 735 |
+
col_idx = i % 3
|
| 736 |
+
with cols[col_idx]:
|
| 737 |
+
st.metric(
|
| 738 |
+
emotion.capitalize(),
|
| 739 |
+
f"{frequency:.1%}",
|
| 740 |
+
delta=None
|
| 741 |
+
)
|
| 742 |
+
|
| 743 |
+
# Add explanation of statistics
|
| 744 |
+
st.markdown("---")
|
| 745 |
+
st.markdown("### 4. Understanding These Statistics")
|
| 746 |
+
st.markdown("""
|
| 747 |
+
The data above shows the distribution of emotions detected across all analyses:
|
| 748 |
+
|
| 749 |
+
1. **Emotion Distribution** - The bar chart visualizes the relative frequency of each emotion
|
| 750 |
+
2. **Detailed Percentages** - Exact percentage values for each emotion detected
|
| 751 |
+
3. **Sample Size** - Based on all faces detected in the selected analyses
|
| 752 |
+
""")
|
| 753 |
+
else:
|
| 754 |
+
st.info("No emotion data available for analysis yet.")
|
| 755 |
+
|
| 756 |
+
with tab3:
|
| 757 |
+
st.subheader("Export Analysis Data")
|
| 758 |
+
|
| 759 |
+
# Add filter by person for export
|
| 760 |
+
selected_person_tag = None
|
| 761 |
+
try:
|
| 762 |
+
all_analyses = db_service.get_history(session_id=st.session_state.session_id, limit=100)
|
| 763 |
+
person_tags = set()
|
| 764 |
+
for analysis in all_analyses:
|
| 765 |
+
if 'tags' in analysis and analysis['tags']:
|
| 766 |
+
for tag in analysis['tags']:
|
| 767 |
+
person_tags.add(tag)
|
| 768 |
+
|
| 769 |
+
person_tags = sorted(list(person_tags))
|
| 770 |
+
|
| 771 |
+
# Create a person filter dropdown if person tags exist
|
| 772 |
+
if person_tags:
|
| 773 |
+
person_options = ["All People"] + person_tags
|
| 774 |
+
person_filter = st.selectbox(
|
| 775 |
+
"1. Select Person to Export",
|
| 776 |
+
options=person_options,
|
| 777 |
+
index=0,
|
| 778 |
+
help="Filter export data to include only analyses for a specific person"
|
| 779 |
+
)
|
| 780 |
+
|
| 781 |
+
if person_filter != "All People":
|
| 782 |
+
selected_person_tag = person_filter
|
| 783 |
+
st.info(f"Export will only include analyses for: {selected_person_tag}")
|
| 784 |
+
except Exception as e:
|
| 785 |
+
logger.error(f"Error retrieving person tags for export: {e}")
|
| 786 |
+
|
| 787 |
+
# Options for export format
|
| 788 |
+
export_type = st.radio(
|
| 789 |
+
"2. Select Export Format",
|
| 790 |
+
["JSON", "CSV"],
|
| 791 |
+
horizontal=True
|
| 792 |
+
)
|
| 793 |
+
|
| 794 |
+
export_scope = st.radio(
|
| 795 |
+
"3. Select Data Scope",
|
| 796 |
+
["Current Session", "All Sessions"],
|
| 797 |
+
horizontal=True
|
| 798 |
+
)
|
| 799 |
+
|
| 800 |
+
session_id = st.session_state.session_id if export_scope == "Current Session" else None
|
| 801 |
+
|
| 802 |
+
# Option to limit number of records
|
| 803 |
+
record_limit = st.slider(
|
| 804 |
+
"4. Maximum Records to Export",
|
| 805 |
+
min_value=1,
|
| 806 |
+
max_value=100,
|
| 807 |
+
value=20,
|
| 808 |
+
step=1
|
| 809 |
+
)
|
| 810 |
+
|
| 811 |
+
# Generate export button
|
| 812 |
+
if st.button("Generate Export"):
|
| 813 |
+
with st.spinner("Preparing export..."):
|
| 814 |
+
# Get export data
|
| 815 |
+
export_data = db_service.export_data(
|
| 816 |
+
session_id=session_id,
|
| 817 |
+
limit=record_limit
|
| 818 |
+
)
|
| 819 |
+
|
| 820 |
+
# Filter data by person if selected
|
| 821 |
+
if selected_person_tag and 'analyses' in export_data:
|
| 822 |
+
filtered_analyses = []
|
| 823 |
+
for analysis in export_data['analyses']:
|
| 824 |
+
if 'tags' in analysis and analysis['tags'] and selected_person_tag in analysis['tags']:
|
| 825 |
+
filtered_analyses.append(analysis)
|
| 826 |
+
|
| 827 |
+
export_data['analyses'] = filtered_analyses
|
| 828 |
+
export_data['metadata']['record_count'] = len(filtered_analyses)
|
| 829 |
+
export_data['metadata']['person_filter'] = selected_person_tag
|
| 830 |
+
|
| 831 |
+
if not export_data or not export_data.get('analyses'):
|
| 832 |
+
st.warning("No data available to export")
|
| 833 |
+
else:
|
| 834 |
+
st.success(f"Export generated with {len(export_data.get('analyses', []))} analyses")
|
| 835 |
+
|
| 836 |
+
# Create download options based on export type
|
| 837 |
+
if export_type == "JSON":
|
| 838 |
+
json_str = export_to_json(export_data)
|
| 839 |
+
filename = f"emotion_analysis_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
|
| 840 |
+
|
| 841 |
+
# Create download button
|
| 842 |
+
st.download_button(
|
| 843 |
+
label="Download JSON",
|
| 844 |
+
data=json_str,
|
| 845 |
+
file_name=filename,
|
| 846 |
+
mime="application/json"
|
| 847 |
+
)
|
| 848 |
+
else: # CSV
|
| 849 |
+
csv_str = export_to_csv(export_data)
|
| 850 |
+
filename = f"emotion_analysis_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
|
| 851 |
+
|
| 852 |
+
# Create download button
|
| 853 |
+
st.download_button(
|
| 854 |
+
label="Download CSV",
|
| 855 |
+
data=csv_str,
|
| 856 |
+
file_name=filename,
|
| 857 |
+
mime="text/csv"
|
| 858 |
+
)
|
| 859 |
+
|
| 860 |
+
# Show summary statistics
|
| 861 |
+
summary = generate_emotion_summary(export_data)
|
| 862 |
+
|
| 863 |
+
st.markdown("### Export Summary")
|
| 864 |
+
st.markdown(f"**Total Analyses:** {summary['total_analyses']}")
|
| 865 |
+
st.markdown(f"**Total Faces Analyzed:** {summary['total_faces']}")
|
| 866 |
+
|
| 867 |
+
if summary['total_faces'] > 0:
|
| 868 |
+
# Display emotion distribution
|
| 869 |
+
st.markdown("**Emotion Distribution:**")
|
| 870 |
+
|
| 871 |
+
# Create dataframe for visualization
|
| 872 |
+
emotions_df = pd.DataFrame({
|
| 873 |
+
'Emotion': list(summary['emotion_percentages'].keys()),
|
| 874 |
+
'Percentage': list(summary['emotion_percentages'].values())
|
| 875 |
+
})
|
| 876 |
+
|
| 877 |
+
# Sort by percentage
|
| 878 |
+
emotions_df = emotions_df.sort_values('Percentage', ascending=False)
|
| 879 |
+
|
| 880 |
+
# Display chart
|
| 881 |
+
st.bar_chart(emotions_df.set_index('Emotion'))
|
| 882 |
+
|
| 883 |
+
# Add explanation of export formats below visualization
|
| 884 |
+
st.markdown("---")
|
| 885 |
+
st.markdown("### 5. About Export Formats")
|
| 886 |
+
st.markdown("""
|
| 887 |
+
The exported data includes analysis details in your chosen format:
|
| 888 |
+
|
| 889 |
+
1. **JSON Format** - Complete data structure with all details, ideal for further processing
|
| 890 |
+
2. **CSV Format** - Tabular data format, can be opened in spreadsheet software
|
| 891 |
+
3. **Data Contents** - Each export includes timestamps, emotion labels, confidence values, and facial features
|
| 892 |
+
4. **Person Information** - If you specified a person name, it's included with each analysis
|
| 893 |
+
""")
|
| 894 |
+
|
# About page
# Renders a static project description; no user input or computation here.
# NOTE(review): the original in-string indentation was lost in the diff
# rendering — markdown body reproduced flush-left as shown; verify against
# the deployed app that rendering is unchanged.
elif page == "About":
    st.header("About EmotionMirror")

    # Hard-coded marketing/描述 text is intentional: this page is informational only.
    st.markdown("""
## EmotionMirror: Emotional Analysis System

EmotionMirror is an application that uses computer vision and artificial intelligence
to analyze emotions from facial expressions and body language.

### Technology Stack

* **Streamlit**: For the user interface
* **YOLOv8**: For object detection, pose estimation, and facial analysis
* **Agent Framework**: A custom multi-agent system for coordinated analysis

### Future Features

* Enhanced emotion recognition with Hume.ai integration
* Temporal emotion tracking and pattern analysis
* Personalized recommendations based on emotional states
* Guided emotional wellness sessions

### Privacy

* All image processing is done locally
* We don't store your images after processing
* No personal data is shared with third parties
""")

# Footer
# NOTE(review): assumed to sit at top level (after the page if/elif chain)
# so it renders on every page — original indentation lost in diff; confirm.
st.markdown("---")
st.markdown(" EmotionMirror | Developed as a prototype application")
config/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
Configuration package for EmotionMirror application.

Marks the ``config`` directory as a Python package so settings can be
imported (e.g. ``from config import settings``).
"""
|
config/settings.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
Settings module for EmotionMirror application.
Contains configuration parameters and environment settings.

Importing this module has side effects: it creates the upload/result/model/
temp directories and probes database-directory write permissions.
"""
import os
import secrets

# Base directory of the project (parent of the config/ package)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# True when running inside a Hugging Face Space (set via the HF_SPACE env var)
HF_SPACE = os.environ.get('HF_SPACE', '') == 'true'

# Debug mode
DEBUG = os.environ.get('DEBUG', 'false').lower() == 'true'

# Secret key for secure operations.
# NOTE: when SECRET_KEY is not provided in the environment, a fresh random
# key is generated on every process start, invalidating anything signed
# with the previous key.
SECRET_KEY = os.environ.get('SECRET_KEY', secrets.token_hex(16))

# Folder paths
STATIC_DIR = os.path.join(BASE_DIR, 'static')
MODELS_DIR = os.path.join(BASE_DIR, 'models')
UPLOADS_DIR = os.path.join(STATIC_DIR, 'uploads')
RESULTS_DIR = os.path.join(STATIC_DIR, 'results')
TEMP_DIR = os.path.join(BASE_DIR, 'temp')  # Temporary directory used by DeepFace

# Ensure directories exist (import-time side effect)
os.makedirs(UPLOADS_DIR, exist_ok=True)
os.makedirs(RESULTS_DIR, exist_ok=True)
os.makedirs(MODELS_DIR, exist_ok=True)
os.makedirs(TEMP_DIR, exist_ok=True)

# Upload file extensions accepted by the application
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'mp4', 'avi'}

# YOLOv8 model weights used for each task
MODEL_MAPPING = {
    'detection': 'yolov8n.pt',
    'segmentation': 'yolov8n-seg.pt',
    'pose': 'yolov8n-pose.pt',
    'classification': 'yolov8n-cls.pt'
}

# Database configuration
"""
IMPORTANT NOTE ABOUT DIFFERENCES BETWEEN LOCAL AND HUGGING FACE VERSIONS:
-----------------------------------------------------------------------
In the local version, the database is stored in the 'database' directory
inside the project.

In Hugging Face (HF_SPACE=true), due to write permission restrictions,
additional logic is implemented:
1. First attempts to write to the project's 'database' directory
2. If this fails (due to permission restrictions), uses an alternative location
   at '/tmp/emotion_data.db' which normally has write permissions
3. Detailed messages are logged to facilitate debugging

This difference is necessary to ensure that the analysis saving functionality
works correctly in the restricted Hugging Face environment.
"""
if HF_SPACE:
    # Use a database directory inside the project that is more likely
    # to be writable in the Hugging Face environment.
    DB_DIR = os.path.join(BASE_DIR, 'database')
    DB_PATH = os.path.join(DB_DIR, 'emotion_data.db')
    print(f"Hugging Face DB Path: {DB_PATH}")
    # Probe write permissions by creating and removing a throwaway file.
    # Bug fix: os.makedirs() itself can raise (e.g. PermissionError on a
    # read-only Space), so directory creation now lives inside the try as
    # well — otherwise import would crash instead of falling back to /tmp,
    # contradicting the documented behavior above.
    try:
        os.makedirs(DB_DIR, exist_ok=True)
        test_file = os.path.join(DB_DIR, 'test_write.txt')
        with open(test_file, 'w') as f:
            f.write('test')
        os.remove(test_file)
        print(f"Successfully wrote to {DB_DIR}, permissions OK")
    except Exception as e:
        print(f"Warning: Could not write to {DB_DIR}: {e}")
        # Fall back to /tmp, which normally has write permissions.
        DB_PATH = '/tmp/emotion_data.db'
        print(f"Using alternative DB path: {DB_PATH}")
else:
    # Use the project's database directory for local development
    DB_DIR = os.path.join(BASE_DIR, 'database')
    os.makedirs(DB_DIR, exist_ok=True)
    DB_PATH = os.path.join(DB_DIR, 'emotions.db')

# Hume.ai API configuration — the integration is enabled only when a key is set
HUME_API_KEY = os.environ.get('HUME_API_KEY', '')
HUME_API_ENABLED = bool(HUME_API_KEY)

# DeepFace configuration — advanced emotion analysis toggle (on by default)
USE_ADVANCED_EMOTION = os.environ.get('USE_ADVANCED_EMOTION', 'true').lower() == 'true'

# Emotion categories recognized by the application
EMOTIONS = ['joy', 'sadness', 'anger', 'fear', 'surprise', 'disgust', 'neutral']

# DeepFace model settings
DEEPFACE_SETTINGS = {
    'detector_backend': 'retinaface',  # Options: opencv, ssd, mtcnn, retinaface, mediapipe
    'emotion_model': 'fer',  # Options: fer (default), resmob
    'enforce_detection': False,  # Don't fail if face detection fails
    'actions': ['emotion', 'age', 'gender']
}
|
database/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
Database module for EmotionMirror application.

This module contains database-related components for storing and retrieving
emotion analysis history (see ``database.db_manager.DatabaseManager``).
"""
|
database/db_manager.py
ADDED
|
@@ -0,0 +1,463 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Database Manager for EmotionMirror Application
|
| 3 |
+
|
| 4 |
+
This module provides functionality for database operations, including:
|
| 5 |
+
- Creating and initializing the SQLite database
|
| 6 |
+
- Storing analysis results
|
| 7 |
+
- Retrieving historical data
|
| 8 |
+
- Managing database connections
|
| 9 |
+
|
| 10 |
+
All database operations are encapsulated in this module to ensure clean separation
|
| 11 |
+
of concerns and modularity.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import os
|
| 15 |
+
import json
|
| 16 |
+
import sqlite3
|
| 17 |
+
import logging
|
| 18 |
+
from datetime import datetime
|
| 19 |
+
from typing import Dict, List, Any, Optional, Tuple
|
| 20 |
+
|
| 21 |
+
# Configure logging
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
class DatabaseManager:
|
| 25 |
+
"""
|
| 26 |
+
Manages database operations for the EmotionMirror application.
|
| 27 |
+
|
| 28 |
+
This class handles all interactions with the SQLite database, providing
|
| 29 |
+
a clean interface for the rest of the application to store and retrieve data.
|
| 30 |
+
"""
|
| 31 |
+
|
    def __init__(self, db_path: str):
        """
        Initialize the database manager.

        Args:
            db_path: Path to the SQLite database file (created on first use)
        """
        # Only the path is stored, not a live connection: each operation in
        # this class opens and closes its own sqlite3 connection.
        self.db_path = db_path
        # Create the schema eagerly so later queries can assume the tables exist.
        self._initialize_db()
| 41 |
+
|
| 42 |
+
def _initialize_db(self) -> None:
|
| 43 |
+
"""
|
| 44 |
+
Initialize the database by creating necessary tables if they don't exist.
|
| 45 |
+
"""
|
| 46 |
+
os.makedirs(os.path.dirname(self.db_path), exist_ok=True)
|
| 47 |
+
|
| 48 |
+
conn = None
|
| 49 |
+
try:
|
| 50 |
+
conn = sqlite3.connect(self.db_path)
|
| 51 |
+
cursor = conn.cursor()
|
| 52 |
+
|
| 53 |
+
# Create analyses table
|
| 54 |
+
cursor.execute('''
|
| 55 |
+
CREATE TABLE IF NOT EXISTS analyses (
|
| 56 |
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
| 57 |
+
session_id TEXT NOT NULL,
|
| 58 |
+
timestamp TEXT NOT NULL,
|
| 59 |
+
image_path TEXT NOT NULL,
|
| 60 |
+
face_count INTEGER NOT NULL,
|
| 61 |
+
results TEXT NOT NULL,
|
| 62 |
+
tags TEXT
|
| 63 |
+
)
|
| 64 |
+
''')
|
| 65 |
+
|
| 66 |
+
# Create faces table
|
| 67 |
+
cursor.execute('''
|
| 68 |
+
CREATE TABLE IF NOT EXISTS faces (
|
| 69 |
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
| 70 |
+
analysis_id INTEGER NOT NULL,
|
| 71 |
+
face_index INTEGER NOT NULL,
|
| 72 |
+
emotion TEXT NOT NULL,
|
| 73 |
+
confidence REAL NOT NULL,
|
| 74 |
+
features TEXT NOT NULL,
|
| 75 |
+
emotions TEXT NOT NULL,
|
| 76 |
+
FOREIGN KEY (analysis_id) REFERENCES analyses (id)
|
| 77 |
+
)
|
| 78 |
+
''')
|
| 79 |
+
|
| 80 |
+
conn.commit()
|
| 81 |
+
logger.info("Database initialized successfully")
|
| 82 |
+
except sqlite3.Error as e:
|
| 83 |
+
logger.error(f"Database initialization error: {e}")
|
| 84 |
+
finally:
|
| 85 |
+
if conn:
|
| 86 |
+
conn.close()
|
| 87 |
+
|
| 88 |
+
def save_analysis(self,
                  session_id: str,
                  image_path: str,
                  results: Dict[str, Any],
                  tags: Optional[List[str]] = None) -> int:
    """
    Save analysis results to the database.

    Inserts one row into ``analyses`` plus one row per detected face into
    ``faces``. JSON-serializable fields are stored as encoded text.

    Args:
        session_id: Current session identifier
        image_path: Path to the analyzed image
        results: Analysis results dictionary
        tags: Optional list of tags for the analysis

    Returns:
        The ID of the newly inserted analysis record, or -1 on failure
    """
    connection = None
    try:
        connection = sqlite3.connect(self.db_path)
        cur = connection.cursor()

        # Parent record first, so we can use its rowid for the face rows.
        cur.execute(
            '''
            INSERT INTO analyses
            (session_id, timestamp, image_path, face_count, results, tags)
            VALUES (?, ?, ?, ?, ?, ?)
            ''',
            (
                session_id,
                datetime.now().isoformat(),
                image_path,
                results.get('face_count', 0),
                json.dumps(results),
                json.dumps(tags) if tags else None,
            )
        )
        analysis_id = cur.lastrowid

        # Child rows: one per face, inserted in a single executemany call.
        faces = results.get('faces', [])
        face_rows = [
            (
                analysis_id,
                idx,
                face.get('emotion', 'unknown'),
                face.get('confidence', 0.0),
                json.dumps(face.get('features', {})),
                json.dumps(face.get('emotions', {})),
            )
            for idx, face in enumerate(faces)
        ]
        cur.executemany(
            '''
            INSERT INTO faces
            (analysis_id, face_index, emotion, confidence, features, emotions)
            VALUES (?, ?, ?, ?, ?, ?)
            ''',
            face_rows
        )

        connection.commit()
        logger.info(f"Saved analysis with ID {analysis_id} and {len(faces)} faces")
        return analysis_id

    except sqlite3.Error as e:
        logger.error(f"Error saving analysis: {e}")
        if connection:
            connection.rollback()
        return -1
    finally:
        if connection:
            connection.close()
|
| 163 |
+
|
| 164 |
+
def get_analysis_history(self,
                         session_id: Optional[str] = None,
                         limit: int = 10) -> List[Dict[str, Any]]:
    """
    Retrieve analysis history from the database.

    Records come back newest first; JSON columns are decoded, and each
    record gains a ``faces`` list with its per-face rows.

    Args:
        session_id: Optional session ID to filter by
        limit: Maximum number of records to return

    Returns:
        List of analysis records as dictionaries (empty on database error)
    """
    connection = None
    try:
        connection = sqlite3.connect(self.db_path)
        connection.row_factory = sqlite3.Row  # name-based column access
        cur = connection.cursor()

        sql = '''
            SELECT
                id,
                session_id,
                timestamp,
                image_path,
                face_count,
                results,
                tags
            FROM analyses
        '''
        args = []
        if session_id:
            sql += ' WHERE session_id = ?'
            args.append(session_id)
        sql += '''
            ORDER BY timestamp DESC
            LIMIT ?
        '''
        args.append(limit)

        cur.execute(sql, args)

        history = []
        for row in cur.fetchall():
            record = dict(row)

            # JSON columns are stored as text; decode them for callers.
            record['results'] = json.loads(record['results'])
            if record['tags']:
                record['tags'] = json.loads(record['tags'])

            # Attach the per-face rows for this analysis.
            face_cur = connection.cursor()
            face_cur.execute(
                '''
                SELECT
                    id,
                    face_index,
                    emotion,
                    confidence,
                    features,
                    emotions
                FROM faces
                WHERE analysis_id = ?
                ORDER BY face_index
                ''',
                (record['id'],)
            )
            decoded_faces = []
            for face_row in face_cur.fetchall():
                face = dict(face_row)
                face['features'] = json.loads(face['features'])
                face['emotions'] = json.loads(face['emotions'])
                decoded_faces.append(face)

            record['faces'] = decoded_faces
            history.append(record)

        return history

    except sqlite3.Error as e:
        logger.error(f"Error retrieving analysis history: {e}")
        return []
    finally:
        if connection:
            connection.close()
|
| 257 |
+
|
| 258 |
+
def get_analysis_by_id(self, analysis_id: int) -> Optional[Dict[str, Any]]:
    """
    Retrieve a specific analysis by ID.

    JSON columns are decoded, and the record gains a ``faces`` list with
    its per-face rows.

    Args:
        analysis_id: ID of the analysis to retrieve

    Returns:
        Analysis record as a dictionary, or None if not found
        (or on database error)
    """
    connection = None
    try:
        connection = sqlite3.connect(self.db_path)
        connection.row_factory = sqlite3.Row
        cur = connection.cursor()

        cur.execute(
            '''
            SELECT
                id,
                session_id,
                timestamp,
                image_path,
                face_count,
                results,
                tags
            FROM analyses
            WHERE id = ?
            ''',
            (analysis_id,)
        )
        row = cur.fetchone()
        if row is None:
            # Unknown ID — nothing to decode.
            return None

        record = dict(row)
        # JSON columns are stored as text; decode them for callers.
        record['results'] = json.loads(record['results'])
        if record['tags']:
            record['tags'] = json.loads(record['tags'])

        # Attach the per-face rows (same cursor, new statement).
        cur.execute(
            '''
            SELECT
                id,
                face_index,
                emotion,
                confidence,
                features,
                emotions
            FROM faces
            WHERE analysis_id = ?
            ORDER BY face_index
            ''',
            (analysis_id,)
        )
        decoded_faces = []
        for face_row in cur.fetchall():
            face = dict(face_row)
            face['features'] = json.loads(face['features'])
            face['emotions'] = json.loads(face['emotions'])
            decoded_faces.append(face)

        record['faces'] = decoded_faces
        return record

    except sqlite3.Error as e:
        logger.error(f"Error retrieving analysis by ID: {e}")
        return None
    finally:
        if connection:
            connection.close()
|
| 338 |
+
|
| 339 |
+
def get_emotion_statistics(self,
                           session_id: Optional[str] = None,
                           limit: int = 50) -> Dict[str, float]:
    """
    Compute emotion statistics across multiple analyses.

    Counts face rows grouped by their dominant emotion label and returns
    each emotion's share of the total (values sum to 1.0 when any rows
    exist).

    Args:
        session_id: Optional session ID to filter by
        limit: Maximum number of records to analyze

    Returns:
        Dictionary of emotion frequencies (empty when no rows or on error)
    """
    # NOTE(review): `limit` is accepted but never applied to the query —
    # the aggregation always spans all matching face rows. Confirm whether
    # callers rely on that before wiring the parameter in.
    conn = None
    try:
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()

        query = '''
            SELECT emotion, COUNT(*) as count
            FROM faces
        '''

        # The JOIN clause is only needed when filtering by session, since
        # session_id lives on the analyses table.
        params = []
        if session_id:
            query += '''
                JOIN analyses ON faces.analysis_id = analyses.id
                WHERE analyses.session_id = ?
            '''
            params.append(session_id)

        query += '''
            GROUP BY emotion
            ORDER BY count DESC
        '''

        cursor.execute(query, params)
        rows = cursor.fetchall()

        # Calculate emotion frequencies (guard against division by zero
        # when no faces have been stored yet).
        total = sum(count for _, count in rows)
        stats = {emotion: count / total for emotion, count in rows} if total > 0 else {}

        return stats

    except sqlite3.Error as e:
        logger.error(f"Error computing emotion statistics: {e}")
        return {}
    finally:
        if conn:
            conn.close()
|
| 390 |
+
|
| 391 |
+
def delete_analysis(self, analysis_id: int) -> bool:
    """
    Delete an analysis and its associated faces.

    Args:
        analysis_id: ID of the analysis to delete

    Returns:
        True if deletion was successful, False otherwise
    """
    connection = None
    try:
        connection = sqlite3.connect(self.db_path)
        cur = connection.cursor()

        # Children first: faces.analysis_id references analyses(id).
        cur.execute('DELETE FROM faces WHERE analysis_id = ?', (analysis_id,))
        cur.execute('DELETE FROM analyses WHERE id = ?', (analysis_id,))

        connection.commit()
        # rowcount reflects the last statement, i.e. the analyses delete,
        # so this is True only when a matching analysis row existed.
        return cur.rowcount > 0

    except sqlite3.Error as e:
        logger.error(f"Error deleting analysis: {e}")
        if connection:
            connection.rollback()
        return False
    finally:
        if connection:
            connection.close()
|
| 423 |
+
|
| 424 |
+
def export_analysis_data(self,
                         analysis_id: Optional[int] = None,
                         session_id: Optional[str] = None,
                         limit: int = 100) -> Dict[str, Any]:
    """
    Export analysis data in a structured format suitable for JSON/CSV export.

    Args:
        analysis_id: Optional specific analysis ID to export
        session_id: Optional session ID to filter by
        limit: Maximum number of records to export

    Returns:
        Dictionary containing the exported data, with an `error` key when a
        requested analysis ID does not exist
    """
    # Single-record export takes precedence when an explicit ID is given.
    if analysis_id:
        record = self.get_analysis_by_id(analysis_id)
        if not record:
            return {'error': f'Analysis with ID {analysis_id} not found'}

        return {
            'metadata': {
                'exported_at': datetime.now().isoformat(),
                'record_count': 1
            },
            'analyses': [record]
        }

    # Otherwise export a (possibly session-filtered) batch of recent records.
    records = self.get_analysis_history(session_id, limit)

    return {
        'metadata': {
            'exported_at': datetime.now().isoformat(),
            'record_count': len(records),
            'session_id': session_id
        },
        'analyses': records
    }
|
download_haarcascade.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Setup check script: fetch OpenCV's smile Haar cascade and verify DeepFace."""
import os
import cv2  # NOTE(review): imported but unused below — possibly kept to verify OpenCV installs; confirm.
import urllib.request
import logging

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Directory where the downloaded files are stored
models_dir = "models"
os.makedirs(models_dir, exist_ok=True)

# URL of OpenCV's haarcascade_smile.xml file
smile_cascade_url = "https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_smile.xml"
smile_cascade_path = os.path.join(models_dir, "haarcascade_smile.xml")

# Download the file (best-effort: a failure is logged, not fatal)
logging.info(f"Descargando {smile_cascade_url} a {smile_cascade_path}")
try:
    urllib.request.urlretrieve(smile_cascade_url, smile_cascade_path)
    logging.info(f"Archivo descargado correctamente")
except Exception as e:
    logging.error(f"Error al descargar el archivo: {e}")

# Verify that the file exists on disk
if os.path.exists(smile_cascade_path):
    logging.info(f"El archivo {smile_cascade_path} existe y tiene {os.path.getsize(smile_cascade_path)} bytes")
else:
    logging.error(f"El archivo {smile_cascade_path} no existe")

# Verify the DeepFace installation
try:
    from deepface import DeepFace
    logging.info("DeepFace está instalado correctamente")

    # Show the installed version
    import deepface
    logging.info(f"Versión de DeepFace: {deepface.__version__}")

    # Show the available detection backends
    # NOTE(review): `DeepFace.detector_backends()` is not part of DeepFace's
    # documented API in recent releases; if missing, the AttributeError is
    # swallowed by the generic handler below — confirm against the pinned version.
    backends = DeepFace.detector_backends()
    logging.info(f"Backends de detección disponibles: {backends}")

except ImportError:
    logging.error("DeepFace no está instalado")
except Exception as e:
    logging.error(f"Error al importar DeepFace: {e}")

logging.info("Verificación completada")
|
finalproject/Dockerfile
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.10-slim

WORKDIR /app

# Install essential system dependencies for OpenCV
RUN apt-get update && apt-get install -y \
    libgl1-mesa-glx \
    libglib2.0-0 \
    libgomp1 \
    && rm -rf /var/lib/apt/lists/*

# Configure environment variables for Matplotlib and Ultralytics
# (writable cache dirs for containers running as a non-root user)
ENV MPLCONFIGDIR=/tmp/matplotlib
ENV YOLO_CONFIG_DIR=/tmp/ultralytics

# Create the directories the app expects
RUN mkdir -p /app/static /app/models /app/database

# Copy requirements.txt first to take advantage of Docker's layer cache
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the project files
COPY . .

# Create runtime directories and relax permissions
# (needed on restricted hosts such as Hugging Face Spaces)
RUN mkdir -p /app/static/uploads /app/static/images /app/database \
    && chmod -R 777 /app /tmp

# Expose Streamlit's default port
EXPOSE 8501

# Command to run the application
CMD ["streamlit", "run", "app.py", "--server.headless", "true", "--server.port=8501", "--server.address=0.0.0.0"]
|
finalproject/app.py
ADDED
|
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
EmotionMirror - Emotional Analysis Application

A Streamlit application for analyzing emotions using computer vision.
"""
import os
import time
import uuid
import logging
import streamlit as st
from datetime import datetime
from PIL import Image
import numpy as np
import cv2
# NOTE(review): os, time and np are not referenced anywhere in this module —
# confirm before removing.

# Import app modules
from config import settings
from agent_framework.agent_manager import AgentManager
from utils.file_utils import allowed_file, save_uploaded_file

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Page configuration — must be the first Streamlit call in the script.
st.set_page_config(
    page_title="EmotionMirror",
    page_icon="📊",
    layout="wide",
    initial_sidebar_state="expanded"
)
|
| 35 |
+
|
| 36 |
+
# Initialize agent manager
@st.cache_resource
def get_agent_manager():
    """Get or create the agent manager singleton.

    @st.cache_resource makes Streamlit build the AgentManager once per
    process and reuse it across reruns and sessions.
    """
    return AgentManager()
|
| 41 |
+
|
| 42 |
+
# Session state initialization — survives Streamlit reruns within a session.
if "session_id" not in st.session_state:
    st.session_state.session_id = str(uuid.uuid4())
    logger.info(f"New session started: {st.session_state.session_id}")

if "upload_history" not in st.session_state:
    # List of dicts: {"timestamp", "file_path", "file_name"} per upload.
    st.session_state.upload_history = []

# App title and description
st.title("EmotionMirror")
st.markdown("""
Welcome to EmotionMirror, an application for analyzing emotions using computer vision.

This is a prototype version that demonstrates the basic functionality.
""")

# Sidebar: page navigation plus global detection settings.
with st.sidebar:
    st.header("Navigation")
    # `page` drives the top-level if/elif dispatch further down the script.
    page = st.selectbox(
        "Choose a page",
        ["Home", "Visual Analysis", "About"]
    )

    st.header("Configuration")
    # Threshold forwarded to the visual agent on the Visual Analysis page.
    model_confidence = st.slider(
        "Detection Confidence",
        min_value=0.05,
        max_value=0.95,
        value=0.25,
        step=0.05,
        help="Adjust the confidence threshold for detections"
    )

    st.divider()
    st.caption(f"Session ID: {st.session_state.session_id}")
    st.caption(f"Version: 0.1.0 (Phase 1)")
|
| 79 |
+
|
| 80 |
+
# Home page
if page == "Home":
    st.header("EmotionMirror - Emotional Analysis System")

    st.subheader("Features")
    col1, col2, col3 = st.columns(3)

    with col1:
        st.markdown("### 📷 Visual Analysis")
        st.markdown("Upload images to analyze facial expressions and emotions.")

    with col2:
        st.markdown("### 📊 Emotion Tracking")
        st.markdown("Track emotions over time with detailed analytics. (Coming soon)")

    with col3:
        st.markdown("### 🧠 AI Recommendations")
        st.markdown("Get personalized recommendations based on your emotional state. (Coming soon)")

    st.subheader("Getting Started")
    st.markdown("""
    1. Navigate to the **Visual Analysis** page
    2. Upload an image containing faces
    3. View the analysis results
    """)

# Visual Analysis page: upload -> save -> agent processing -> annotated output.
elif page == "Visual Analysis":
    st.header("Visual Analysis")
    st.markdown("Upload an image to analyze facial expressions and emotions.")

    # File uploader (type= already restricts extensions client-side)
    uploaded_file = st.file_uploader(
        "Choose an image file",
        type=["jpg", "jpeg", "png"],
        help="Upload an image file to analyze"
    )

    if uploaded_file is not None:
        # Check if file is valid (server-side re-check of the extension)
        if not allowed_file(uploaded_file.name, settings.ALLOWED_EXTENSIONS):
            st.error(f"Invalid file type. Allowed types: {', '.join(settings.ALLOWED_EXTENSIONS)}")
        else:
            # Display the uploaded image
            st.subheader("Uploaded Image")
            image = Image.open(uploaded_file)
            st.image(image, caption="Uploaded Image", use_column_width=True)

            # Process button
            if st.button("Analyze Image"):
                with st.spinner("Processing image..."):
                    # Save the uploaded file to disk so the agent can read it
                    success, message, file_path = save_uploaded_file(
                        uploaded_file,
                        settings.UPLOADS_DIR
                    )

                    if not success:
                        st.error(message)
                    else:
                        # Add to per-session upload history
                        st.session_state.upload_history.append({
                            "timestamp": datetime.now().isoformat(),
                            "file_path": file_path,
                            "file_name": uploaded_file.name
                        })

                        # Process with visual agent
                        try:
                            agent_manager = get_agent_manager()

                            # Prepare data for processing; `model_confidence`
                            # comes from the sidebar slider.
                            process_data = {
                                "image_path": file_path,
                                "confidence": model_confidence,
                                "timestamp": datetime.now().isoformat()
                            }

                            # Process the image
                            results = agent_manager.process_visual(process_data)

                            # Display results
                            st.subheader("Analysis Results")

                            if "error" in results:
                                st.error(results["error"])
                            else:
                                # Display faces detected
                                st.markdown(f"**Faces Detected:** {results['face_count']}")

                                # Show each face
                                if results["face_count"] > 0:
                                    st.markdown("### Detected Faces")

                                    # Load image for visualization
                                    # NOTE(review): cv2.imread returns None on
                                    # unreadable files; cvtColor would then raise,
                                    # landing in the generic handler below — confirm
                                    # this is the intended failure mode.
                                    img = cv2.imread(file_path)
                                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                                    # Draw bounding boxes and labels
                                    # (assumes face["bbox"] is integer x1,y1,x2,y2 —
                                    # TODO confirm against the visual agent's output)
                                    for i, face in enumerate(results["faces"]):
                                        x1, y1, x2, y2 = face["bbox"]
                                        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
                                        cv2.putText(
                                            img,
                                            f"Face {i+1}: {face['emotion']}",
                                            (x1, y1-10),
                                            cv2.FONT_HERSHEY_SIMPLEX,
                                            0.8,
                                            (0, 255, 0),
                                            2
                                        )

                                    # Display the annotated image
                                    st.image(img, caption="Analysis Results", use_column_width=True)

                                    # Display detailed results
                                    with st.expander("View Detailed Results"):
                                        st.json(results)
                                else:
                                    st.info("No faces were detected in the image. Please try another image.")
                        except Exception as e:
                            logger.error(f"Error processing image: {e}")
                            st.error(f"Error processing image: {str(e)}")

# About page
elif page == "About":
    st.header("About EmotionMirror")

    st.markdown("""
    ## EmotionMirror: Emotional Analysis System

    EmotionMirror is an application that uses computer vision and artificial intelligence
    to analyze emotions from facial expressions and body language.

    ### Technology Stack

    * **Streamlit**: For the user interface
    * **YOLOv8**: For object detection, pose estimation, and facial analysis
    * **Agent Framework**: A custom multi-agent system for coordinated analysis

    ### Future Features

    * Enhanced emotion recognition with Hume.ai integration
    * Temporal emotion tracking and pattern analysis
    * Personalized recommendations based on emotional states
    * Guided emotional wellness sessions

    ### Privacy

    * All image processing is done locally
    * We don't store your images after processing
    * No personal data is shared with third parties
    """)

# Footer (rendered on every page)
st.markdown("---")
st.markdown("© 2025 EmotionMirror | Developed as a prototype application")
|
finalproject/packages.txt
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
libgl1-mesa-glx
|
| 2 |
+
libglib2.0-0
|
| 3 |
+
libgomp1
|
| 4 |
+
libsm6
|
| 5 |
+
libxext6
|
| 6 |
+
libxrender-dev
|
| 7 |
+
libpython3-dev
|
finalproject/requirements.txt
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
streamlit==1.26.0
|
| 2 |
+
ultralytics==8.0.196
|
| 3 |
+
Pillow==9.5.0
|
| 4 |
+
numpy==1.24.3
|
| 5 |
+
opencv-python==4.8.0.76
|
| 6 |
+
torch==2.0.1
|
| 7 |
+
torchvision==0.15.2
|
| 8 |
+
requests==2.31.0
|
| 9 |
+
python-dotenv==1.0.0
|
packages.txt
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
libgl1-mesa-glx
|
| 2 |
+
libglib2.0-0
|
| 3 |
+
libgomp1
|
| 4 |
+
libsm6
|
| 5 |
+
libxext6
|
| 6 |
+
libxrender-dev
|
| 7 |
+
libpython3-dev
|
requirements.txt
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
streamlit==1.26.0
|
| 2 |
+
ultralytics==8.0.196
|
| 3 |
+
Pillow==9.5.0
|
| 4 |
+
numpy==1.23.5
|
| 5 |
+
opencv-python==4.6.0.66
|
| 6 |
+
torch==2.0.1
|
| 7 |
+
torchvision==0.15.2
|
| 8 |
+
requests==2.31.0
|
| 9 |
+
python-dotenv==1.0.0
|
| 10 |
+
scikit-learn==1.3.2
|
| 11 |
+
pandas==2.1.0
|
| 12 |
+
deepface==0.0.75
|
| 13 |
+
protobuf==3.20.3
|
| 14 |
+
tensorflow==2.8.0
|
| 15 |
+
dlib-bin==19.24.6
|
services/__init__.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Services package for EmotionMirror application.
|
| 3 |
+
Contains modular services for image processing, model management and emotion analysis.
|
| 4 |
+
"""
|
| 5 |
+
import logging
|
| 6 |
+
|
| 7 |
+
from config import settings
|
| 8 |
+
from services.emotion_service import EmotionService
|
| 9 |
+
|
| 10 |
+
# Import DeepFaceEmotionService conditionally to avoid errors if deepface isn't installed
|
| 11 |
+
# Import DeepFaceEmotionService conditionally to avoid errors if deepface isn't installed
try:
    from services.deepface_emotion_service import DeepFaceEmotionService, DEEPFACE_AVAILABLE
except ImportError:
    # Fallback sentinels — get_emotion_service() checks both before
    # attempting to construct the advanced service.
    logging.warning("DeepFaceEmotionService could not be imported. Advanced emotion detection will be unavailable.")
    DeepFaceEmotionService = None
    DEEPFACE_AVAILABLE = False
|
| 17 |
+
|
| 18 |
+
def get_emotion_service():
    """
    Factory function that returns the appropriate emotion service based on configuration.

    Prefers DeepFaceEmotionService when settings.USE_ADVANCED_EMOTION is set
    and the deepface import succeeded; otherwise (or if construction fails)
    falls back to the basic EmotionService.

    Returns:
        EmotionService: The configured emotion service (basic or advanced)
    """
    # Choose between basic and advanced emotion service based on configuration
    if settings.USE_ADVANCED_EMOTION and DEEPFACE_AVAILABLE and DeepFaceEmotionService is not None:
        try:
            # Simply attempt to create the advanced service; any constructor
            # failure drops through to the basic fallback below.
            logging.info("Using advanced emotion detection with DeepFace")
            return DeepFaceEmotionService()
        except Exception as e:
            logging.error(f"Error initializing DeepFaceEmotionService: {e}")
            logging.warning("Falling back to basic emotion service")

    # Fall back to basic emotion service
    logging.info("Using basic emotion service")
    return EmotionService()
|
services/database_service.py
ADDED
|
@@ -0,0 +1,397 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Database Service for EmotionMirror Application
|
| 3 |
+
|
| 4 |
+
This service provides a high-level interface for database operations,
|
| 5 |
+
integrating the database manager with the rest of the application.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import os
|
| 9 |
+
import json
|
| 10 |
+
import logging
|
| 11 |
+
import streamlit as st
|
| 12 |
+
from typing import Dict, List, Any, Optional, Tuple
|
| 13 |
+
from datetime import datetime
|
| 14 |
+
|
| 15 |
+
from config import settings
|
| 16 |
+
from database.db_manager import DatabaseManager
|
| 17 |
+
|
| 18 |
+
# Configure logging
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
class DatabaseService:
|
| 22 |
+
"""
|
| 23 |
+
Service for handling database operations in the EmotionMirror application.
|
| 24 |
+
|
| 25 |
+
This class integrates the database functionality with the rest of the
|
| 26 |
+
application, providing a service-oriented interface.
|
| 27 |
+
|
| 28 |
+
IMPORTANT NOTE ABOUT DIFFERENCES BETWEEN LOCAL AND HUGGING FACE VERSIONS:
|
| 29 |
+
-----------------------------------------------------------------------
|
| 30 |
+
In environments with permission restrictions such as Hugging Face, this class
|
| 31 |
+
implements additional logic to handle write errors:
|
| 32 |
+
|
| 33 |
+
1. Verifies that the database directory has write permissions
|
| 34 |
+
2. Attempts to write a test file to confirm permissions
|
| 35 |
+
3. If it fails, uses an alternative location (/tmp/emotion_mirror.db)
|
| 36 |
+
4. Logs detailed debugging information
|
| 37 |
+
|
| 38 |
+
This behavior is different from the local version, where it is assumed
|
| 39 |
+
that there are no write permission restrictions.
|
| 40 |
+
|
| 41 |
+
HISTORY FUNCTIONALITY DIFFERENCES:
|
| 42 |
+
--------------------------------
|
| 43 |
+
The history retrieval system also differs between versions:
|
| 44 |
+
|
| 45 |
+
Local Version:
|
| 46 |
+
- Uses standard DatabaseManager method for data retrieval
|
| 47 |
+
- Requires explicit session ID for filtering
|
| 48 |
+
- Implements basic error handling
|
| 49 |
+
- Assumes stable database connection
|
| 50 |
+
|
| 51 |
+
Hugging Face Version:
|
| 52 |
+
- Uses a multi-layered approach with direct SQL fallback
|
| 53 |
+
- Auto-detects current session ID when possible
|
| 54 |
+
- Implements enhanced error recovery with detailed diagnostics
|
| 55 |
+
- Tests database connection and table existence before operations
|
| 56 |
+
- Performs preliminary count of available records
|
| 57 |
+
- Provides detailed logging of database operations
|
| 58 |
+
|
| 59 |
+
These differences ensure the history functionality works reliably in the
|
| 60 |
+
more restrictive Hugging Face environment.
|
| 61 |
+
"""
|
| 62 |
+
|
| 63 |
+
    def __init__(self):
        """Initialize the database service with the configured database path.

        Tries ``settings.DB_PATH`` first and verifies it with a real SQLite
        write/read round-trip. On failure it walks a list of fallback
        locations (/tmp, /home/user, then an in-memory SQLite database).
        If every option fails, a no-op dummy manager is installed so callers
        never crash on database calls (history simply stops persisting).
        """
        # Ensure the database directory exists
        db_dir = os.path.dirname(settings.DB_PATH)
        os.makedirs(db_dir, exist_ok=True)

        # Log database path for debugging
        logger.info(f"Initializing DatabaseService with DB at: {settings.DB_PATH}")
        logger.info(f"Database directory exists: {os.path.exists(db_dir)}")
        logger.info(f"Database directory is writable: {os.access(db_dir, os.W_OK)}")

        # Print to console for immediate visibility in logs (Hugging Face Spaces)
        print(f"[DEBUG] Database path: {settings.DB_PATH}")
        print(f"[DEBUG] Database dir exists: {os.path.exists(db_dir)}")
        print(f"[DEBUG] Database dir writable: {os.access(db_dir, os.W_OK)}")

        try:
            # Test if we can actually write to the directory (permission probe)
            test_file = os.path.join(db_dir, '.db_test')
            with open(test_file, 'w') as f:
                f.write('test')
            os.remove(test_file)
            logger.info(f"Successfully wrote test file to {db_dir}")
            print(f"[DEBUG] Successfully wrote test file to {db_dir}")
        except Exception as e:
            logger.error(f"Failed to write test file to {db_dir}: {e}")
            print(f"[DEBUG] Failed to write test file to {db_dir}: {e}")
            # No need to raise here; the DatabaseManager attempt below will
            # surface the failure and trigger the fallback-path loop.

        # List of possible fallback paths from most to least preferred
        fallback_paths = [
            os.path.join('/tmp', 'emotion_mirror.db'),
            os.path.join('/home/user', 'emotion_mirror.db'),
            ':memory:'  # SQLite in-memory database as last resort
        ]

        # Initialize the database manager
        db_initialized = False
        db_path_used = settings.DB_PATH

        try:
            self.db_manager = DatabaseManager(settings.DB_PATH)
            logger.info(f"Database manager initialized successfully")
            print(f"[DEBUG] Database manager initialized successfully with {settings.DB_PATH}")

            # Verify we can actually write to the database
            try:
                # Test a direct SQLite connection instead of relying on a
                # DatabaseManager method that might not exist.
                import sqlite3
                conn = sqlite3.connect(settings.DB_PATH)
                cursor = conn.cursor()
                cursor.execute("PRAGMA quick_check")
                result = cursor.fetchone()
                print(f"[DEBUG] Database integrity check: {result}")

                # Test insert and select on a throwaway table
                cursor.execute("CREATE TABLE IF NOT EXISTS test_table (id INTEGER PRIMARY KEY, test TEXT)")
                cursor.execute("INSERT INTO test_table (test) VALUES ('test_write')")
                conn.commit()
                cursor.execute("SELECT * FROM test_table LIMIT 1")
                test_result = cursor.fetchone()
                print(f"[DEBUG] Database test write/read successful: {test_result}")
                cursor.execute("DROP TABLE test_table")
                conn.commit()
                conn.close()
                db_initialized = True
            except Exception as e:
                logger.error(f"Database write test failed: {e}")
                print(f"[DEBUG] Database write test failed: {e}")
                # Re-raise so the outer handler runs the fallback-path loop
                raise Exception(f"Database write test failed: {e}")

        except Exception as e:
            logger.error(f"Error initializing database manager: {e}")
            print(f"[DEBUG] Error initializing database manager: {e}")

            # Try fallback paths, most preferred first
            for fallback_path in fallback_paths:
                try:
                    print(f"[DEBUG] Trying fallback database path: {fallback_path}")

                    # Probe directory writability for on-disk fallback paths
                    if fallback_path != ':memory:':
                        fallback_dir = os.path.dirname(fallback_path)
                        if fallback_dir:  # Skip directory check for :memory:
                            os.makedirs(fallback_dir, exist_ok=True)
                            test_file = os.path.join(fallback_dir, '.db_test')
                            with open(test_file, 'w') as f:
                                f.write('test')
                            os.remove(test_file)

                    # Test database connection and write/read operations
                    import sqlite3
                    conn = sqlite3.connect(fallback_path)
                    cursor = conn.cursor()
                    cursor.execute("CREATE TABLE IF NOT EXISTS test_table (id INTEGER PRIMARY KEY, test TEXT)")
                    cursor.execute("INSERT INTO test_table (test) VALUES ('test_write')")
                    conn.commit()
                    cursor.execute("SELECT * FROM test_table LIMIT 1")
                    test_result = cursor.fetchone()
                    cursor.execute("DROP TABLE test_table")
                    conn.commit()
                    conn.close()

                    # If we got here, the database works with this path
                    self.db_manager = DatabaseManager(fallback_path)
                    db_path_used = fallback_path

                    print(f"[DEBUG] Successfully initialized database with fallback path: {fallback_path}")
                    logger.info(f"Using alternative database path: {fallback_path}")
                    db_initialized = True
                    break
                except Exception as e:
                    print(f"[DEBUG] Failed to use fallback path {fallback_path}: {e}")
                    logger.warning(f"Failed to use fallback path {fallback_path}: {e}")
                    continue

            if not db_initialized:
                error_msg = "CRITICAL: Could not initialize database with any path. History functionality will not work."
                logger.critical(error_msg)
                print(f"[DEBUG] {error_msg}")
                # Create an in-memory database as last resort
                try:
                    self.db_manager = DatabaseManager(":memory:")
                    print("[DEBUG] Created in-memory database as last resort. History will not persist.")
                except Exception as e:
                    print(f"[DEBUG] Even in-memory database failed: {e}")
                    # Dummy stand-in: logs every call, returns [] for getters
                    # and 0 for everything else, so callers never crash.
                    class DummyDBManager:
                        def __init__(self):
                            pass
                        def __getattr__(self, name):
                            def dummy_method(*args, **kwargs):
                                print(f"[DEBUG] Called dummy DB method {name} with {args}, {kwargs}")
                                if name.startswith('get_'):
                                    return []
                                return 0
                            return dummy_method
                    self.db_manager = DummyDBManager()

        print(f"[DEBUG] DatabaseService initialization complete. Using DB: {db_path_used}")
        logger.info(f"DatabaseService initialization complete. Using DB: {db_path_used}")
|
| 204 |
+
|
| 205 |
+
def save_analysis_results(self,
|
| 206 |
+
session_id: str,
|
| 207 |
+
image_path: str,
|
| 208 |
+
results: Dict[str, Any],
|
| 209 |
+
tags: Optional[List[str]] = None) -> int:
|
| 210 |
+
"""
|
| 211 |
+
Save analysis results to the database.
|
| 212 |
+
|
| 213 |
+
Args:
|
| 214 |
+
session_id: Current session identifier
|
| 215 |
+
image_path: Path to the analyzed image
|
| 216 |
+
results: Analysis results dictionary
|
| 217 |
+
tags: Optional list of tags for the analysis
|
| 218 |
+
|
| 219 |
+
Returns:
|
| 220 |
+
The ID of the saved analysis record
|
| 221 |
+
"""
|
| 222 |
+
try:
|
| 223 |
+
logger.info(f"Saving analysis results for session: {session_id}")
|
| 224 |
+
print(f"[DEBUG] Attempting to save analysis for session: {session_id}, image: {image_path}")
|
| 225 |
+
result_id = self.db_manager.save_analysis(session_id, image_path, results, tags)
|
| 226 |
+
print(f"[DEBUG] Analysis saved successfully with ID: {result_id}")
|
| 227 |
+
return result_id
|
| 228 |
+
except Exception as e:
|
| 229 |
+
error_msg = f"Failed to save analysis results: {e}"
|
| 230 |
+
logger.error(error_msg)
|
| 231 |
+
print(f"[DEBUG] {error_msg}")
|
| 232 |
+
# Return 0 to indicate failure but continue execution
|
| 233 |
+
return 0
|
| 234 |
+
|
| 235 |
+
def get_history(self,
|
| 236 |
+
session_id: Optional[str] = None,
|
| 237 |
+
limit: int = 10) -> List[Dict[str, Any]]:
|
| 238 |
+
"""
|
| 239 |
+
Retrieve analysis history.
|
| 240 |
+
|
| 241 |
+
Args:
|
| 242 |
+
session_id: Optional session ID to filter by
|
| 243 |
+
limit: Maximum number of records to return
|
| 244 |
+
|
| 245 |
+
Returns:
|
| 246 |
+
List of analysis records
|
| 247 |
+
"""
|
| 248 |
+
try:
|
| 249 |
+
logger.info(f"Retrieving analysis history for session: {session_id or 'all sessions'}")
|
| 250 |
+
print(f"[DEBUG] Attempting to get history for session: {session_id}, limit: {limit}")
|
| 251 |
+
|
| 252 |
+
# If session_id is None, use current session ID
|
| 253 |
+
if session_id is None and hasattr(st, 'session_state') and 'session_id' in st.session_state:
|
| 254 |
+
session_id = st.session_state.session_id
|
| 255 |
+
print(f"[DEBUG] Using session_id from session_state: {session_id}")
|
| 256 |
+
|
| 257 |
+
# First check if the database file exists
|
| 258 |
+
if not os.path.exists(self.db_manager.db_path):
|
| 259 |
+
print(f"[DEBUG] Database file does not exist: {self.db_manager.db_path}")
|
| 260 |
+
logger.warning(f"Database file does not exist: {self.db_manager.db_path}")
|
| 261 |
+
return []
|
| 262 |
+
|
| 263 |
+
# Test database connection
|
| 264 |
+
try:
|
| 265 |
+
import sqlite3
|
| 266 |
+
conn = sqlite3.connect(self.db_manager.db_path)
|
| 267 |
+
cursor = conn.cursor()
|
| 268 |
+
|
| 269 |
+
# Check if the analyses table exists
|
| 270 |
+
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='analyses'")
|
| 271 |
+
if not cursor.fetchone():
|
| 272 |
+
print("[DEBUG] The 'analyses' table does not exist in the database")
|
| 273 |
+
conn.close()
|
| 274 |
+
return []
|
| 275 |
+
|
| 276 |
+
# Get a count of records for this session
|
| 277 |
+
if session_id:
|
| 278 |
+
cursor.execute("SELECT COUNT(*) FROM analyses WHERE session_id = ?", (session_id,))
|
| 279 |
+
else:
|
| 280 |
+
cursor.execute("SELECT COUNT(*) FROM analyses")
|
| 281 |
+
|
| 282 |
+
count = cursor.fetchone()[0]
|
| 283 |
+
print(f"[DEBUG] Found {count} records in the database")
|
| 284 |
+
|
| 285 |
+
conn.close()
|
| 286 |
+
except Exception as e:
|
| 287 |
+
print(f"[DEBUG] Error testing database connection: {e}")
|
| 288 |
+
logger.error(f"Error testing database connection: {e}")
|
| 289 |
+
|
| 290 |
+
# Try to get records using the db_manager
|
| 291 |
+
records = self.db_manager.get_analysis_history(session_id, limit)
|
| 292 |
+
print(f"[DEBUG] Retrieved {len(records)} records from db_manager")
|
| 293 |
+
|
| 294 |
+
# If no records were found but we expect some (from the count above)
|
| 295 |
+
# try a direct query as a fallback
|
| 296 |
+
if not records and count > 0:
|
| 297 |
+
print("[DEBUG] No records returned from db_manager but count > 0, trying direct query")
|
| 298 |
+
try:
|
| 299 |
+
conn = sqlite3.connect(self.db_manager.db_path)
|
| 300 |
+
conn.row_factory = sqlite3.Row
|
| 301 |
+
cursor = conn.cursor()
|
| 302 |
+
|
| 303 |
+
if session_id:
|
| 304 |
+
query = "SELECT * FROM analyses WHERE session_id = ? ORDER BY timestamp DESC LIMIT ?"
|
| 305 |
+
cursor.execute(query, (session_id, limit))
|
| 306 |
+
else:
|
| 307 |
+
query = "SELECT * FROM analyses ORDER BY timestamp DESC LIMIT ?"
|
| 308 |
+
cursor.execute(query, (limit,))
|
| 309 |
+
|
| 310 |
+
rows = cursor.fetchall()
|
| 311 |
+
records = []
|
| 312 |
+
|
| 313 |
+
for row in rows:
|
| 314 |
+
record = {key: row[key] for key in row.keys()}
|
| 315 |
+
# Parse the JSON fields
|
| 316 |
+
try:
|
| 317 |
+
if 'results' in record and record['results']:
|
| 318 |
+
record['results'] = json.loads(record['results'])
|
| 319 |
+
if 'tags' in record and record['tags']:
|
| 320 |
+
record['tags'] = json.loads(record['tags'])
|
| 321 |
+
except:
|
| 322 |
+
pass
|
| 323 |
+
records.append(record)
|
| 324 |
+
|
| 325 |
+
conn.close()
|
| 326 |
+
print(f"[DEBUG] Retrieved {len(records)} records via direct query")
|
| 327 |
+
except Exception as e:
|
| 328 |
+
print(f"[DEBUG] Error in direct SQL query fallback: {e}")
|
| 329 |
+
logger.error(f"Error in direct SQL query fallback: {e}")
|
| 330 |
+
|
| 331 |
+
return records
|
| 332 |
+
except Exception as e:
|
| 333 |
+
error_msg = f"Failed to retrieve analysis history: {e}"
|
| 334 |
+
logger.error(error_msg)
|
| 335 |
+
print(f"[DEBUG] {error_msg}")
|
| 336 |
+
# Return empty list to avoid breaking the UI
|
| 337 |
+
return []
|
| 338 |
+
|
| 339 |
+
def get_analysis(self, analysis_id: int) -> Optional[Dict[str, Any]]:
|
| 340 |
+
"""
|
| 341 |
+
Retrieve a specific analysis.
|
| 342 |
+
|
| 343 |
+
Args:
|
| 344 |
+
analysis_id: ID of the analysis to retrieve
|
| 345 |
+
|
| 346 |
+
Returns:
|
| 347 |
+
Analysis record or None if not found
|
| 348 |
+
"""
|
| 349 |
+
logger.info(f"Retrieving analysis with ID: {analysis_id}")
|
| 350 |
+
return self.db_manager.get_analysis_by_id(analysis_id)
|
| 351 |
+
|
| 352 |
+
def get_emotion_stats(self,
|
| 353 |
+
session_id: Optional[str] = None,
|
| 354 |
+
limit: int = 50) -> Dict[str, float]:
|
| 355 |
+
"""
|
| 356 |
+
Get emotion statistics.
|
| 357 |
+
|
| 358 |
+
Args:
|
| 359 |
+
session_id: Optional session ID to filter by
|
| 360 |
+
limit: Maximum number of records to analyze
|
| 361 |
+
|
| 362 |
+
Returns:
|
| 363 |
+
Dictionary of emotion frequencies
|
| 364 |
+
"""
|
| 365 |
+
logger.info(f"Computing emotion statistics for session: {session_id or 'all sessions'}")
|
| 366 |
+
return self.db_manager.get_emotion_statistics(session_id, limit)
|
| 367 |
+
|
| 368 |
+
def delete_record(self, analysis_id: int) -> bool:
|
| 369 |
+
"""
|
| 370 |
+
Delete an analysis record.
|
| 371 |
+
|
| 372 |
+
Args:
|
| 373 |
+
analysis_id: ID of the analysis to delete
|
| 374 |
+
|
| 375 |
+
Returns:
|
| 376 |
+
True if deletion was successful, False otherwise
|
| 377 |
+
"""
|
| 378 |
+
logger.info(f"Deleting analysis with ID: {analysis_id}")
|
| 379 |
+
return self.db_manager.delete_analysis(analysis_id)
|
| 380 |
+
|
| 381 |
+
def export_data(self,
|
| 382 |
+
analysis_id: Optional[int] = None,
|
| 383 |
+
session_id: Optional[str] = None,
|
| 384 |
+
limit: int = 100) -> Dict[str, Any]:
|
| 385 |
+
"""
|
| 386 |
+
Export analysis data in a structured format.
|
| 387 |
+
|
| 388 |
+
Args:
|
| 389 |
+
analysis_id: Optional specific analysis ID to export
|
| 390 |
+
session_id: Optional session ID to filter by
|
| 391 |
+
limit: Maximum number of records to export
|
| 392 |
+
|
| 393 |
+
Returns:
|
| 394 |
+
Dictionary containing the exported data
|
| 395 |
+
"""
|
| 396 |
+
logger.info(f"Exporting data for analysis ID: {analysis_id or 'multiple'}")
|
| 397 |
+
return self.db_manager.export_analysis_data(analysis_id, session_id, limit)
|
services/deepface_emotion_service.py
ADDED
|
@@ -0,0 +1,269 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Advanced emotion analysis service using DeepFace for EmotionMirror application.
|
| 3 |
+
Provides high-precision facial emotion detection and analysis.
|
| 4 |
+
"""
|
| 5 |
+
import os
|
| 6 |
+
import logging
|
| 7 |
+
import tempfile
|
| 8 |
+
import cv2
|
| 9 |
+
import numpy as np
|
| 10 |
+
from typing import Dict, Any, List, Optional
|
| 11 |
+
import traceback
|
| 12 |
+
|
| 13 |
+
# Importar settings desde config
|
| 14 |
+
from config import settings
|
| 15 |
+
from services.emotion_service import EmotionService
|
| 16 |
+
|
| 17 |
+
# Importar DeepFace con manejo de errores
|
| 18 |
+
try:
|
| 19 |
+
from deepface import DeepFace
|
| 20 |
+
DEEPFACE_AVAILABLE = True
|
| 21 |
+
logging.info("DeepFace está disponible. Se puede usar la detección avanzada de emociones.")
|
| 22 |
+
except (ImportError, ModuleNotFoundError, ValueError) as e:
|
| 23 |
+
DEEPFACE_AVAILABLE = False
|
| 24 |
+
logging.warning(f"No se pudo importar DeepFace: {str(e)}. Se usará el servicio básico de emociones.")
|
| 25 |
+
|
| 26 |
+
class DeepFaceEmotionService(EmotionService):
|
| 27 |
+
"""
|
| 28 |
+
Servicio para análisis avanzado de emociones usando DeepFace.
|
| 29 |
+
Proporciona detección de emociones, edad y género.
|
| 30 |
+
Si DeepFace falla, recurrirá al servicio básico de emociones.
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
_instance = None
|
| 34 |
+
|
| 35 |
+
def __new__(cls):
|
| 36 |
+
if cls._instance is None:
|
| 37 |
+
cls._instance = super(DeepFaceEmotionService, cls).__new__(cls)
|
| 38 |
+
cls._instance._initialized = False
|
| 39 |
+
return cls._instance
|
| 40 |
+
|
| 41 |
+
    def __init__(self):
        """Initialize the DeepFace-backed emotion service.

        The fallback attributes (actions, detector backend, emotion mapping,
        availability flags) are always defined first, so the object remains
        usable even when DeepFace is missing or fails to initialize — in that
        case the base EmotionService behavior is used instead.
        """
        # Call the parent class initializer first
        super().__init__()

        # Initialization attributes - always defined, even on the fallback path
        self.deepface_initialized = False
        self.deepface_available = DEEPFACE_AVAILABLE
        self.actions = ['emotion', 'age', 'gender']  # defined here so it always exists
        self.detector_backend = "opencv"  # default value

        # DeepFace emotion labels mapped to this app's standard label set;
        # defined here so it always exists
        self.emotion_mapping = {
            'angry': 'anger',
            'disgust': 'disgust',
            'fear': 'fear',
            'happy': 'joy',
            'sad': 'sadness',
            'surprise': 'surprise',
            'neutral': 'neutral'
        }

        # Avoid re-running initialization on the shared singleton instance
        if getattr(self, '_initialized', False):
            return

        self._initialized = True

        # Create the temporary directory if it does not exist
        if not os.path.exists(settings.TEMP_DIR):
            os.makedirs(settings.TEMP_DIR, exist_ok=True)

        # Check DeepFace availability
        if not DEEPFACE_AVAILABLE:
            logging.warning("DeepFace no está disponible. Usando EmotionService básico como respaldo.")
            return

        try:
            # Verify that DeepFace works correctly
            logging.info("Inicializando DeepFace...")
            # NOTE(review): `DeepFace.detector_backends()` is not part of the
            # documented DeepFace public API — confirm it exists for the pinned
            # deepface version. If the call raises, control falls to the except
            # branch below and deepface_initialized stays False.
            detector_backends = DeepFace.detector_backends()
            logging.info(f"DeepFace detector backends disponibles: {detector_backends}")

            # Pick a detector backend from the ones actually available
            if "opencv" in detector_backends:
                self.detector_backend = "opencv"
            elif "retinaface" in detector_backends:
                self.detector_backend = "retinaface"

            # Mark as correctly initialized
            self.deepface_initialized = True
            logging.info("DeepFace inicializado correctamente")

        except Exception as e:
            logging.error(f"Error al inicializar DeepFace: {str(e)}")
            logging.error(traceback.format_exc())
            self.deepface_initialized = False
|
| 97 |
+
|
| 98 |
+
def is_advanced_service_active(self) -> bool:
|
| 99 |
+
"""
|
| 100 |
+
Check if the advanced emotion service (DeepFace) is active and available.
|
| 101 |
+
|
| 102 |
+
Returns:
|
| 103 |
+
bool: True if advanced service is available and initialized
|
| 104 |
+
"""
|
| 105 |
+
return self.deepface_available and self.deepface_initialized
|
| 106 |
+
|
| 107 |
+
    def analyze_emotion(self, face_img: np.ndarray) -> Dict[str, Any]:
        """
        Analyze emotions using DeepFace, with age and gender as extras.

        Falls back to the base EmotionService at every failure point:
        DeepFace unavailable, image write failure, or DeepFace.analyze error.

        Args:
            face_img: Face image to analyze (BGR, as produced by OpenCV)

        Returns:
            Dictionary with emotion analysis results: primary 'emotion',
            'confidence', per-emotion scores, and auxiliary 'features'
        """
        # If DeepFace is not available, use the basic service
        if not DEEPFACE_AVAILABLE:
            logging.info("DeepFace no está disponible. Usando servicio básico.")
            return super().analyze_emotion(face_img)

        # DeepFace.analyze takes a file path, so persist the face temporarily
        temp_face_path = os.path.join(settings.TEMP_DIR, "temp_face.jpg")
        try:
            # Save image for analysis
            cv2.imwrite(temp_face_path, face_img)

            # Attempt analysis with DeepFace
            try:
                # Run the analysis
                analysis_result = DeepFace.analyze(
                    img_path=temp_face_path,
                    actions=self.actions,
                    detector_backend=self.detector_backend,
                    enforce_detection=False  # Continue even if no face is detected
                )

                # DeepFace returns a list for batch processing, get first result
                if isinstance(analysis_result, list):
                    analysis_result = analysis_result[0]

                # Extract emotion data
                emotions_raw = analysis_result.get('emotion', {})

                # Map emotions to our standard set
                emotions = {}
                for df_emotion, score in emotions_raw.items():
                    standard_emotion = self.emotion_mapping.get(df_emotion.lower(), df_emotion.lower())
                    emotions[standard_emotion] = score / 100.0  # DeepFace scores are 0-100

                # Add any missing standard emotions with zero scores
                for emotion in settings.EMOTIONS:
                    if emotion not in emotions:
                        emotions[emotion] = 0.0

                # Find primary emotion (highest score)
                primary_emotion = max(emotions.items(), key=lambda x: x[1])

                # Calculate basic image features (for compatibility with existing code)
                gray_face = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
                brightness = np.mean(gray_face)
                contrast = np.std(gray_face)

                # Extract additional attributes reported by DeepFace
                age = analysis_result.get('age', 0)
                gender = analysis_result.get('gender', 'unknown')

                # Left/right halves for a crude symmetry measure
                # (kept for compatibility with existing code)
                h, w = gray_face.shape
                mid = w // 2
                left_half = gray_face[:, :mid]
                right_half = cv2.flip(gray_face[:, mid:], 1)

                # Calculate symmetry (if sizes match; odd widths make them differ)
                if left_half.shape == right_half.shape:
                    symmetry = 1.0 - (np.sum(cv2.absdiff(left_half, right_half)) / (255 * left_half.size))
                else:
                    symmetry = 0.5  # Default value if face isn't properly centered

                # Result layout expected by the rest of the application
                result = {
                    "emotion": primary_emotion[0],
                    "confidence": primary_emotion[1],  # confidence of the primary emotion
                    "emotions": emotions,  # 'emotions' key (not 'emotion_scores')
                    "is_advanced_detection": True,
                    "features": {
                        'brightness': float(brightness),
                        'contrast': float(contrast),
                        'symmetry': float(symmetry),
                        'smile_score': float(emotions.get('joy', 0.0)),  # Use joy as smile score
                        'age': float(age),
                        'gender': gender
                    }
                }

                return result

            except Exception as e:
                # Log the error and fall back to the basic service
                logging.error(f"Error en DeepFace.analyze: {str(e)}. Usando servicio básico.")
                return super().analyze_emotion(face_img)
        except Exception as e:
            # Failed to save or process the image
            logging.error(f"Error al procesar la imagen para DeepFace: {str(e)}")
            return super().analyze_emotion(face_img)
        finally:
            # Ensure temporary file cleanup regardless of outcome
            if os.path.exists(temp_face_path):
                try:
                    os.remove(temp_face_path)
                except Exception as e:
                    logging.warning(f"No se pudo eliminar el archivo temporal: {str(e)}")
|
| 214 |
+
|
| 215 |
+
def get_advanced_attributes(self, face_img: np.ndarray) -> Dict[str, Any]:
|
| 216 |
+
"""
|
| 217 |
+
Get advanced facial attributes using DeepFace.
|
| 218 |
+
|
| 219 |
+
Args:
|
| 220 |
+
face_img: Face image to analyze
|
| 221 |
+
|
| 222 |
+
Returns:
|
| 223 |
+
Dictionary with advanced attributes (age, gender, race)
|
| 224 |
+
"""
|
| 225 |
+
if not DEEPFACE_AVAILABLE or face_img is None or face_img.size == 0:
|
| 226 |
+
return {
|
| 227 |
+
'age': 0,
|
| 228 |
+
'gender': 'unknown',
|
| 229 |
+
'race': 'unknown'
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
try:
|
| 233 |
+
# Save face image to a temporary file
|
| 234 |
+
temp_face_path = os.path.join(settings.TEMP_DIR, "temp_face_attr.jpg")
|
| 235 |
+
os.makedirs(settings.TEMP_DIR, exist_ok=True)
|
| 236 |
+
cv2.imwrite(temp_face_path, face_img)
|
| 237 |
+
|
| 238 |
+
# Analyze with DeepFace
|
| 239 |
+
attr_result = DeepFace.analyze(
|
| 240 |
+
img_path=temp_face_path,
|
| 241 |
+
actions=['age', 'gender', 'race'],
|
| 242 |
+
detector_backend=self.detector_backend,
|
| 243 |
+
enforce_detection=False,
|
| 244 |
+
)
|
| 245 |
+
|
| 246 |
+
# Extract first result if list
|
| 247 |
+
if isinstance(attr_result, list):
|
| 248 |
+
attr_result = attr_result[0]
|
| 249 |
+
|
| 250 |
+
# Return attributes
|
| 251 |
+
return {
|
| 252 |
+
'age': attr_result.get('age', 0),
|
| 253 |
+
'gender': attr_result.get('gender', 'unknown'),
|
| 254 |
+
'race': attr_result.get('dominant_race', 'unknown')
|
| 255 |
+
}
|
| 256 |
+
except Exception as e:
|
| 257 |
+
logging.error(f"Error in advanced attribute detection: {e}")
|
| 258 |
+
return {
|
| 259 |
+
'age': 0,
|
| 260 |
+
'gender': 'unknown',
|
| 261 |
+
'race': 'unknown'
|
| 262 |
+
}
|
| 263 |
+
finally:
|
| 264 |
+
# Clean up temporary file
|
| 265 |
+
try:
|
| 266 |
+
if os.path.exists(temp_face_path):
|
| 267 |
+
os.remove(temp_face_path)
|
| 268 |
+
except:
|
| 269 |
+
pass
|
services/emotion_service.py
ADDED
|
@@ -0,0 +1,381 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Emotion analysis service for EmotionMirror application.
|
| 3 |
+
Provides emotional classification and analysis features.
|
| 4 |
+
"""
|
| 5 |
+
import os
|
| 6 |
+
import logging
|
| 7 |
+
import cv2
|
| 8 |
+
import numpy as np
|
| 9 |
+
from typing import Dict, Optional, Any, List, Tuple
|
| 10 |
+
|
| 11 |
+
# Definir constante para dlib
|
| 12 |
+
DLIB_AVAILABLE = False
|
| 13 |
+
logging.warning("dlib ha sido desactivado para compatibilidad con el despliegue.")
|
| 14 |
+
|
| 15 |
+
from config import settings
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
class EmotionService:
|
| 20 |
+
"""Service for emotion analysis and classification"""
|
| 21 |
+
|
| 22 |
+
_instance = None
|
| 23 |
+
|
| 24 |
+
def __new__(cls):
|
| 25 |
+
"""Singleton pattern implementation"""
|
| 26 |
+
if cls._instance is None:
|
| 27 |
+
cls._instance = super(EmotionService, cls).__new__(cls)
|
| 28 |
+
cls._instance._initialized = False
|
| 29 |
+
return cls._instance
|
| 30 |
+
|
| 31 |
+
def __init__(self):
|
| 32 |
+
"""Initialize the emotion service"""
|
| 33 |
+
if self._initialized:
|
| 34 |
+
return
|
| 35 |
+
|
| 36 |
+
self._initialized = True
|
| 37 |
+
logger.info("Initializing EmotionService")
|
| 38 |
+
|
| 39 |
+
# Initialize the face classifier
|
| 40 |
+
self.face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
|
| 41 |
+
|
| 42 |
+
# Inicializar el detector de sonrisas para mejorar la detección
|
| 43 |
+
cascade_path = os.path.join(settings.MODELS_DIR, "haarcascade_smile.xml")
|
| 44 |
+
|
| 45 |
+
# Verificar si existe el archivo
|
| 46 |
+
if os.path.exists(cascade_path):
|
| 47 |
+
self.smile_cascade = cv2.CascadeClassifier(cascade_path)
|
| 48 |
+
logger.info(f"Smile cascade loaded from: {cascade_path}")
|
| 49 |
+
else:
|
| 50 |
+
# Si no existe el archivo, intentar usar el predeterminado de OpenCV
|
| 51 |
+
logger.warning(f"Smile cascade not found at: {cascade_path}, using default")
|
| 52 |
+
self.smile_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_smile.xml')
|
| 53 |
+
|
| 54 |
+
# Inicializar el detector de landmarks faciales (opcional, solo si está disponible dlib)
|
| 55 |
+
self.landmark_detector = None
|
| 56 |
+
self.face_detector = None
|
| 57 |
+
|
| 58 |
+
if DLIB_AVAILABLE:
|
| 59 |
+
try:
|
| 60 |
+
# Cargar el detector de rostros
|
| 61 |
+
self.face_detector = dlib.get_frontal_face_detector()
|
| 62 |
+
|
| 63 |
+
# Cargar el predictor de landmarks faciales
|
| 64 |
+
model_path = os.path.join(settings.MODELS_DIR, "shape_predictor_68_face_landmarks.dat")
|
| 65 |
+
if os.path.exists(model_path):
|
| 66 |
+
self.landmark_detector = dlib.shape_predictor(model_path)
|
| 67 |
+
logger.info("Landmark detector loaded successfully")
|
| 68 |
+
else:
|
| 69 |
+
logger.warning(f"Landmark model not found at: {model_path}")
|
| 70 |
+
except Exception as e:
|
| 71 |
+
logger.error(f"Error loading facial landmark detector: {e}")
|
| 72 |
+
self.landmark_detector = None
|
| 73 |
+
self.face_detector = None
|
| 74 |
+
|
| 75 |
+
logger.info("Emotion classifier initialized")
|
| 76 |
+
|
| 77 |
+
def _initialize_emotion_classifier(self):
|
| 78 |
+
"""Initialize the emotion classifier"""
|
| 79 |
+
# This is a simplified implementation for Subfase 1.2
|
| 80 |
+
# In future phases, this will be replaced with a proper ML model
|
| 81 |
+
self.emotion_map = {
|
| 82 |
+
# Map facial features to emotions based on simple heuristics
|
| 83 |
+
# Will be replaced with ML model in later phases
|
| 84 |
+
}
|
| 85 |
+
logger.info("Emotion classifier initialized")
|
| 86 |
+
|
| 87 |
+
    def analyze_emotion(self, face_img: np.ndarray) -> Dict[str, Any]:
        """
        Analyze emotion from a face image.

        Combines global image statistics (brightness, contrast, left/right
        symmetry) with smile detection to produce a normalized probability
        distribution over the configured emotions.

        Args:
            face_img: Face image to analyze (BGR, as produced by OpenCV)

        Returns:
            Dictionary with emotion data: dominant 'emotion', its
            'confidence', the full 'emotions' distribution, and the raw
            'features' used by the classifier.
        """
        # Guard: empty or missing crops yield a well-formed "unknown" result.
        if face_img is None or face_img.size == 0:
            return {
                'emotion': 'unknown',
                'confidence': 0.0,
                'emotions': {'unknown': 1.0},
                'features': {
                    'brightness': 0.0,
                    'contrast': 0.0,
                    'symmetry': 0.0,
                    'smile_score': 0.0
                }
            }

        # Convert to grayscale for analysis
        gray_face = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)

        # Calculate basic image features
        brightness = np.mean(gray_face)
        contrast = np.std(gray_face)

        # Calculate symmetry: compare the left half against the mirrored
        # right half of the face.
        h, w = gray_face.shape
        mid = w // 2
        left_half = gray_face[:, :mid]
        right_half = gray_face[:, mid:]
        right_half_flipped = cv2.flip(right_half, 1)

        # Resize if the halves have different shapes (odd widths)
        if left_half.shape != right_half_flipped.shape:
            min_width = min(left_half.shape[1], right_half_flipped.shape[1])
            left_half = left_half[:, :min_width]
            right_half_flipped = right_half_flipped[:, :min_width]

        diff = cv2.absdiff(left_half, right_half_flipped)
        # NOTE(review): the denominator uses `mid` even when the halves were
        # truncated to min_width above; for odd widths this slightly
        # overstates symmetry - confirm whether that is intended.
        symmetry = 1.0 - (np.sum(diff) / (h * mid * 255))

        # Low overall brightness is treated as indicating sadness / flat affect
        norm_brightness = brightness / 255.0

        # Get base emotions from rule-based classification
        emotions = self._rule_based_emotion_classification(brightness, contrast, symmetry)

        # Check for smile indicators ONLY on high-brightness images
        smile_detected = False
        smile_score = 0.0

        if norm_brightness >= 0.6:  # Only search for smiles when brightness is HIGH
            smile_detected = self._detect_smile_cascade(gray_face)

            # Try landmark-based smile detection if available
            if self.landmark_detector:
                smile_score = self._detect_smile_landmarks(gray_face)
                logger.info(f"Landmark smile score: {smile_score}")

            # A detected smile on a bright face boosts joy
            if smile_detected or smile_score > 0.4:
                smile_confidence = max(0.8 if smile_detected else 0.0, smile_score)
                logger.info(f"Detectada SONRISA con score {smile_confidence:.2f} - clasificando como Joy")
                # NOTE(review): assumes 'joy' is always a key of the
                # distribution returned by the rule-based classifier
                # (i.e. present in settings.EMOTIONS) - confirm.
                emotions['joy'] = max(emotions['joy'], smile_confidence)
        else:
            # FORCE sadness/neutral for dark faces
            emotions['sadness'] = max(emotions['sadness'], 0.6)
            emotions['neutral'] = max(emotions['neutral'], 0.3)
            emotions['joy'] = min(emotions['joy'], 0.1)  # Drastically reduce joy
            logger.info(f"FORZANDO TRISTEZA por brillo bajo: {norm_brightness}")

        # Normalize emotions so they sum to 1
        total = sum(emotions.values())
        if total > 0:
            emotions = {k: v/total for k, v in emotions.items()}

        # Find dominant emotion
        emotion = max(emotions.items(), key=lambda x: x[1])

        return {
            'emotion': emotion[0],
            'confidence': emotion[1],
            'emotions': emotions,
            'features': {
                'brightness': float(brightness),
                'contrast': float(contrast),
                'symmetry': float(symmetry),
                'smile_score': float(smile_score)
            }
        }
|
| 182 |
+
|
| 183 |
+
def _detect_smile_cascade(self, gray_face: np.ndarray) -> bool:
|
| 184 |
+
"""
|
| 185 |
+
Detecta sonrisas usando el clasificador en cascada de OpenCV.
|
| 186 |
+
|
| 187 |
+
Args:
|
| 188 |
+
gray_face: Imagen en escala de grises de la cara
|
| 189 |
+
|
| 190 |
+
Returns:
|
| 191 |
+
bool: True si se detecta sonrisa, False en caso contrario
|
| 192 |
+
"""
|
| 193 |
+
try:
|
| 194 |
+
# Ajustar parámetros para hacer la detección MÁS SENSIBLE
|
| 195 |
+
# Increasing minNeighbors makes detection more selective
|
| 196 |
+
smiles = self.smile_cascade.detectMultiScale(
|
| 197 |
+
gray_face,
|
| 198 |
+
scaleFactor=1.3,
|
| 199 |
+
minNeighbors=25, # Aumentado de 20 a 25 para mayor selectividad
|
| 200 |
+
minSize=(25, 25)
|
| 201 |
+
)
|
| 202 |
+
|
| 203 |
+
logger.info(f"Smile detection: {len(smiles)} smiles detected")
|
| 204 |
+
|
| 205 |
+
if len(smiles) > 0:
|
| 206 |
+
logger.info("Sonrisa detectada con detector de cascada")
|
| 207 |
+
return True
|
| 208 |
+
return False
|
| 209 |
+
except Exception as e:
|
| 210 |
+
logger.error(f"Error in smile detection: {e}")
|
| 211 |
+
return False
|
| 212 |
+
|
| 213 |
+
    def _detect_smile_landmarks(self, face_img: np.ndarray) -> float:
        """
        Detect a smile using dlib facial landmarks.

        Fits a parabola to the upper-lip landmark points and interprets an
        upward curvature as a smile.

        Args:
            face_img: Face image (BGR or grayscale).

        Returns:
            float: Smile score between 0 and 1 (0.0 when dlib support is
            disabled, no face is found, or an error occurs).
        """
        if not DLIB_AVAILABLE or self.landmark_detector is None:
            return 0.0

        try:
            # Convert to grayscale if needed
            if len(face_img.shape) == 3:
                gray = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
            else:
                gray = face_img

            faces = self.face_detector(gray)

            if len(faces) == 0:
                return 0.0

            # Use the first detected face
            landmarks = self.landmark_detector(gray, faces[0])

            # Extract the points as (x, y) coordinates
            # (assumes the standard dlib 68-point predictor - TODO confirm)
            landmarks_points = []
            for i in range(68):
                x = landmarks.part(i).x
                y = landmarks.part(i).y
                landmarks_points.append((x, y))

            # Compute the curvature of the lips.
            # NOTE(review): indices 48-54 are the outer upper lip in the
            # 68-point convention; mouth_bottom is collected but never used.
            mouth_top = landmarks_points[48:55]
            mouth_bottom = landmarks_points[55:60]

            x = [p[0] for p in mouth_top]
            y = [p[1] for p in mouth_top]

            # Fit a quadratic to the lip points
            z = np.polyfit(x, y, 2)

            # The quadratic coefficient gives the curvature (negative = smile)
            curvature = -z[0]  # Negated because image y grows downward

            # Map curvature to a score between 0 and 1
            smile_score = max(0, min(1, curvature * 500))  # Tune factor as needed

            return smile_score
        except Exception as e:
            logger.error(f"Error detectando sonrisa con landmarks: {e}")
            return 0.0
|
| 268 |
+
|
| 269 |
+
    def _rule_based_emotion_classification(self, brightness, contrast, symmetry) -> Dict[str, float]:
        """
        Simple rule-based emotion classification.

        Heuristic placeholder for a trained model: brightness dominates the
        decision (dark -> sadness, bright -> joy), with contrast and
        symmetry refining the remaining emotions.

        Args:
            brightness: Image brightness (mean gray level, 0-255)
            contrast: Image contrast (gray-level standard deviation)
            symmetry: Facial symmetry (0-1, higher is more symmetric)

        Returns:
            Dictionary with emotion probabilities (normalized to sum to 1)
        """
        # Normalize values
        norm_brightness = brightness / 255.0
        norm_contrast = min(contrast / 80.0, 1.0)

        # Initialize all emotions with low probability.
        # NOTE(review): the branches below assume 'sadness', 'neutral' and
        # 'joy' are present in settings.EMOTIONS - confirm.
        emotions = {e: 0.05 for e in settings.EMOTIONS}

        # STRONG RULE: dark faces are forced towards sadness
        if norm_brightness < 0.6:
            # The darker the face, the more sadness
            sadness_score = 0.7 + (0.6 - norm_brightness) * 0.7
            logger.info(f"REGLA FORZADA: Detectando TRISTEZA por brillo bajo: {norm_brightness}")

            emotions['sadness'] = sadness_score
            emotions['neutral'] = 0.4
            emotions['joy'] = 0.1  # Significantly reduce joy

            # Normalize and return early: low brightness overrides everything
            total = sum(emotions.values())
            emotions = {k: v/total for k, v in emotions.items()}
            return emotions

        # Bright faces favour joy.
        # NOTE(review): brightness in [0.6, 0.65] falls through to the
        # generic rules below without a joy boost - confirm this is intended.
        if norm_brightness > 0.65:
            emotions['joy'] = 0.7 + norm_brightness * 0.3
            logger.info(f"REGLA BALANCEADA: Detectando alegría por brillo alto: {norm_brightness}")

        # Apply standard rules for other scenarios
        # Low brightness + high contrast -> anger, sadness
        # (unreachable here: the early return above handles brightness < 0.6)
        if norm_brightness < 0.4 and norm_contrast > 0.4:
            emotions['anger'] = 0.4 + norm_contrast * 0.3
            emotions['sadness'] = 0.3 + (1 - norm_brightness) * 0.3

        # High symmetry -> neutral
        if symmetry > 0.7:
            emotions['neutral'] = 0.3 + symmetry * 0.4
            # If symmetry is high but brightness is also high, joy overrides neutral
            if norm_brightness > 0.7:
                emotions['joy'] = max(emotions['joy'], 0.6 + norm_brightness * 0.3)

        # Medium brightness, medium contrast -> surprise
        # (only brightness >= 0.6 reaches here, so the effective window is [0.6, 0.6) = empty
        #  for the lower part; confirm the threshold interplay is intended)
        if 0.4 < norm_brightness < 0.6 and 0.3 < norm_contrast < 0.5:
            emotions['surprise'] = 0.4 + norm_contrast * 0.2

        # Low symmetry -> disgust, fear
        if symmetry < 0.5:
            emotions['disgust'] = 0.3 + (1 - symmetry) * 0.3
            emotions['fear'] = 0.2 + (1 - symmetry) * 0.3

        # Normalize so they sum to 1
        total = sum(emotions.values())
        if total > 0:
            emotions = {k: v/total for k, v in emotions.items()}

        return emotions
|
| 336 |
+
|
| 337 |
+
def get_emotion_color(self, emotion: str) -> Tuple[int, int, int]:
|
| 338 |
+
"""
|
| 339 |
+
Get a color associated with an emotion.
|
| 340 |
+
|
| 341 |
+
Args:
|
| 342 |
+
emotion: Emotion name
|
| 343 |
+
|
| 344 |
+
Returns:
|
| 345 |
+
RGB color tuple
|
| 346 |
+
"""
|
| 347 |
+
emotion_colors = {
|
| 348 |
+
'joy': (0, 255, 255), # Yellow
|
| 349 |
+
'sadness': (205, 108, 0), # Blue
|
| 350 |
+
'anger': (0, 0, 255), # Red
|
| 351 |
+
'fear': (211, 0, 148), # Purple
|
| 352 |
+
'surprise': (0, 140, 255), # Orange
|
| 353 |
+
'disgust': (50, 205, 50), # Green
|
| 354 |
+
'neutral': (200, 200, 200), # Gray
|
| 355 |
+
'unknown': (150, 150, 150) # Light Gray
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
return emotion_colors.get(emotion, (200, 200, 200))
|
| 359 |
+
|
| 360 |
+
def get_emotion_emoji(self, emotion: str) -> str:
|
| 361 |
+
"""
|
| 362 |
+
Get an emoji representing the emotion.
|
| 363 |
+
|
| 364 |
+
Args:
|
| 365 |
+
emotion: Emotion name
|
| 366 |
+
|
| 367 |
+
Returns:
|
| 368 |
+
Emoji string
|
| 369 |
+
"""
|
| 370 |
+
emotion_emojis = {
|
| 371 |
+
'joy': '😊',
|
| 372 |
+
'sadness': '😢',
|
| 373 |
+
'anger': '😠',
|
| 374 |
+
'fear': '😨',
|
| 375 |
+
'surprise': '😲',
|
| 376 |
+
'disgust': '🤢',
|
| 377 |
+
'neutral': '😐',
|
| 378 |
+
'unknown': '❓'
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
return emotion_emojis.get(emotion, '❓')
|
services/image_service.py
ADDED
|
@@ -0,0 +1,828 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Image service for EmotionMirror application.
|
| 3 |
+
Provides functionality for image loading, validation and basic processing.
|
| 4 |
+
|
| 5 |
+
This module implements:
|
| 6 |
+
1. Design of the upload interface - Clear presentation and information
|
| 7 |
+
2. Implementation of format validation - Extension and MIME type verification
|
| 8 |
+
3. Validation of dimensions and size - Ensuring optimal images for facial analysis
|
| 9 |
+
4. Image preprocessing - Adjusting brightness, contrast, and resizing
|
| 10 |
+
"""
|
| 11 |
+
import os
|
| 12 |
+
import logging
|
| 13 |
+
import numpy as np
|
| 14 |
+
import cv2
|
| 15 |
+
import imghdr
|
| 16 |
+
import mimetypes
|
| 17 |
+
import uuid
|
| 18 |
+
import tempfile
|
| 19 |
+
from typing import Tuple, Optional, Dict, Any, List
|
| 20 |
+
import io
|
| 21 |
+
import base64
|
| 22 |
+
from PIL import Image
|
| 23 |
+
import time
|
| 24 |
+
|
| 25 |
+
logger = logging.getLogger(__name__)
|
| 26 |
+
|
| 27 |
+
class ImageService:
|
| 28 |
+
"""
|
| 29 |
+
Service for handling image operations including format validation.
|
| 30 |
+
|
| 31 |
+
This service encapsulates functionality for validating image formats,
|
| 32 |
+
checking file sizes, dimensions, and quality to ensure images meet
|
| 33 |
+
application requirements for effective facial detection.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
def __init__(self):
|
| 37 |
+
"""Initialize image service with default settings"""
|
| 38 |
+
# Define supported formats for upload
|
| 39 |
+
self.supported_formats = ["jpg", "jpeg", "png"]
|
| 40 |
+
|
| 41 |
+
# Define maximum file size (10MB by default)
|
| 42 |
+
self.max_file_size = 10 * 1024 * 1024 # 10MB
|
| 43 |
+
|
| 44 |
+
# STEP 3: Define dimension constraints for image validation
|
| 45 |
+
self.min_width = 320
|
| 46 |
+
self.min_height = 240
|
| 47 |
+
self.max_width = 4096
|
| 48 |
+
self.max_height = 4096
|
| 49 |
+
self.optimal_width = 640
|
| 50 |
+
self.optimal_height = 480
|
| 51 |
+
|
| 52 |
+
# Initialize temp directory for processed images
|
| 53 |
+
self.temp_dir = tempfile.gettempdir()
|
| 54 |
+
logger.info(f"Image service initialized. Temp directory: {self.temp_dir}")
|
| 55 |
+
|
| 56 |
+
# Initialize MIME types mapping
|
| 57 |
+
self._init_mime_types()
|
| 58 |
+
|
| 59 |
+
def _init_mime_types(self):
|
| 60 |
+
"""Initialize MIME types mapping for common image formats"""
|
| 61 |
+
# Ensure MIME types are properly registered
|
| 62 |
+
mimetypes.init()
|
| 63 |
+
|
| 64 |
+
# Add common image MIME types if not registered
|
| 65 |
+
if not mimetypes.guess_type('.jpg')[0]:
|
| 66 |
+
mimetypes.add_type('image/jpeg', '.jpg')
|
| 67 |
+
mimetypes.add_type('image/jpeg', '.jpeg')
|
| 68 |
+
|
| 69 |
+
if not mimetypes.guess_type('.png')[0]:
|
| 70 |
+
mimetypes.add_type('image/png', '.png')
|
| 71 |
+
|
| 72 |
+
def get_supported_formats(self) -> list:
|
| 73 |
+
"""
|
| 74 |
+
Return list of supported image formats.
|
| 75 |
+
|
| 76 |
+
Returns:
|
| 77 |
+
List of file extensions that the application accepts
|
| 78 |
+
"""
|
| 79 |
+
return self.supported_formats
|
| 80 |
+
|
| 81 |
+
def get_format_info(self) -> str:
|
| 82 |
+
"""
|
| 83 |
+
Return user-friendly format info message.
|
| 84 |
+
|
| 85 |
+
Returns:
|
| 86 |
+
A human-readable string explaining supported formats
|
| 87 |
+
"""
|
| 88 |
+
return "Supported formats: JPG, JPEG, PNG."
|
| 89 |
+
|
| 90 |
+
def validate_file_extension(self, filename: str) -> Tuple[bool, str]:
|
| 91 |
+
"""
|
| 92 |
+
Validates if the file has an allowed extension.
|
| 93 |
+
|
| 94 |
+
Args:
|
| 95 |
+
filename: The name of the uploaded file
|
| 96 |
+
|
| 97 |
+
Returns:
|
| 98 |
+
Tuple containing (is_valid, message)
|
| 99 |
+
"""
|
| 100 |
+
if not filename:
|
| 101 |
+
return False, "No file provided."
|
| 102 |
+
|
| 103 |
+
# Get the file extension
|
| 104 |
+
file_ext = os.path.splitext(filename)[1].lower().replace('.', '')
|
| 105 |
+
|
| 106 |
+
if file_ext not in self.supported_formats:
|
| 107 |
+
return False, f"Invalid file format. {self.get_format_info()}"
|
| 108 |
+
|
| 109 |
+
return True, "Valid file format."
|
| 110 |
+
|
| 111 |
+
def validate_file_mime(self, file_content: bytes) -> Tuple[bool, str]:
|
| 112 |
+
"""
|
| 113 |
+
Validates the actual MIME type of the file by examining its content.
|
| 114 |
+
|
| 115 |
+
Args:
|
| 116 |
+
file_content: The content of the file as bytes
|
| 117 |
+
|
| 118 |
+
Returns:
|
| 119 |
+
Tuple containing (is_valid, message)
|
| 120 |
+
"""
|
| 121 |
+
if not file_content:
|
| 122 |
+
return False, "Could not read file content."
|
| 123 |
+
|
| 124 |
+
# Use imghdr to check the image format from the file content
|
| 125 |
+
img_format = imghdr.what(None, h=file_content)
|
| 126 |
+
|
| 127 |
+
# Log the detected format for debugging
|
| 128 |
+
logger.debug(f"Detected image format: {img_format}")
|
| 129 |
+
|
| 130 |
+
if img_format is None:
|
| 131 |
+
# Try alternative detection method
|
| 132 |
+
try:
|
| 133 |
+
# Try to decode with OpenCV to verify it's a valid image
|
| 134 |
+
img_array = np.frombuffer(file_content, np.uint8)
|
| 135 |
+
img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
|
| 136 |
+
|
| 137 |
+
if img is None:
|
| 138 |
+
return False, "The file does not appear to be a valid image."
|
| 139 |
+
|
| 140 |
+
# If OpenCV can read it but imghdr couldn't identify format,
|
| 141 |
+
# we'll assume it's valid but with unknown format
|
| 142 |
+
return True, "Valid image (specific format not identified)."
|
| 143 |
+
|
| 144 |
+
except Exception as e:
|
| 145 |
+
logger.error(f"Error validating image content: {e}")
|
| 146 |
+
return False, "Error validating image content."
|
| 147 |
+
|
| 148 |
+
# Handle specific formats - normalize format names
|
| 149 |
+
if img_format == "jpeg":
|
| 150 |
+
img_format = "jpg"
|
| 151 |
+
|
| 152 |
+
if img_format not in ["jpg", "png"]:
|
| 153 |
+
return False, f"Unsupported image format: {img_format}. {self.get_format_info()}"
|
| 154 |
+
|
| 155 |
+
return True, f"Valid image in {img_format.upper()} format."
|
| 156 |
+
|
| 157 |
+
# STEP 3: Implementation of dimension validation
|
| 158 |
+
def validate_image_dimensions(self, img: np.ndarray) -> Dict[str, Any]:
|
| 159 |
+
"""
|
| 160 |
+
Validates image dimensions for facial processing.
|
| 161 |
+
Part of Step 3: Validation of dimensions and size.
|
| 162 |
+
|
| 163 |
+
This method analyzes the image dimensions to ensure they meet
|
| 164 |
+
the requirements for effective facial detection and analysis.
|
| 165 |
+
It checks for minimum/maximum sizes and aspect ratio issues.
|
| 166 |
+
|
| 167 |
+
Args:
|
| 168 |
+
img: The image as a numpy array
|
| 169 |
+
|
| 170 |
+
Returns:
|
| 171 |
+
Dict with validation results including:
|
| 172 |
+
- valid: True if dimensions are acceptable, False otherwise
|
| 173 |
+
- width, height: Image dimensions
|
| 174 |
+
- warnings: List of warnings if dimensions are not optimal
|
| 175 |
+
- messages: List of informational messages
|
| 176 |
+
"""
|
| 177 |
+
result = {
|
| 178 |
+
"valid": True,
|
| 179 |
+
"width": 0,
|
| 180 |
+
"height": 0,
|
| 181 |
+
"aspect_ratio": 0,
|
| 182 |
+
"warnings": [],
|
| 183 |
+
"messages": []
|
| 184 |
+
}
|
| 185 |
+
|
| 186 |
+
if img is None:
|
| 187 |
+
result["valid"] = False
|
| 188 |
+
result["messages"].append("Could not process image to validate dimensions.")
|
| 189 |
+
return result
|
| 190 |
+
|
| 191 |
+
# Get dimensions
|
| 192 |
+
height, width = img.shape[:2]
|
| 193 |
+
aspect_ratio = width / height if height > 0 else 0
|
| 194 |
+
|
| 195 |
+
result["width"] = width
|
| 196 |
+
result["height"] = height
|
| 197 |
+
result["aspect_ratio"] = aspect_ratio
|
| 198 |
+
|
| 199 |
+
# Validate minimum size
|
| 200 |
+
if width < self.min_width or height < self.min_height:
|
| 201 |
+
result["valid"] = False
|
| 202 |
+
result["warnings"].append(f"Image too small: {width}x{height}. Minimum recommended size: {self.min_width}x{self.min_height}")
|
| 203 |
+
|
| 204 |
+
# Validate maximum size
|
| 205 |
+
if width > self.max_width or height > self.max_height:
|
| 206 |
+
result["warnings"].append(f"Image very large: {width}x{height}. May affect performance.")
|
| 207 |
+
|
| 208 |
+
# Check if dimensions are optimal
|
| 209 |
+
if self.min_width <= width < self.optimal_width or self.min_height <= height < self.optimal_height:
|
| 210 |
+
result["messages"].append(f"Acceptable but not optimal dimensions. Ideal: {self.optimal_width}x{self.optimal_height}")
|
| 211 |
+
elif width >= self.optimal_width and height >= self.optimal_height:
|
| 212 |
+
result["messages"].append(f"Optimal dimensions for facial analysis: {width}x{height}")
|
| 213 |
+
|
| 214 |
+
# Verify aspect ratio (very elongated can be problematic)
|
| 215 |
+
if aspect_ratio > 2.5 or aspect_ratio < 0.4:
|
| 216 |
+
result["warnings"].append(f"Unusual aspect ratio: {aspect_ratio:.2f}. May affect facial detection.")
|
| 217 |
+
|
| 218 |
+
return result
|
| 219 |
+
|
| 220 |
+
# STEP 3: Implementation of quality assessment
|
| 221 |
+
def check_image_quality(self, img: np.ndarray) -> Dict[str, Any]:
|
| 222 |
+
"""
|
| 223 |
+
Evaluates the overall image quality for facial detection.
|
| 224 |
+
Part of Step 3: Validation of dimensions and size.
|
| 225 |
+
|
| 226 |
+
This method performs various quality assessments including:
|
| 227 |
+
- Blurriness detection using Laplacian variance
|
| 228 |
+
- Brightness level measurement
|
| 229 |
+
- Contrast calculation
|
| 230 |
+
|
| 231 |
+
These metrics are combined into an overall quality score and
|
| 232 |
+
specific recommendations are provided for improving quality.
|
| 233 |
+
|
| 234 |
+
Args:
|
| 235 |
+
img: The image as a numpy array
|
| 236 |
+
|
| 237 |
+
Returns:
|
| 238 |
+
Dict with quality metrics and recommendations
|
| 239 |
+
"""
|
| 240 |
+
result = {
|
| 241 |
+
"quality_score": 0.0,
|
| 242 |
+
"is_blurry": False,
|
| 243 |
+
"brightness": 0.0,
|
| 244 |
+
"contrast": 0.0,
|
| 245 |
+
"warnings": [],
|
| 246 |
+
"suggestions": []
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
if img is None:
|
| 250 |
+
result["warnings"].append("Could not evaluate image quality.")
|
| 251 |
+
return result
|
| 252 |
+
|
| 253 |
+
# Convert to grayscale for some metrics
|
| 254 |
+
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if len(img.shape) == 3 else img
|
| 255 |
+
|
| 256 |
+
# Evaluate sharpness (using Laplacian variance)
|
| 257 |
+
laplacian_var = cv2.Laplacian(gray, cv2.CV_64F).var()
|
| 258 |
+
result["is_blurry"] = laplacian_var < 100
|
| 259 |
+
|
| 260 |
+
# Calculate brightness (pixel average)
|
| 261 |
+
brightness = np.mean(gray)
|
| 262 |
+
result["brightness"] = brightness / 255.0 # Normalized to 0-1
|
| 263 |
+
|
| 264 |
+
# Calculate contrast (pixel standard deviation)
|
| 265 |
+
contrast = np.std(gray)
|
| 266 |
+
result["contrast"] = contrast / 255.0 # Normalized to 0-1
|
| 267 |
+
|
| 268 |
+
# General quality evaluation (weighted combination)
|
| 269 |
+
sharpness_score = min(1.0, laplacian_var / 500)
|
| 270 |
+
brightness_score = 1.0 - abs(0.5 - result["brightness"]) * 2 # optimal near 0.5
|
| 271 |
+
contrast_score = min(1.0, result["contrast"] * 3) # higher contrast is better
|
| 272 |
+
|
| 273 |
+
# Total quality (0-1)
|
| 274 |
+
result["quality_score"] = (sharpness_score * 0.5 + brightness_score * 0.25 + contrast_score * 0.25)
|
| 275 |
+
|
| 276 |
+
# Generate warnings and suggestions based on metrics
|
| 277 |
+
if result["is_blurry"]:
|
| 278 |
+
result["warnings"].append("The image appears to be blurry.")
|
| 279 |
+
result["suggestions"].append("Use a sharper image to improve facial detection accuracy.")
|
| 280 |
+
|
| 281 |
+
if result["brightness"] < 0.2:
|
| 282 |
+
result["warnings"].append("The image is too dark.")
|
| 283 |
+
result["suggestions"].append("Try using a better illuminated image.")
|
| 284 |
+
elif result["brightness"] > 0.8:
|
| 285 |
+
result["warnings"].append("The image is too bright.")
|
| 286 |
+
result["suggestions"].append("Try using an image with more balanced lighting.")
|
| 287 |
+
|
| 288 |
+
if result["contrast"] < 0.1:
|
| 289 |
+
result["warnings"].append("The image has very low contrast.")
|
| 290 |
+
result["suggestions"].append("An image with higher contrast will improve facial feature detection.")
|
| 291 |
+
|
| 292 |
+
return result
|
| 293 |
+
|
| 294 |
+
def load_image(self, file) -> np.ndarray:
    """Decode an uploaded file object into an OpenCV image array.

    Args:
        file: The uploaded file object

    Returns:
        The image as a numpy array (BGR, as produced by cv2.imdecode)

    Raises:
        ValueError: If the file cannot be decoded.
    """
    try:
        raw = np.frombuffer(file.read(), np.uint8)
        # Rewind so the caller can read the same file object again later.
        file.seek(0)

        decoded = cv2.imdecode(raw, cv2.IMREAD_COLOR)
        if decoded is None:
            logger.error("Failed to decode image")
            raise ValueError("Could not decode image. The file may be corrupted.")
        return decoded
    except Exception as e:
        # Wrap any failure (including the decode error above) uniformly.
        logger.error(f"Error loading image: {str(e)}")
        raise ValueError(f"Error loading image: {str(e)}")
|
| 321 |
+
|
| 322 |
+
def load_image_from_path(self, path: str) -> np.ndarray:
    """Load an image from disk into a numpy array.

    Args:
        path: Path to the image file

    Returns:
        The image as a numpy array

    Raises:
        ValueError: If the file is missing or cannot be decoded.
    """
    try:
        loaded = cv2.imread(path)
        # cv2.imread signals failure by returning None rather than raising.
        if loaded is None:
            logger.error(f"Failed to load image from path: {path}")
            raise ValueError("Could not load image. The file may be corrupted or not found.")
        return loaded
    except Exception as e:
        logger.error(f"Error loading image from path: {str(e)}")
        raise ValueError(f"Error loading image: {str(e)}")
|
| 344 |
+
|
| 345 |
+
# STEP 4: Image Preprocessing Methods
|
| 346 |
+
|
| 347 |
+
def preprocess_image(self, img: np.ndarray) -> Dict[str, Any]:
    """Enhance an image for facial detection and emotion analysis.

    Applies, in order:
      1. Brightness correction when the image is notably dark or bright.
      2. A mild contrast reduction (softens shadows, which tends to help
         facial feature detection).
      3. A down-scale when the image exceeds the configured maximums.

    Args:
        img: The image as a numpy array

    Returns:
        Dict containing:
            - original_image: The original image
            - processed_image: The enhanced image
            - improvements: List of applied enhancements

    Raises:
        ValueError: If the image is None or empty.
    """
    if img is None or img.size == 0:
        raise ValueError("Invalid image provided for preprocessing")

    original = img.copy()
    processed = img.copy()
    applied = []

    # Measure quality on the working copy to decide which fixes to apply.
    metrics = self.check_image_quality(processed)
    brightness = metrics.get('brightness', 0.5)

    # 1. Brightness correction for notably dark or bright images.
    if brightness < 0.3:
        processed = self.adjust_brightness(processed, factor=1.3)
        applied.append("Brightness increased by 30%")
    elif brightness > 0.8:
        processed = self.adjust_brightness(processed, factor=0.7)
        applied.append("Brightness decreased by 30%")

    # 2. Always soften contrast slightly — reduces harsh shadows before
    #    facial feature detection.
    processed = self.adjust_contrast(processed, factor=0.7)
    applied.append("Contrast decreased by 30%")

    # 3. Down-scale oversized images toward the optimal working resolution.
    h, w = processed.shape[:2]
    if w > self.max_width or h > self.max_height:
        processed = self.resize_image(
            processed,
            target_width=min(w, self.optimal_width),
            target_height=min(h, self.optimal_height),
        )
        applied.append(f"Resized to optimal dimensions")

    return {
        "original_image": original,
        "processed_image": processed,
        "improvements": applied,
    }
|
| 411 |
+
|
| 412 |
+
def resize_image(self, img: np.ndarray, target_width: int = None, target_height: int = None) -> np.ndarray:
    """Resize *img* while preserving its aspect ratio.

    When neither target is given, the service's optimal dimensions are used.
    When only one target is given, the other side follows from the aspect
    ratio (the constrained side is never upscaled past the original).
    When both are given, the more constraining side wins.

    Args:
        img: The image as a numpy array
        target_width: Target width for the resized image
        target_height: Target height for the resized image

    Returns:
        Resized image as a numpy array

    Raises:
        ValueError: If the image is None or empty.
    """
    if img is None or img.size == 0:
        raise ValueError("Invalid image provided for resizing")

    h, w = img.shape[:2]

    if target_width is None and target_height is None:
        target_width, target_height = self.optimal_width, self.optimal_height

    aspect = w / h

    if target_width and not target_height:
        # Only width constrained: derive height from the aspect ratio.
        new_w = min(target_width, w)
        new_h = int(new_w / aspect)
    elif target_height and not target_width:
        # Only height constrained: derive width from the aspect ratio.
        new_h = min(target_height, h)
        new_w = int(new_h * aspect)
    else:
        # Both constrained: honour whichever target shrinks the image more.
        if target_width / w < target_height / h:
            new_w = target_width
            new_h = int(target_width / aspect)
        else:
            new_h = target_height
            new_w = int(target_height * aspect)

    # INTER_AREA is the recommended interpolation for down-scaling.
    return cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_AREA)
|
| 463 |
+
|
| 464 |
+
def adjust_brightness(self, img: np.ndarray, factor: float = 1.0) -> np.ndarray:
    """Scale image brightness by *factor*.

    Args:
        img: The image as a numpy array
        factor: Brightness adjustment factor (1.0 = no change, >1.0 = brighter, <1.0 = darker)

    Returns:
        Brightness-adjusted image as a numpy array

    Raises:
        ValueError: If the image is None or empty.
    """
    if img is None or img.size == 0:
        raise ValueError("Invalid image provided for brightness adjustment")

    # Work in HSV so only the value (brightness) channel is modified,
    # leaving hue and saturation intact.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    scaled = hsv[:, :, 2] * factor
    hsv[:, :, 2] = np.clip(scaled, 0, 255).astype(np.uint8)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
|
| 488 |
+
|
| 489 |
+
def adjust_contrast(self, img: np.ndarray, factor: float = 1.0) -> np.ndarray:
    """Scale image contrast by *factor* around the mid-grey pivot.

    Args:
        img: The image as a numpy array
        factor: Contrast adjustment factor (1.0 = no change, >1.0 = more contrast, <1.0 = less contrast)

    Returns:
        Contrast-adjusted image as a numpy array

    Raises:
        ValueError: If the image is None or empty.
    """
    if img is None or img.size == 0:
        raise ValueError("Invalid image provided for contrast adjustment")

    # pixel' = (pixel - 127.5) * factor + 127.5, clamped back to uint8 range.
    pivot = 127.5
    stretched = (img.astype(np.float32) - pivot) * factor + pivot
    return np.clip(stretched, 0, 255).astype(np.uint8)
|
| 508 |
+
|
| 509 |
+
def save_processed_image(self, img: np.ndarray) -> str:
    """Persist a processed image as a JPEG in the service temp directory.

    Args:
        img: The processed image as a numpy array

    Returns:
        Path to the saved temporary image file

    Raises:
        ValueError: If the image is None/empty or JPEG encoding fails.
    """
    if img is None or img.size == 0:
        raise ValueError("Invalid image provided for saving")

    # A UUID in the name avoids collisions between concurrent requests.
    filepath = os.path.join(self.temp_dir, f"processed_{uuid.uuid4()}.jpg")

    ok, encoded = cv2.imencode('.jpg', img)
    if not ok:
        raise ValueError("Failed to encode image")

    with open(filepath, 'wb') as out:
        out.write(encoded)

    logger.info(f"Saved processed image to {filepath}")
    return filepath
|
| 537 |
+
|
| 538 |
+
def save_uploaded_image(self, img: np.ndarray) -> str:
    """Save an uploaded image to a temporary JPEG file.

    Args:
        img: The image as a numpy array in BGR format (OpenCV default)

    Returns:
        Path to the saved temporary image file

    Raises:
        ValueError: If the image is None or empty.
    """
    if img is None or img.size == 0:
        raise ValueError("Invalid image provided for saving")

    # Create a unique filename so concurrent uploads never collide.
    unique_id = str(uuid.uuid4())
    temp_filename = f"uploaded_image_{unique_id}.jpg"
    temp_path = os.path.join(self.temp_dir, temp_filename)

    # FIX: the previous implementation converted BGR -> RGB -> BGR before
    # saving — an identity round trip costing two full-image conversions.
    # cv2.imwrite expects BGR, so the array can be written directly with
    # identical output.
    cv2.imwrite(temp_path, img, [cv2.IMWRITE_JPEG_QUALITY, 95])

    logger.info(f"Saved uploaded image to {temp_path}")
    return temp_path
|
| 572 |
+
|
| 573 |
+
def encode_image_to_base64(self, img: np.ndarray) -> str:
    """Encode an image as a base64 JPEG string for embedding in HTML.

    Args:
        img: Image as numpy array (assumed RGB — TODO confirm with callers,
             since OpenCV code elsewhere in this service produces BGR)

    Returns:
        Base64 encoded string, or "" if encoding fails.
    """
    # Local import kept so module-level dependencies are unchanged, but
    # hoisted to the top of the function instead of mid-flow.
    import base64

    try:
        # FIX: the previous version had a conditional whose both paths
        # assigned `img_rgb = img` (dead code); the array is passed
        # through unchanged either way, so it is used directly here.
        pil_img = Image.fromarray(img)

        # Serialize to an in-memory JPEG.
        buffer = io.BytesIO()
        pil_img.save(buffer, format="JPEG")
        buffer.seek(0)

        return base64.b64encode(buffer.getvalue()).decode('utf-8')

    except Exception as e:
        logger.error(f"Error encoding image to base64: {e}")
        # Callers treat an empty string as "no preview available".
        return ""
|
| 610 |
+
|
| 611 |
+
def validate_image_file(self, file, check_content: bool = True, check_dimensions: bool = True) -> Dict[str, Any]:
    """
    Complete validation of an image file, checking format, dimensions and quality.
    Combines Steps 2 & 3: Interface design, format validation,
    and dimension/quality validation.

    Validation is a short-circuit pipeline: extension -> size -> MIME ->
    dimensions -> quality. Each stage returns early with result["valid"]
    still False on failure; only reaching the end flips it to True.
    Quality problems are reported as warnings/suggestions and do NOT
    invalidate the file.

    Args:
        file: The uploaded file object from Streamlit
        check_content: Whether to also validate the file content
        check_dimensions: Whether to validate image dimensions and quality

    Returns:
        Dict with validation results and messages
    """
    # "valid" starts False and is only set True after all checks pass.
    result = {
        "valid": False,
        "messages": [],
        "warnings": [],
        "file_info": {},
        "dimensions": {},
        "quality": {}
    }

    if file is None:
        result["messages"].append("No file selected.")
        return result

    # Validate file extension
    ext_valid, ext_msg = self.validate_file_extension(file.name)
    result["messages"].append(ext_msg)

    if not ext_valid:
        return result

    # Check file size
    file_size = len(file.getvalue())
    result["file_info"]["size"] = file_size
    result["file_info"]["name"] = file.name

    if file_size > self.max_file_size:
        result["messages"].append(f"File is too large. Maximum size: {self.max_file_size/1024/1024:.1f}MB")
        return result

    # Validate MIME type if requested (content sniffing, not just extension)
    if check_content:
        mime_valid, mime_msg = self.validate_file_mime(file.getvalue())
        result["messages"].append(mime_msg)

        if not mime_valid:
            return result

    # STEP 3: Validate dimensions and quality if requested
    if check_dimensions:
        try:
            # Convert file to image for analysis
            # (uses getvalue(), so the file's read position is untouched)
            file_bytes = np.asarray(bytearray(file.getvalue()), dtype=np.uint8)
            img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)

            # Validate dimensions
            dimension_results = self.validate_image_dimensions(img)
            result["dimensions"] = dimension_results

            # Add dimension warnings to main result
            result["warnings"].extend(dimension_results.get("warnings", []))
            result["messages"].extend(dimension_results.get("messages", []))

            # If dimensions are invalid, mark the entire result as invalid
            if not dimension_results.get("valid", True):
                result["valid"] = False
                return result

            # Check quality (blur, brightness, contrast) — advisory only
            quality_results = self.check_image_quality(img)
            result["quality"] = quality_results

            # Add quality warnings
            result["warnings"].extend(quality_results.get("warnings", []))

            # Add suggestions as messages
            for suggestion in quality_results.get("suggestions", []):
                result["messages"].append(suggestion)

        except Exception as e:
            # Dimension/quality analysis is best-effort: a failure here is
            # downgraded to a warning and does not reject the file.
            logger.error(f"Error validating image dimensions/quality: {e}")
            result["warnings"].append(f"Could not validate dimensions or quality: {str(e)}")

    # All validations passed
    result["valid"] = True
    return result
|
| 700 |
+
|
| 701 |
+
def setup_image_upload_interface(self, st):
    """
    Implements the image upload interface according to specification.
    Combines Steps 1, 2 & 3: Interface design, format validation,
    and dimension/quality validation.

    Renders the uploader, runs validate_image_file() on the selection,
    and shows either a two-column preview/analysis layout (valid file)
    or error guidance (invalid file).

    Args:
        st: The Streamlit module instance

    Returns:
        The uploaded_file object if valid, None otherwise
    """
    st.subheader("1. Image Upload and Validation")

    # Clear information about supported formats
    st.info(f"Please upload an image for facial analysis. {self.get_format_info()}")

    # Implement the file selection component
    uploaded_file = st.file_uploader(
        "Select an image",
        type=self.get_supported_formats(),
        help="Use clear, well-lit images for best results."
    )

    # Implement validation if a file is uploaded
    if uploaded_file is not None:
        with st.spinner("Validating image..."):
            # Complete validation including dimensions and quality (Step 3)
            validation_result = self.validate_image_file(
                uploaded_file,
                check_content=True,
                check_dimensions=True
            )

        # Display validation results with a space-efficient two-column layout
        if validation_result["valid"]:
            # Basic file information
            file_info = validation_result["file_info"]
            size_kb = file_info["size"] / 1024
            st.success(f"✅ Valid image: {file_info['name']} ({size_kb:.1f} KB)")

            # Create two columns for better space usage
            image_col, info_col = st.columns([1, 1])

            with image_col:
                # Display the image
                try:
                    # Convert the uploaded file to a format that can be displayed
                    file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
                    img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
                    # st.image expects RGB; cv2 decodes as BGR
                    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                    # Reset file pointer for subsequent reads
                    uploaded_file.seek(0)

                    # Display the image in the left column
                    # NOTE(review): use_column_width is deprecated in newer
                    # Streamlit in favor of use_container_width — confirm
                    # against the pinned Streamlit version before changing.
                    st.image(img_rgb, caption="Uploaded Image", use_column_width=True)
                except Exception as e:
                    st.warning(f"Could not display image preview: {str(e)}")

            with info_col:
                # Create a compact display for all image information
                st.markdown("### Image Analysis")

                # STEP 3: Show dimension information compactly
                if "dimensions" in validation_result and validation_result["dimensions"]:
                    dims = validation_result["dimensions"]
                    if "width" in dims and "height" in dims:
                        # Heuristic: the dimension validator's messages say
                        # "optimal" when the size is in the preferred range.
                        dim_status = "✅ Optimal" if "optimal" in " ".join(dims.get("messages", [])).lower() else "ℹ️ Acceptable"
                        st.write(f"**Dimensions:** {dims['width']}x{dims['height']} pixels ({dim_status})")

                # STEP 3: Show quality information with a more compact UI
                if "quality" in validation_result and validation_result["quality"]:
                    quality = validation_result["quality"]
                    if "quality_score" in quality:
                        # Convert quality score (0-1) to percentage
                        quality_percent = int(quality["quality_score"] * 100)

                        # Show quality with a more compact display
                        quality_label = "Good" if quality_percent >= 70 else "Fair" if quality_percent >= 40 else "Low"

                        # Display quality score with progress bar
                        st.write(f"**Image Quality:** {quality_percent}% ({quality_label})")
                        st.progress(quality_percent/100)

                        # Show metrics in a more compact way
                        metrics_cols = st.columns(3)
                        with metrics_cols[0]:
                            st.metric("Sharpness", "Good" if not quality["is_blurry"] else "Poor")
                        with metrics_cols[1]:
                            st.metric("Brightness", f"{int(quality['brightness']*100)}%")
                        with metrics_cols[2]:
                            st.metric("Contrast", f"{int(quality['contrast']*100)}%")

                # Show any warnings in a compact expandable section
                if validation_result.get("warnings", []):
                    with st.expander("⚠️ Recommendations"):
                        for i, warning in enumerate(validation_result.get("warnings", [])):
                            st.write(f"{i+1}. {warning}")
        else:
            # Handle invalid images - show error messages
            # Separate error messages and informational messages
            error_messages = []
            info_messages = []

            for message in validation_result["messages"]:
                if "invalid" in message.lower() or "unsupported" in message.lower() or "error" in message.lower():
                    error_messages.append(message)
                elif "valid" in message.lower():
                    info_messages.append(message)

            # Display error messages
            for msg in error_messages:
                st.error(f"❌ {msg}")

            # If there are errors, show instructions for correction
            if error_messages:
                st.warning("""
                Please select an image that meets the following requirements:
                - Format: JPG, JPEG or PNG
                - Maximum size: 10MB
                - Minimum resolution: 320x240 pixels
                """)

            # If there are critical errors, return None to prevent further processing
            return None

    return uploaded_file
|
services/model_service.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Model service for EmotionMirror application.
|
| 3 |
+
Handles loading and management of YOLO models.
|
| 4 |
+
"""
|
| 5 |
+
import os
|
| 6 |
+
import logging
|
| 7 |
+
from typing import Dict, Any, Optional
|
| 8 |
+
import torch
|
| 9 |
+
from ultralytics import YOLO
|
| 10 |
+
|
| 11 |
+
from config import settings
|
| 12 |
+
|
| 13 |
+
# Configure logging
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
class ModelService:
    """Singleton service that loads, caches, and reports on YOLO models."""

    _instance = None
    # Class-level cache shared by the singleton: model_type -> YOLO instance.
    _models = {}

    def __new__(cls):
        """Create the single shared instance on first construction."""
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        """Run one-time initialization; subsequent constructions are no-ops."""
        if self._initialized:
            return
        self._initialized = True
        logger.info("Initializing ModelService")

    def load_model(self, model_type: str) -> Optional[YOLO]:
        """
        Load (or fetch from cache) the YOLO model for *model_type*.

        Args:
            model_type: Type of model to load (detection, segmentation, pose, classification)

        Returns:
            YOLO model instance or None if loading fails
        """
        cached = self._models.get(model_type)
        if cached is not None:
            logger.info(f"Using cached model for {model_type}")
            return cached

        if model_type not in settings.MODEL_MAPPING:
            logger.error(f"Invalid model type: {model_type}")
            return None

        try:
            model_file = settings.MODEL_MAPPING[model_type]
            logger.info(f"Loading {model_type} model: {model_file}")

            # Prefer a locally bundled weights file when one exists;
            # otherwise let Ultralytics resolve/download the named weights.
            model_path = os.path.join(settings.MODELS_DIR, model_file)
            if os.path.exists(model_path):
                logger.info(f"Loading model from local path: {model_path}")
                loaded = YOLO(model_path)
            else:
                logger.info(f"Model not found locally, downloading from Ultralytics: {model_file}")
                loaded = YOLO(model_file)

            self._models[model_type] = loaded
            return loaded

        except Exception as e:
            logger.error(f"Error loading model {model_type}: {str(e)}")
            return None

    def get_model_info(self, model_type: str) -> Dict[str, Any]:
        """
        Describe one model: weights file, local availability, load state.

        Args:
            model_type: Type of model

        Returns:
            Dictionary with model information
        """
        if model_type not in settings.MODEL_MAPPING:
            return {"error": f"Invalid model type: {model_type}"}

        model_file = settings.MODEL_MAPPING[model_type]
        model_path = os.path.join(settings.MODELS_DIR, model_file)

        return {
            "type": model_type,
            "file": model_file,
            "available_locally": os.path.exists(model_path),
            "is_loaded": model_type in self._models
        }

    def get_available_models(self) -> Dict[str, Dict[str, Any]]:
        """
        Get information about all available models.

        Returns:
            Dictionary mapping each configured model type to its info dict.
        """
        return {mt: self.get_model_info(mt) for mt in settings.MODEL_MAPPING}
|
utils/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Utilities package for EmotionMirror application.
|
| 3 |
+
Contains helper functions and utility classes.
|
| 4 |
+
"""
|
utils/download_models.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Script para descargar modelos adicionales necesarios para EmotionMirror
|
| 3 |
+
"""
|
| 4 |
+
import os
|
| 5 |
+
import sys
|
| 6 |
+
import logging
|
| 7 |
+
import requests
|
| 8 |
+
import bz2
|
| 9 |
+
import shutil
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
|
| 12 |
+
# Configurar logging
|
| 13 |
+
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
# URL del modelo de landmarks faciales
|
| 17 |
+
LANDMARKS_MODEL_URL = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
|
| 18 |
+
|
| 19 |
+
def get_project_root():
    """Return the project root directory (two levels above this file)."""
    return Path(__file__).parent.parent
|
| 23 |
+
|
| 24 |
+
def create_models_dir():
    """Ensure the project-level ``models`` directory exists and return its path."""
    models_dir = get_project_root() / "models"
    # exist_ok avoids a race/failure when the directory is already present.
    models_dir.mkdir(exist_ok=True)
    return models_dir
|
| 30 |
+
|
| 31 |
+
def download_file(url, destination):
    """Download *url* to *destination*, streaming in 8 KiB chunks.

    Returns True on success, False on any error (the error is logged).
    """
    try:
        logger.info(f"Descargando {url} en {destination}")
        # stream=True avoids loading the whole body into memory at once.
        response = requests.get(url, stream=True)
        response.raise_for_status()  # Raise on HTTP error status codes

        with open(destination, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:  # skip keep-alive chunks, which arrive empty
                    f.write(chunk)

        logger.info(f"Descarga completada: {destination}")
        return True
    except Exception as e:
        logger.error(f"Error descargando archivo: {e}")
        return False
|
| 48 |
+
|
| 49 |
+
def extract_bz2(source, destination):
    """Decompress a .bz2 archive at *source* into *destination*.

    Returns True on success, False on any error (the error is logged).
    """
    try:
        logger.info(f"Descomprimiendo {source} en {destination}")
        # copyfileobj streams the decompression without holding the whole
        # payload in memory.
        with bz2.BZ2File(source, 'rb') as compressed:
            with open(destination, 'wb') as extracted:
                shutil.copyfileobj(compressed, extracted)

        logger.info(f"Descompresión completada: {destination}")
        return True
    except Exception as e:
        logger.error(f"Error descomprimiendo archivo: {e}")
        return False
|
| 62 |
+
|
| 63 |
+
def download_landmarks_model():
    """Download and set up the dlib 68-point facial landmarks model.

    Returns True when the model file is present (pre-existing or freshly
    downloaded and decompressed), False on any failure.
    """
    models_dir = create_models_dir()
    model_compressed = models_dir / "shape_predictor_68_face_landmarks.dat.bz2"
    model_path = models_dir / "shape_predictor_68_face_landmarks.dat"

    # Skip the download if the model already exists
    if model_path.exists():
        logger.info(f"El modelo ya existe en {model_path}")
        return True

    # Download the compressed model archive
    if not download_file(LANDMARKS_MODEL_URL, model_compressed):
        return False

    # Extract the model from the archive
    if not extract_bz2(model_compressed, model_path):
        return False

    # Remove the compressed archive after extraction (best-effort:
    # failure to delete is only a warning, the model is already in place)
    try:
        os.remove(model_compressed)
        logger.info(f"Archivo comprimido eliminado: {model_compressed}")
    except Exception as e:
        logger.warning(f"No se pudo eliminar el archivo comprimido: {e}")

    return True
|
| 90 |
+
|
| 91 |
+
if __name__ == "__main__":
    # Script entry point: download all auxiliary models, exiting with a
    # non-zero status code if any download fails.
    logger.info("Iniciando descarga de modelos adicionales...")

    if download_landmarks_model():
        logger.info("Modelo de landmarks faciales descargado correctamente.")
    else:
        logger.error("Error descargando el modelo de landmarks faciales.")
        sys.exit(1)

    logger.info("Todos los modelos han sido descargados correctamente.")
|
utils/export_utils.py
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Export Utilities for EmotionMirror Application
|
| 3 |
+
|
| 4 |
+
This module provides functions for exporting analysis data to various formats,
|
| 5 |
+
including CSV, JSON, and visual reports.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import os
|
| 9 |
+
import csv
|
| 10 |
+
import json
|
| 11 |
+
import logging
|
| 12 |
+
from typing import Dict, List, Any, Optional
|
| 13 |
+
from datetime import datetime
|
| 14 |
+
import pandas as pd
|
| 15 |
+
import base64
|
| 16 |
+
from io import BytesIO, StringIO
|
| 17 |
+
|
| 18 |
+
# Configure logging
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
def export_to_json(data: Dict[str, Any], filepath: Optional[str] = None) -> str:
    """
    Serialize analysis data as pretty-printed JSON.

    Args:
        data: Data to export (must be JSON-serializable).
        filepath: Optional path; when given, the JSON is written to disk.

    Returns:
        The JSON string when ``filepath`` is None, otherwise the filepath
        that was written.

    Raises:
        Exception: Re-raised after logging if serialization or I/O fails.
    """
    try:
        serialized = json.dumps(data, indent=4)

        # No destination given: hand the string back to the caller.
        if filepath is None:
            return serialized

        with open(filepath, 'w', encoding='utf-8') as handle:
            handle.write(serialized)
        logger.info(f"Data exported to JSON file: {filepath}")
        return filepath

    except Exception as e:
        logger.error(f"Error exporting data to JSON: {e}")
        raise
|
| 46 |
+
|
| 47 |
+
def export_to_csv(data: Dict[str, Any], filepath: Optional[str] = None) -> str:
    """
    Export analysis data to CSV format.

    Each detected face becomes one CSV row; per-analysis fields (id,
    session, timestamp, ...) are repeated on every face row, and per-face
    feature/emotion scores are expanded into ``feature_*`` / ``emotion_*``
    columns.

    Args:
        data: Analysis data to export (expects an ``analyses`` list).
        filepath: Optional path to save the CSV file.

    Returns:
        CSV string if filepath is None, else filepath where saved.

    Raises:
        Exception: Re-raised after logging on any serialization/I/O error.
    """
    try:
        flat_rows = []

        for analysis in data.get('analyses', []):
            # Fields shared by every face row of this analysis.
            common = {
                'analysis_id': analysis.get('id'),
                'session_id': analysis.get('session_id'),
                'timestamp': analysis.get('timestamp'),
                'image_path': analysis.get('image_path'),
                'face_count': analysis.get('face_count'),
            }

            # Tags are serialized as a single comma-joined cell.
            if analysis.get('tags'):
                common['tags'] = ','.join(analysis['tags'])

            for face_index, face in enumerate(analysis.get('faces', [])):
                record = dict(common)
                record['face_index'] = face_index
                record['emotion'] = face.get('emotion')
                record['confidence'] = face.get('confidence')

                # Expand nested per-face dictionaries into flat columns.
                for name, value in face.get('features', {}).items():
                    record[f'feature_{name}'] = value
                for name, value in face.get('emotions', {}).items():
                    record[f'emotion_{name}'] = value

                flat_rows.append(record)

        if not flat_rows:
            logger.warning("No data to export to CSV")
            return "" if filepath is None else filepath

        frame = pd.DataFrame(flat_rows)

        if filepath:
            frame.to_csv(filepath, index=False)
            logger.info(f"Data exported to CSV file: {filepath}")
            return filepath

        # No destination given: return the CSV text directly.
        text_buffer = StringIO()
        frame.to_csv(text_buffer, index=False)
        return text_buffer.getvalue()

    except Exception as e:
        logger.error(f"Error exporting data to CSV: {e}")
        raise
|
| 112 |
+
|
| 113 |
+
def get_download_link(content: str, filename: str, mimetype: str) -> str:
    """
    Generate an HTML download link for the given text content.

    Args:
        content: String content to download.
        filename: Name for the downloaded file (used in the ``download``
            attribute and the link label).
        mimetype: MIME type of the file (e.g. ``text/csv``).

    Returns:
        HTML anchor tag embedding the content as a base64 data URI, or an
        empty string if encoding fails.
    """
    try:
        b64 = base64.b64encode(content.encode()).decode()
        href = f'data:{mimetype};base64,{b64}'
        # Bug fix: the filename parameter was previously ignored — a
        # hard-coded placeholder was emitted in the download attribute and
        # the link label. Use the caller-supplied name instead.
        return (
            f'<a href="{href}" download="{filename}" '
            f'class="download-button">Download {filename}</a>'
        )

    except Exception as e:
        logger.error(f"Error creating download link: {e}")
        return ""
|
| 133 |
+
|
| 134 |
+
def generate_emotion_summary(data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Summarize emotion data across multiple analyses.

    Args:
        data: Dictionary with an ``analyses`` list, each containing a
            ``faces`` list with per-face ``emotion``/``confidence`` fields.

    Returns:
        Dictionary with totals, per-emotion counts/percentages and
        average confidence per emotion; on failure, a dict carrying the
        error message and zeroed totals.
    """
    try:
        tallies: Dict[str, int] = {}
        confidences: Dict[str, list] = {}
        face_total = 0

        # Walk every detected face across all analyses.
        for analysis in data.get('analyses', []):
            for face in analysis.get('faces', []):
                label = face.get('emotion')
                if label:
                    tallies[label] = tallies.get(label, 0) + 1
                    # Collect confidence values per emotion label.
                    confidences.setdefault(label, []).append(face.get('confidence', 0))
                face_total += 1

        analysis_total = len(data.get('analyses', []))

        # No faces at all: return an empty (but well-formed) summary.
        if face_total == 0:
            return {
                'total_analyses': analysis_total,
                'total_faces': 0,
                'emotion_counts': {},
                'emotion_percentages': {},
                'avg_confidence': {}
            }

        percentages = {
            label: (count / face_total) * 100
            for label, count in tallies.items()
        }
        mean_confidence = {
            label: sum(values) / len(values)
            for label, values in confidences.items() if values
        }

        return {
            'total_analyses': analysis_total,
            'total_faces': face_total,
            'emotion_counts': tallies,
            'emotion_percentages': percentages,
            'avg_confidence': mean_confidence
        }

    except Exception as e:
        logger.error(f"Error generating emotion summary: {e}")
        return {
            'error': str(e),
            'total_analyses': 0,
            'total_faces': 0
        }
|
utils/file_utils.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
File utility functions for EmotionMirror application.
|
| 3 |
+
"""
|
| 4 |
+
import os
|
| 5 |
+
import uuid
|
| 6 |
+
import logging
|
| 7 |
+
from typing import Set, Optional, Tuple
|
| 8 |
+
|
| 9 |
+
# Configure logging
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
def allowed_file(filename: str, allowed_extensions: Set[str]) -> bool:
    """
    Return True when *filename* carries one of the allowed extensions.

    The comparison is case-insensitive; files without any extension are
    always rejected.

    Args:
        filename: Name of the file to check.
        allowed_extensions: Set of allowed (lowercase) extensions.

    Returns:
        True if file extension is allowed, False otherwise.
    """
    if '.' not in filename:
        return False
    return filename.rsplit('.', 1)[1].lower() in allowed_extensions
|
| 27 |
+
|
| 28 |
+
def generate_unique_filename(filename: str) -> str:
    """
    Generate a collision-free filename, keeping the original extension.

    Args:
        filename: Original filename.

    Returns:
        A random hex name; the original extension (lower-cased) is
        appended when the input had one.
    """
    token = uuid.uuid4().hex
    if '.' not in filename:
        return token
    extension = filename.rsplit('.', 1)[1].lower()
    return f"{token}.{extension}"
|
| 43 |
+
|
| 44 |
+
def save_uploaded_file(uploaded_file, upload_dir: str) -> Tuple[bool, str, Optional[str]]:
    """
    Persist a Streamlit-uploaded file under a unique name.

    Args:
        uploaded_file: File object from Streamlit (needs ``.name`` and
            ``.getbuffer()``).
        upload_dir: Directory to save the file (created if missing).

    Returns:
        Tuple of (success, message, file_path); ``file_path`` is None on
        failure.
    """
    try:
        os.makedirs(upload_dir, exist_ok=True)

        # Derive a collision-free name that keeps the original extension.
        target_name = generate_unique_filename(uploaded_file.name)
        target_path = os.path.join(upload_dir, target_name)

        with open(target_path, "wb") as out:
            out.write(uploaded_file.getbuffer())

        return True, f"File saved successfully as {target_name}", target_path
    except Exception as e:
        logger.error(f"Error saving file: {e}")
        return False, f"Error saving file: {str(e)}", None
|
utils/image_visualization.py
ADDED
|
@@ -0,0 +1,283 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Image Visualization Module for EmotionMirror application.
|
| 3 |
+
|
| 4 |
+
This module contains functions for visualizing images with interactive controls
|
| 5 |
+
such as zoom, pan, and reset functionality.
|
| 6 |
+
"""
|
| 7 |
+
import streamlit as st
|
| 8 |
+
import numpy as np
|
| 9 |
+
from PIL import Image
|
| 10 |
+
import io
|
| 11 |
+
import logging
|
| 12 |
+
import base64
|
| 13 |
+
from typing import Dict, Any, Tuple, Optional
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
def get_image_download_link(img, filename="emotion_mirror_image.png", text="Download Image"):
    """
    Generate an HTML download link for an image.

    Args:
        img: PIL Image or numpy array.
        filename: Name of the file to download (used in the ``download``
            attribute of the anchor).
        text: Text to display for the download link.

    Returns:
        HTML string with download link, or None if encoding fails.
    """
    try:
        # Convert numpy array to PIL Image if necessary.
        if isinstance(img, np.ndarray):
            img_pil = Image.fromarray(img)
        else:
            img_pil = img

        # Serialize the image to PNG in memory, then base64-encode it so
        # it can be embedded directly in a data URI.
        buffered = io.BytesIO()
        img_pil.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        # Bug fix: the filename parameter was previously ignored — a
        # hard-coded placeholder was emitted in the download attribute.
        href = f'<a href="data:file/png;base64,{img_str}" download="{filename}">{text}</a>'
        return href
    except Exception as e:
        logger.error(f"Error creating image download link: {e}")
        return None
|
| 49 |
+
|
| 50 |
+
def display_image_with_controls(image,
                                title: str = "Image Viewer",
                                allow_zoom: bool = True,
                                allow_download: bool = True) -> Dict[str, Any]:
    """
    Display an image in Streamlit with zoom, reset and download controls.

    Args:
        image: PIL Image or numpy array to display.
        title: Title to display above the image.
        allow_zoom: Whether to show the zoom slider.
        allow_download: Whether to show a download link.

    Returns:
        Dict with ``success`` plus, on success, the applied ``zoom_factor``,
        the original ``image_dimensions`` and the ``displayed_dimensions``;
        on failure, a ``message`` describing the error.
    """
    try:
        # Ensure image is not None
        if image is None:
            return {"success": False, "message": "No image provided"}

        # Convert numpy array to PIL Image if necessary
        if isinstance(image, np.ndarray):
            pil_image = Image.fromarray(image)
        else:
            pil_image = image

        # Store original image dimensions
        original_width, original_height = pil_image.size

        # Create container for image display
        st.subheader(title)

        # Image controls in columns for better layout.
        # NOTE(review): col2 is created for layout spacing only and is
        # never populated with a widget.
        col1, col2, col3 = st.columns([1, 2, 1])

        # Zoom functionality
        zoom_factor = 1.0
        if allow_zoom:
            with col1:
                zoom_factor = st.slider(
                    "Zoom",
                    min_value=0.5,
                    max_value=2.0,
                    value=1.0,
                    step=0.1,
                    help="Adjust the zoom level of the image"
                )

        # Reset button in the third column.
        # NOTE(review): the slider above has already rendered when this
        # runs, so the reset takes visual effect only on the next rerun.
        with col3:
            reset_pressed = st.button("Reset View", help="Reset zoom and view settings")
            if reset_pressed:
                zoom_factor = 1.0
                st.session_state.zoom_factor = 1.0  # Store in session state

        # Apply zoom if needed
        if zoom_factor != 1.0:
            # Calculate new dimensions
            new_width = int(original_width * zoom_factor)
            new_height = int(original_height * zoom_factor)

            # Resize the image (LANCZOS keeps quality when scaling)
            resized_image = pil_image.resize((new_width, new_height), Image.LANCZOS)
        else:
            resized_image = pil_image

        # Display the image
        st.image(resized_image, use_column_width=True)

        # Add download option (always downloads the un-zoomed original)
        if allow_download:
            download_link = get_image_download_link(pil_image)
            if download_link:
                st.markdown(download_link, unsafe_allow_html=True)

        return {
            "success": True,
            "zoom_factor": zoom_factor,
            "image_dimensions": (original_width, original_height),
            "displayed_dimensions": resized_image.size
        }

    except Exception as e:
        logger.error(f"Error displaying image with controls: {e}")
        return {"success": False, "message": str(e)}
|
| 136 |
+
|
| 137 |
+
def handle_image_viewer(image_data,
                        title: str = "Image Viewer",
                        description: Optional[str] = None,
                        allow_zoom: bool = True,
                        allow_reset: bool = True,
                        allow_download: bool = True) -> Dict[str, Any]:
    """
    Complete image viewer with all controls and options.

    Lays out a controls column (zoom slider, reset button, download link)
    next to the image itself.

    Args:
        image_data: PIL Image or numpy array.
        title: Title for the image viewer.
        description: Optional description text rendered below the title.
        allow_zoom: Whether to include zoom controls.
        allow_reset: Whether to include the reset button.
        allow_download: Whether to include the download option.

    Returns:
        Dict with ``success``; on success also the ``zoom_factor`` and
        ``image_displayed``; on failure a ``message``.
    """
    st.subheader(title)

    if description:
        st.markdown(description)

    # Create container for the viewer
    viewer_container = st.container()

    with viewer_container:
        # Create a two-column layout for controls and image
        control_col, image_col = st.columns([1, 3])

        with control_col:
            st.markdown("### Controls")

            # Zoom controls
            zoom_factor = 1.0
            if allow_zoom:
                zoom_factor = st.slider(
                    "Zoom",
                    min_value=0.5,
                    max_value=3.0,
                    value=1.0,
                    step=0.1,
                    help="Adjust the zoom level"
                )

            # Reset button.
            # NOTE(review): the fixed key "reset_view" will collide if this
            # viewer is rendered more than once on the same page — confirm
            # single-use or parameterize the key.
            if allow_reset:
                if st.button("Reset View", key="reset_view"):
                    zoom_factor = 1.0
                    # Reset any other relevant state

            # Add some spacing
            st.markdown("<br>", unsafe_allow_html=True)

            # Download option
            if allow_download and image_data is not None:
                download_link = get_image_download_link(image_data)
                if download_link:
                    st.markdown("### Download")
                    st.markdown(download_link, unsafe_allow_html=True)

        with image_col:
            if image_data is not None:
                # Process the image based on controls
                try:
                    # Convert to PIL if numpy array
                    if isinstance(image_data, np.ndarray):
                        pil_image = Image.fromarray(image_data)
                    else:
                        pil_image = image_data

                    # Apply zoom if needed
                    if zoom_factor != 1.0:
                        original_width, original_height = pil_image.size
                        new_width = int(original_width * zoom_factor)
                        new_height = int(original_height * zoom_factor)
                        displayed_image = pil_image.resize((new_width, new_height), Image.LANCZOS)
                    else:
                        displayed_image = pil_image

                    # Display the image
                    st.image(displayed_image, use_column_width=True)

                    return {
                        "success": True,
                        "zoom_factor": zoom_factor,
                        "image_displayed": True
                    }

                except Exception as e:
                    st.error(f"Error displaying image: {str(e)}")
                    logger.error(f"Error in image viewer: {e}")
                    return {"success": False, "message": str(e)}
            else:
                st.info("No image to display. Please upload an image first.")
                return {"success": False, "message": "No image data provided"}
|
| 235 |
+
|
| 236 |
+
def create_image_tabs(original_image, processed_image=None):
    """
    Show the original (and optionally processed) image in Streamlit tabs.

    Args:
        original_image: The original image (PIL or numpy array).
        processed_image: The processed image (PIL or numpy array), optional.

    Returns:
        Dict with ``success`` and ``tabs_created`` (True only when both
        images were shown in separate tabs).
    """
    # Guard clause: with a single image there is nothing to tab over.
    if processed_image is None:
        single_result = display_image_with_controls(
            original_image,
            title="Uploaded Image",
            allow_zoom=True,
            allow_download=True
        )
        return {
            "success": single_result.get("success", False),
            "tabs_created": False
        }

    original_tab, processed_tab = st.tabs(["Original Image", "Processed Image"])

    with original_tab:
        original_result = display_image_with_controls(
            original_image,
            title="Original Image",
            allow_zoom=True,
            allow_download=True
        )

    with processed_tab:
        processed_result = display_image_with_controls(
            processed_image,
            title="Processed Image",
            allow_zoom=True,
            allow_download=True
        )

    return {
        "success": original_result.get("success", False) and processed_result.get("success", False),
        "tabs_created": True
    }
|
utils/page_handlers.py
ADDED
|
@@ -0,0 +1,290 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Page Handlers Module for EmotionMirror application.
|
| 3 |
+
|
| 4 |
+
This module contains handler functions for different pages in the EmotionMirror application.
|
| 5 |
+
Following the principle of modularization, each page's logic is separated into its own function.
|
| 6 |
+
"""
|
| 7 |
+
import logging
|
| 8 |
+
import streamlit as st
|
| 9 |
+
import cv2
|
| 10 |
+
import numpy as np
|
| 11 |
+
from utils.preprocessing_ui import show_preprocessing_ui, get_processing_image
|
| 12 |
+
from utils.image_visualization import display_image_with_controls, create_image_tabs, handle_image_viewer
|
| 13 |
+
from PIL import Image
|
| 14 |
+
import io
|
| 15 |
+
import os
|
| 16 |
+
import json
|
| 17 |
+
import time
|
| 18 |
+
from typing import Dict, Any
|
| 19 |
+
from datetime import datetime
|
| 20 |
+
from utils.export_utils import export_to_json, export_to_csv, get_download_link, generate_emotion_summary
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
def handle_visual_analysis(agent_manager, image_service, db_service, uploaded_file=None):
    """
    Handle the Visual Analysis page logic.

    Renders the five numbered page sections (upload, visualization,
    analysis details, preprocessing, emotion analysis placeholder) and
    stores intermediate images/paths in ``st.session_state``.

    Args:
        agent_manager: The agent manager instance (must expose ``get_agent``).
        image_service: The image service instance (validation, preprocessing,
            saving).
        db_service: The database service instance.
            NOTE(review): currently unused in this handler — confirm it is
            reserved for the upcoming emotion-analysis step.
        uploaded_file: The uploaded file, if any; when None a file uploader
            widget is rendered instead.
    """
    st.header("Visual Emotion Analysis")
    st.markdown("""
    Upload an image to analyze emotions.
    For best results, use a clear image of a face with good lighting.
    """)

    # Ensure agent_manager is properly initialized
    if not agent_manager:
        st.error("Agent manager is not initialized correctly. Please refresh the page.")
        return

    # Initialize the visual agent at the start. A missing agent only warns
    # (does not abort) because the upload/preview flow works without it.
    visual_agent = agent_manager.get_agent("VisualAgent")
    if not visual_agent:
        st.warning("Visual agent not available. The system is initializing or there is a configuration issue.")
        logger.error("Failed to get VisualAgent from agent_manager")

    # Create numbered sections to match local version
    st.subheader("1. Upload an Image")

    # Add reset button for clearing current image
    if "original_image" in st.session_state:
        col1, col2 = st.columns([3, 1])
        with col2:
            if st.button("Clear Current Image", key="clear_image"):
                # Clear the session state so the next upload starts fresh.
                if "original_image" in st.session_state:
                    del st.session_state["original_image"]
                if "processed_image" in st.session_state:
                    del st.session_state["processed_image"]
                if "current_image_path" in st.session_state:
                    del st.session_state["current_image_path"]
                # NOTE(review): st.experimental_rerun() is deprecated in
                # newer Streamlit releases (st.rerun) — confirm target version.
                st.experimental_rerun()

    # Create file uploader
    if uploaded_file is None:
        uploaded_file = st.file_uploader(
            "Choose an image...",
            type=["jpg", "jpeg", "png"],
            help="Upload a clear image of a face for analysis."
        )

    # Display information about detection methods
    with st.expander("About the Detection Methods", expanded=False):
        st.markdown("""
        ### About the Detection Methods

        Currently using: **Advanced Detection**

        * **Basic detection** is faster but less accurate. It works by analyzing simple facial features.
        * **Advanced detection (DeepFace)** uses deep learning models that are trained on thousands of faces to recognize subtle emotional cues.

        You can change the default detection method in the sidebar settings.
        """)

    # Display image and interface when uploaded
    if uploaded_file is not None:
        try:
            # Process the uploaded file: read raw bytes for OpenCV decode.
            file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
            uploaded_file.seek(0)  # Reset the file pointer for further processing

            # Validate image file - includes format, size, and dimensions
            validation_result = image_service.validate_image_file(
                uploaded_file,
                check_content=True,
                check_dimensions=True
            )

            # If the image is valid, process it
            if validation_result["valid"]:
                # Process the uploaded image to improve quality if possible
                try:
                    # Load image for processing with OpenCV (this will be in BGR format)
                    img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)

                    # Convert from BGR to RGB for preprocessing display
                    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                    # Store original RGB image for display (only once per
                    # upload; cleared by the "Clear Current Image" button).
                    if "original_image" not in st.session_state:
                        st.session_state.original_image = img_rgb.copy()

                    # Determine which image to process based on preprocessing settings
                    use_improved = st.session_state.get("use_improved_image", False)

                    # Choose image for processing
                    img_to_process = get_processing_image(img_rgb, image_service, use_improved)

                    # Step 5: Display the image with visualization controls
                    st.subheader("2. Image Visualization")
                    st.markdown("""
                    View the uploaded image with basic controls. You can:
                    - Zoom in/out to see details
                    - Reset view to original size
                    - Download the current image
                    """)

                    # Check if we have both original and processed images
                    if "processed_image" in st.session_state:
                        # Create tabs to display original and processed images
                        image_tabs_result = create_image_tabs(
                            st.session_state.original_image,
                            st.session_state.processed_image
                        )
                    else:
                        # Display just the original image
                        display_result = display_image_with_controls(
                            st.session_state.original_image,
                            title="Uploaded Image",
                            allow_zoom=True,
                            allow_download=True
                        )

                    # Store the processed image if available for comparison.
                    # NOTE(review): stored AFTER rendering, so the processed
                    # tab only appears on the following rerun — confirm this
                    # is the intended UX.
                    if use_improved and img_to_process is not None:
                        st.session_state.processed_image = img_to_process

                    # Add space between sections
                    st.markdown("<div style='height: 20px;'></div>", unsafe_allow_html=True)

                    # Display image analysis from validation
                    st.subheader("3. Image Analysis")
                    analysis_expander = st.expander("View Image Analysis Details", expanded=False)

                    with analysis_expander:
                        # Show image metadata
                        if "image_metadata" in validation_result:
                            metadata = validation_result["image_metadata"]
                            st.markdown("#### Image Details")

                            # Create columns for metadata display
                            meta_cols = st.columns(3)

                            with meta_cols[0]:
                                st.markdown("**Dimensions**")
                                width = metadata.get("width", 0)
                                height = metadata.get("height", 0)
                                st.write(f"{width} × {height}")

                            with meta_cols[1]:
                                st.markdown("**Format**")
                                img_format = metadata.get("format", "Unknown")
                                st.write(f"{img_format}")

                            with meta_cols[2]:
                                st.markdown("**Size**")
                                size_kb = metadata.get("size_kb", 0)
                                st.write(f"{size_kb:.1f} KB")

                            # Quality metrics if available
                            # (sharpness/brightness/contrast are assumed to
                            # be 0..1 fractions — rendered as percentages)
                            if "quality" in metadata:
                                st.markdown("#### Quality Metrics")
                                quality = metadata["quality"]

                                # Create columns for quality metrics
                                metric_cols = st.columns(3)

                                with metric_cols[0]:
                                    st.markdown("**Sharpness**")
                                    sharpness = int(quality.get("sharpness", 0) * 100)
                                    st.write(f"{sharpness}%")

                                with metric_cols[1]:
                                    st.markdown("**Brightness**")
                                    brightness = int(quality.get("brightness", 0) * 100)
                                    st.write(f"{brightness}%")

                                with metric_cols[2]:
                                    st.markdown("**Contrast**")
                                    # NOTE(review): 0.26 default looks like a
                                    # magic fallback — confirm its origin.
                                    contrast = int(quality.get("contrast", 0.26) * 100)
                                    st.write(f"{contrast}%")

                    # Add space between sections
                    st.markdown("<div style='height: 20px;'></div>", unsafe_allow_html=True)

                    # NOW display the preprocessing UI AFTER the Image Analysis section
                    st.subheader("4. Image Preprocessing")
                    preprocessing_result = show_preprocessing_ui(image_service, img_rgb)

                    if not preprocessing_result.get("success", False):
                        st.error(f"Error in preprocessing: {preprocessing_result.get('message', 'Unknown error')}")

                    # Store path in session state for future use
                    if "current_image_path" not in st.session_state:
                        # Save the file for reference
                        save_path = image_service.save_uploaded_image(img_to_process)
                        st.session_state["current_image_path"] = save_path

                    # Add some space to improve layout
                    st.markdown("<div style='height: 20px;'></div>", unsafe_allow_html=True)

                    # Analysis section (placeholder until emotion analysis ships)
                    st.subheader("5. Emotion Analysis")
                    st.info("Image successfully uploaded. Emotion analysis functionality will be available soon.")

                    # Add a disabled button as placeholder for future functionality
                    st.button("Analyze Emotions (Coming Soon)", disabled=True, key="analyze_button")

                except Exception as e:
                    logger.error(f"Error processing image: {e}", exc_info=True)
                    st.error(f"Error processing image: {str(e)}")
            else:
                # Show validation issues
                st.error("Image validation failed:")
                for issue in validation_result["issues"]:
                    st.warning(issue)

        except Exception as e:
            logger.error(f"Error in Visual Analysis: {e}", exc_info=True)
            st.error(f"Error processing image: {str(e)}")
|
| 245 |
+
|
| 246 |
+
def handle_history_page(db_service):
    """
    Render the Analysis History page.

    Args:
        db_service: The database service instance
    """
    st.header("Analysis History")
    st.markdown("View your previous analyses and export results.")

    # Numbered tab labels keep the navigation order explicit for users.
    recent_tab, stats_tab, export_tab = st.tabs(
        ["1. Recent Analyses", "2. Statistics", "3. Export Data"]
    )

    with recent_tab:
        # Placeholder: recent-analyses listing not yet implemented.
        pass

    with stats_tab:
        # Placeholder: statistics view not yet implemented.
        pass

    with export_tab:
        # Placeholder: export functionality not yet implemented.
        pass
+
def handle_about_page():
    """Render the About page: application summary, version, features, and tech stack.

    The page content is a single static markdown block; no services or
    session state are touched here.
    """
    st.header("About EmotionMirror")
    st.markdown("""
    EmotionMirror is an application that uses computer vision to analyze emotions in facial expressions.
    
    **Version:** 0.1.3 (Phase 1.3)
    **Features:**
    - Facial emotion detection using computer vision
    - Support for image uploads
    - Analysis history and data export
    - Image preprocessing for improved detection
    
    **Technology:**
    - Python
    - OpenCV for image processing
    - Deep learning models for emotion classification
    - Streamlit for the user interface
    """)
utils/preprocessing_ui.py
ADDED
|
@@ -0,0 +1,336 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Image Preprocessing UI Component for EmotionMirror application.
|
| 3 |
+
|
| 4 |
+
This module implements the UI components for showing image preprocessing options,
|
| 5 |
+
including the comparison between original and processed images.
|
| 6 |
+
Part of Step 4: Implementation of preprocessing techniques.
|
| 7 |
+
"""
|
| 8 |
+
import streamlit as st
|
| 9 |
+
import logging
|
| 10 |
+
import numpy as np
|
| 11 |
+
import cv2
|
| 12 |
+
from typing import Dict, Any, Optional
|
| 13 |
+
import io
|
| 14 |
+
from PIL import Image
|
| 15 |
+
import os
|
| 16 |
+
import time
|
| 17 |
+
import traceback
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
def show_preprocessing_ui(image_service, img: np.ndarray) -> Dict[str, Any]:
    """
    Complete UI handler for image preprocessing - this is the main entry point
    that should be called from app.py.

    Args:
        image_service: The image service instance (must provide preprocess_image)
        img: The image to preprocess (numpy array; presumably RGB — confirm with caller)

    Returns:
        Dict with keys:
            success (bool): whether the UI rendered without error
            message (str): human-readable status
            using_preprocessed (bool, on success): current user choice
            error (str, on failure): the exception text
    """
    try:
        # Always refresh the stored original. The previous guard
        # ("if 'original_image' not in st.session_state") left a stale copy
        # of the FIRST upload in place when the user uploaded a new image.
        st.session_state.original_image = img.copy()

        # Apply image preprocessing
        preprocessing_result = image_service.preprocess_image(img)

        # Store preprocessed image result in session state
        st.session_state.preprocessing_result = preprocessing_result

        # Print debugging information
        logger.info("[DEBUG] Preprocessing result obtained successfully")
        logger.info(f"[DEBUG] Improvements: {preprocessing_result.get('improvements', [])}")

        # Show preprocessing status if already set
        display_processing_status()

        # Display preprocessing UI with expandable section
        show_preprocessing_expandable(image_service, preprocessing_result)

        # Return success information
        return {
            "success": True,
            "message": "Preprocessing UI displayed successfully",
            "using_preprocessed": st.session_state.get("using_preprocessed_image", False)
        }

    except Exception as e:
        # Log detailed error information
        error_trace = traceback.format_exc()
        logger.error(f"Error in preprocessing UI: {str(e)}\n{error_trace}")

        # Show error to user
        st.error(f"Error preparing image preprocessing: {str(e)}")

        # Return error information
        return {
            "success": False,
            "message": f"Error: {str(e)}",
            "error": str(e)
        }
| 76 |
+
def show_preprocessing_expandable(image_service, preprocessing_result: Dict[str, Any]) -> None:
    """
    Render the expandable preprocessing section with a side-by-side comparison.

    Self-contained implementation; on render failure it falls back to the
    legacy comparison view plus its controls.

    Args:
        image_service: The image service instance
        preprocessing_result: Dict with preprocessing results
    """
    # Nothing to show when no improvements were applied (covers missing or
    # empty "improvements", and a None/empty result dict).
    improvements = (preprocessing_result or {}).get("improvements")
    if not improvements:
        logger.info("No improvements to display")
        return

    try:
        with st.expander("Suggested improvements available", expanded=True):
            # Side-by-side before/after comparison.
            left, right = st.columns(2)

            with left:
                st.markdown("**Original Image**")
                st.image(preprocessing_result["original_image"], use_column_width=True)

            with right:
                st.markdown("**Improved Image**")
                st.image(preprocessing_result["processed_image"], use_column_width=True)

            # List every improvement that was applied.
            st.markdown("**Improvements applied:**")
            for item in improvements:
                st.markdown(f"- {item}")

            # Explain why preprocessing matters for detection quality.
            st.markdown("### Why these improvements help facial analysis")
            st.markdown("""
            **Technical Benefits:**
            - **Balanced contrast:** Enhances the visibility of facial features while reducing shadows and highlights
            - **Optimal brightness:** Ensures facial features are clearly distinguishable without over-exposure
            - **Proper sizing:** Maintains ideal dimensions for detection algorithms to recognize facial landmarks
            
            **Impact on Emotion Detection:**
            - **More accurate emotion classification:** Cleaner input images lead to more reliable emotion detection
            - **Better feature extraction:** Facial features like eyes, mouth, and eyebrows are more clearly defined
            - **Reduced noise and artifacts:** Minimizes false detections and improves confidence scores
            """)

            # Let the user choose which image variant to analyze.
            setup_image_selection_buttons(image_service, preprocessing_result)

    except Exception as e:
        logger.error(f"Error in expandable UI: {str(e)}")
        st.warning(f"Could not display image comparison: {str(e)}")

        # Fall back to the older, non-expandable display path.
        try:
            display_preprocessing_comparison(preprocessing_result)
            setup_preprocessing_controls(image_service, preprocessing_result)
        except Exception as fallback_error:
            logger.error(f"Fallback display also failed: {str(fallback_error)}")
| 137 |
+
def display_preprocessing_comparison(preprocessing_result: Dict[str, Any]) -> None:
    """
    Show the original/processed image comparison plus an explanation of the benefits.

    Args:
        preprocessing_result: Dictionary containing preprocessing results
    """
    # Guard clauses: require a result dict with a non-empty improvements list.
    if not preprocessing_result or "improvements" not in preprocessing_result:
        return
    if not preprocessing_result["improvements"]:
        return

    st.subheader("Image Enhancement Options")

    # Side-by-side before/after comparison.
    left, right = st.columns(2)

    with left:
        st.markdown("**Original Image**")
        st.image(preprocessing_result["original_image"], use_column_width=True)

    with right:
        st.markdown("**Improved Image**")
        st.image(preprocessing_result["processed_image"], use_column_width=True)

    # List every improvement that was applied.
    st.markdown("**Improvements applied:**")
    for item in preprocessing_result["improvements"]:
        st.markdown(f"- {item}")

    # Explain why preprocessing matters for detection quality.
    st.markdown("### Why these improvements help facial analysis")
    st.markdown("""
    **Technical Benefits:**
    - **Balanced contrast:** Enhances the visibility of facial features while reducing shadows and highlights
    - **Optimal brightness:** Ensures facial features are clearly distinguishable without over-exposure
    - **Proper sizing:** Maintains ideal dimensions for detection algorithms to recognize facial landmarks
    
    **Impact on Emotion Detection:**
    - **More accurate emotion classification:** Cleaner input images lead to more reliable emotion detection
    - **Better feature extraction:** Facial features like eyes, mouth, and eyebrows are more clearly defined
    - **Reduced noise and artifacts:** Minimizes false detections and improves confidence scores
    
    These improvements help our algorithms perform more consistently across different lighting conditions and image sources.
    """)
| 183 |
+
def setup_image_selection_buttons(image_service, preprocessing_result: Dict[str, Any]) -> None:
    """
    Render the two choice buttons: keep the original image or switch to the
    improved one. The selection is persisted in Streamlit session state.

    Args:
        image_service: The image service instance
        preprocessing_result: Dictionary containing preprocessing results
    """
    original_btn_col, improved_btn_col = st.columns(2)

    with original_btn_col:
        if st.button("Continue with Original"):
            # Record the choice so later steps analyze the original image.
            st.session_state["using_preprocessed_image"] = False
            st.session_state["image_processing_status"] = "using_original"

            # Confirm the choice to the user.
            st.markdown("""
            <div style="background-color: #17a2b8; color: white; padding: 10px; border-radius: 5px; margin-bottom: 10px;">
            <strong>ℹ️ Using original image for analysis.</strong>
            </div>
            """, unsafe_allow_html=True)

    with improved_btn_col:
        if st.button("Use Improved Image"):
            try:
                # Persist the processed image so later steps can load it.
                temp_path = image_service.save_processed_image(
                    preprocessing_result["processed_image"]
                )

                # Record the choice and where the processed file lives.
                st.session_state["preprocessed_image_path"] = temp_path
                st.session_state["using_preprocessed_image"] = True
                st.session_state["image_processing_status"] = "using_improved"

                # Confirm the choice to the user.
                st.markdown("""
                <div style="background-color: #28a745; color: white; padding: 10px; border-radius: 5px; margin-bottom: 10px;">
                <strong>✅ Using improved image for analysis!</strong>
                </div>
                """, unsafe_allow_html=True)
            except Exception as e:
                logger.error(f"Error saving processed image: {str(e)}")
                st.error(f"Could not save processed image: {str(e)}")
| 232 |
+
def setup_preprocessing_controls(image_service, preprocessing_result: Dict[str, Any]) -> None:
    """
    Legacy fallback controls for choosing between the original and processed
    images (used when the expandable UI cannot be rendered).

    Args:
        image_service: The image service instance
        preprocessing_result: Dictionary containing preprocessing results
    """
    # Guard clauses: require a result dict with a non-empty improvements list.
    if not preprocessing_result or "improvements" not in preprocessing_result:
        return
    if not preprocessing_result["improvements"]:
        return

    # Render both choice buttons side by side.
    original_col, improved_col = st.columns(2)
    with original_col:
        chose_original = st.button("Continue with Original")
    with improved_col:
        chose_improved = st.button("Use Improved Image")

    # Handle the user's choice (improved takes precedence, as before).
    if chose_improved:
        # Persist the processed image so later steps can load it.
        temp_path = image_service.save_processed_image(
            preprocessing_result["processed_image"]
        )

        # Record the choice and where the processed file lives.
        st.session_state["preprocessed_image_path"] = temp_path
        st.session_state["using_preprocessed_image"] = True

        # Prominent confirmation banner.
        st.markdown("""
        <div style="background-color: #28a745; color: white; padding: 10px; border-radius: 5px; margin-bottom: 10px;">
        <strong>✅ Using improved image for analysis!</strong>
        </div>
        """, unsafe_allow_html=True)

        # Persist the status so the banner survives reruns.
        st.session_state["image_processing_status"] = "using_improved"

        # Small delay to ensure UI updates (kept from original behavior).
        time.sleep(0.5)

    elif chose_original:
        # Record the choice so later steps analyze the original image.
        st.session_state["using_preprocessed_image"] = False
        st.session_state["image_processing_status"] = "using_original"

        # Confirm the choice to the user.
        st.markdown("""
        <div style="background-color: #17a2b8; color: white; padding: 10px; border-radius: 5px; margin-bottom: 10px;">
        <strong>ℹ️ Using original image for analysis.</strong>
        </div>
        """, unsafe_allow_html=True)
| 288 |
+
def display_processing_status() -> None:
    """
    Display a persistent banner indicating which image variant (original or
    improved) is currently selected for analysis. No-op when no choice has
    been made yet.
    """
    status = st.session_state.get("image_processing_status")
    if status == "using_improved":
        st.markdown("""
        <div style="background-color: #28a745; color: white; padding: 5px; border-radius: 5px; margin-bottom: 10px;">
        <strong>✅ Currently using improved image for analysis</strong>
        </div>
        """, unsafe_allow_html=True)
    elif status == "using_original":
        st.markdown("""
        <div style="background-color: #17a2b8; color: white; padding: 5px; border-radius: 5px; margin-bottom: 10px;">
        <strong>ℹ️ Using original image for analysis</strong>
        </div>
        """, unsafe_allow_html=True)
| 307 |
+
def get_processing_image(image_service, original_image: np.ndarray) -> np.ndarray:
    """
    Return the image that should actually be analyzed: the user-selected
    preprocessed version when one is available, otherwise the original.

    Args:
        image_service: The image service instance
        original_image: The original image as backup

    Returns:
        The appropriate image to use for processing
    """
    wants_preprocessed = st.session_state.get("using_preprocessed_image", False)
    saved_path = st.session_state.get("preprocessed_image_path")

    # No preprocessed image selected/saved — use the original as-is.
    if not (wants_preprocessed and saved_path):
        return original_image

    try:
        # Reload the preprocessed image from its temporary file.
        loaded = image_service.load_image_from_path(saved_path)

        # Fall back to the original if the load produced nothing usable.
        if loaded is None or loaded.size == 0:
            logger.warning("Could not load preprocessed image. Using original instead.")
            return original_image

        return loaded
    except Exception as e:
        # Any load failure degrades gracefully to the original image.
        logger.error(f"Error loading preprocessed image: {e}")
        return original_image