Delete huggingface-space

This view is limited to 50 files because it contains too many changes.
- huggingface-space/.dvc/.gitignore +0 -2
- huggingface-space/.dvc/config +0 -0
- huggingface-space/.dvcignore +0 -0
- huggingface-space/.gitattributes +0 -1
- huggingface-space/.gitignore +0 -112
- huggingface-space/Dockerfile +0 -0
- huggingface-space/LICENSE +0 -21
- huggingface-space/README.md +0 -53
- huggingface-space/app.py +0 -181
- huggingface-space/config/config.yaml +0 -25
- huggingface-space/dvc.lock +0 -203
- huggingface-space/dvc.yaml +0 -39
- huggingface-space/gpuCheck.py +0 -42
- huggingface-space/huggingface-space/.dvc/.gitignore +0 -2
- huggingface-space/huggingface-space/.dvc/config +0 -0
- huggingface-space/huggingface-space/.dvcignore +0 -0
- huggingface-space/huggingface-space/.gitattributes +0 -1
- huggingface-space/huggingface-space/.gitignore +0 -112
- huggingface-space/huggingface-space/Dockerfile +0 -0
- huggingface-space/huggingface-space/LICENSE +0 -21
- huggingface-space/huggingface-space/README.md +0 -42
- huggingface-space/huggingface-space/app.py +0 -195
- huggingface-space/huggingface-space/config/config.yaml +0 -25
- huggingface-space/huggingface-space/dvc.lock +0 -203
- huggingface-space/huggingface-space/dvc.yaml +0 -39
- huggingface-space/huggingface-space/gpuCheck.py +0 -42
- huggingface-space/huggingface-space/huggingface-space/.dvc/.gitignore +0 -2
- huggingface-space/huggingface-space/huggingface-space/.dvc/config +0 -0
- huggingface-space/huggingface-space/huggingface-space/.dvcignore +0 -0
- huggingface-space/huggingface-space/huggingface-space/.gitattributes +0 -1
- huggingface-space/huggingface-space/huggingface-space/.gitignore +0 -112
- huggingface-space/huggingface-space/huggingface-space/Dockerfile +0 -0
- huggingface-space/huggingface-space/huggingface-space/LICENSE +0 -21
- huggingface-space/huggingface-space/huggingface-space/README.md +0 -42
- huggingface-space/huggingface-space/huggingface-space/app.py +0 -195
- huggingface-space/huggingface-space/huggingface-space/config/config.yaml +0 -25
- huggingface-space/huggingface-space/huggingface-space/dvc.lock +0 -203
- huggingface-space/huggingface-space/huggingface-space/dvc.yaml +0 -39
- huggingface-space/huggingface-space/huggingface-space/gpuCheck.py +0 -42
- huggingface-space/huggingface-space/huggingface-space/huggingface-space/.gitattributes +0 -1
- huggingface-space/huggingface-space/huggingface-space/huggingface-space/README.md +0 -42
- huggingface-space/huggingface-space/huggingface-space/main.py +0 -70
- huggingface-space/huggingface-space/huggingface-space/params.yaml +0 -15
- huggingface-space/huggingface-space/huggingface-space/requirements.txt +0 -44
- huggingface-space/huggingface-space/huggingface-space/research/01_data_exploration.ipynb +0 -0
- huggingface-space/huggingface-space/huggingface-space/setup.py +0 -28
- huggingface-space/huggingface-space/huggingface-space/src/EmotionRecognition/__init__.py +0 -25
- huggingface-space/huggingface-space/huggingface-space/src/EmotionRecognition/components/__init__.py +0 -0
- huggingface-space/huggingface-space/huggingface-space/src/EmotionRecognition/components/data_ingestion.py +0 -27
- huggingface-space/huggingface-space/huggingface-space/src/EmotionRecognition/components/data_preparation.py +0 -118
huggingface-space/.dvc/.gitignore
DELETED
@@ -1,2 +0,0 @@
-/config.local
-/tmp

huggingface-space/.dvc/config
DELETED
File without changes

huggingface-space/.dvcignore
DELETED
File without changes

huggingface-space/.gitattributes
DELETED
@@ -1 +0,0 @@
-sota_model/model.safetensors filter=lfs diff=lfs merge=lfs -text
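The attribute line above is exactly what Git LFS writes when a file pattern is tracked. As a hedged aside (assuming the `git-lfs` CLI is installed), the deleted rule can be recreated programmatically:

```python
import subprocess

# `git lfs track` appends the "filter=lfs diff=lfs merge=lfs -text" line
# to .gitattributes; it stages nothing by itself, so add the file afterwards.
subprocess.run(["git", "lfs", "track", "sota_model/model.safetensors"], check=True)
subprocess.run(["git", "add", ".gitattributes"], check=True)
```
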
huggingface-space/.gitignore
DELETED
@@ -1,112 +0,0 @@
-# MLOps & Data Science Artifacts
-# -------------------------------------------------------------------
-# Ignore all data, models, and artifacts. These should be tracked by DVC.
-/artifacts/
-/data/
-/sota_model/
-# Ignore the DVC local cache. This is where the actual data files are stored.
-.dvc/cache
-
-# Ignore MLflow experiment tracking output
-/mlruns/
-
-# Ignore logs
-/logs/
-*.log
-
-# Ignore common model file extensions, just in case
-*.h5
-*.pkl
-*.model
-*.onnx
-
-
-# Python Virtual Environments
-# -------------------------------------------------------------------
-/venv/
-/myenv/
-/.venv/
-/env/
-/ENV/
-*/.venv/
-*/venv/
-*/myenv/
-
-
-# Python Byte-code and Caches
-# -------------------------------------------------------------------
-__pycache__/
-*.py[cod]
-*$py.class
-
-
-# Python Packaging & Distribution
-# -------------------------------------------------------------------
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-
-# IDE and Editor Configuration
-# -------------------------------------------------------------------
-# PyCharm
-.idea/
-
-# Visual Studio Code (allow sharing of recommended extensions)
-.vscode/*
-!.vscode/extensions.json
-
-# Sublime Text
-*.sublime-project
-*.sublime-workspace
-
-
-# Secrets and Environment Variables
-# -------------------------------------------------------------------
-# NEVER commit secrets or environment variables
-.env
-*.env
-secrets.yaml
-secrets.json
-
-
-# Operating System Files
-# -------------------------------------------------------------------
-# macOS
-.DS_Store
-
-# Windows
-Thumbs.db
-desktop.ini
-
-
-# Jupyter Notebook Checkpoints
-# -------------------------------------------------------------------
-.ipynb_checkpoints/
-
-
-# Other
-# -------------------------------------------------------------------
-# Temporary files
-*.tmp
-*.bak
-*.swp
-
-.env
-*.env
-secrets.yaml
-secrets.json
-processed_video.mp4

huggingface-space/Dockerfile
DELETED
File without changes

huggingface-space/LICENSE
DELETED
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2025 ALYYAN
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.

huggingface-space/README.md
DELETED
@@ -1,53 +0,0 @@
----
-title: Facial Emotion Detector
-emoji: 🎭
-colorFrom: purple
-colorTo: indigo
-sdk: gradio
-sdk_version: "3.50.2"
-app_file: app.py
-pinned: false
----
-
-# 🎭 End-to-End Facial Emotion Recognition
-
-<!-- Replace with a link to your final app screenshot -->
-
-This repository contains a complete, end-to-end MLOps pipeline and a production-ready web application for real-time facial emotion recognition. The project leverages a state-of-the-art Vision Transformer model and is deployed as a user-friendly Gradio application on Hugging Face Spaces.
-
-**Live Demo:** [🚀 Click here to try the application on Hugging Face Spaces!](https://huggingface.co/spaces/ALYYAN/Emotion-Recognition) <!-- Replace with your HF Space URL -->
-
----
-
-## ✨ Features
-
-- **Real-time Emotion Detection:** Analyzes your webcam feed to predict emotions in real-time.
-- **High Accuracy:** Powered by a pre-trained Swin Transformer model fine-tuned on the massive AffectNet dataset for superior performance on "in the wild" faces.
-- **Static Image & Video Analysis:** Upload your own images or videos for emotion prediction.
-- **Polished UI:** A professional and responsive user interface with an animated background, built with Gradio.
-- **Reproducible MLOps Pipeline:** The entire model training and data processing workflow is managed by DVC, ensuring 100% reproducibility.
-- **Containerized for Deployment:** The application is packaged with Docker for easy and consistent deployment anywhere.
-
-## 🛠️ Tech Stack
-
-- **Model:** Swin Transformer (`PangPang/affectnet-swin-tiny-patch4-window7-224`)
-- **ML/Ops:** Python, TensorFlow/Keras, DVC, MLflow, Hugging Face `transformers`
-- **Backend & UI:** Gradio
-- **Face Detection:** MTCNN
-- **Deployment:** Hugging Face Spaces, Docker
-
-## 🚀 Getting Started
-
-Follow these steps to run the project locally.
-
-### Prerequisites
-
-- Python 3.10+
-- Git and Git LFS ([installation guide](https://git-lfs.github.com))
-- An NVIDIA GPU with CUDA drivers is recommended for the training pipeline, but the deployed app runs on CPU.
-
-### 1. Clone the Repository
-
-```bash
-git clone https://github.com/YOUR-USERNAME/Emotion-Recognition-MLOps.git
-cd Emotion-Recognition-MLOps

huggingface-space/app.py
DELETED
@@ -1,181 +0,0 @@
-import gradio as gr
-import os
-import cv2
-import time
-
-# Ensure the correct predictor class is imported
-from src.EmotionRecognition.pipeline.hf_predictor import HFPredictor
-
-# --- INITIALIZE THE MODEL ---
-print("[INFO] Initializing predictor...")
-try:
-    predictor = HFPredictor()
-    print("[INFO] Predictor initialized successfully.")
-except Exception as e:
-    predictor = None
-    print(f"[FATAL ERROR] Failed to initialize predictor: {e}")
-
-# --- UI CONTENT & STYLING ---
-CSS = """
-/* Animated Gradient Background */
-body {
-    background: linear-gradient(-45deg, #0b0f19, #131a2d, #2a2a72, #522a72);
-    background-size: 400% 400%;
-    animation: gradient 15s ease infinite;
-}
-@keyframes gradient { 0% { background-position: 0% 50%; } 50% { background-position: 100% 50%; } 100% { background-position: 0% 50%; } }
-
-/* General Layout & Typography */
-.gradio-container { max-width: 1320px !important; margin: auto !important; }
-#title { text-align: center; font-size: 3rem !important; font-weight: 700; color: #FFF; margin-bottom: 0.5rem; }
-#subtitle { text-align: center; color: #bebebe; margin-top: 0; margin-bottom: 40px; font-size: 1.2rem; font-weight: 300; }
-.gr-button { font-weight: bold !important; }
-
-/* Main Content Card */
-#main-card {
-    background: rgba(22, 22, 34, 0.65);
-    border-radius: 16px;
-    box-shadow: 0 8px 32px 0 rgba(0, 0, 0, 0.37);
-    backdrop-filter: blur(12px); -webkit-backdrop-filter: blur(12px);
-    border: 1px solid rgba(255, 255, 255, 0.18);
-    padding: 1rem;
-}
-
-/* Prediction Bar Styling */
-#predictions-column { background-color: transparent !important; padding: 1.5rem; }
-#predictions-column > .gr-label { display: none; }
-.prediction-list { list-style-type: none; padding: 0; margin-top: 1.5rem; }
-.prediction-list li { display: flex; align-items: center; margin-bottom: 12px; font-size: 1.1rem; }
-.prediction-list .label { width: 100px; text-transform: capitalize; color: #e0e0e0; }
-.prediction-list .bar-container { flex-grow: 1; height: 24px; background-color: rgba(255,255,255,0.1); border-radius: 12px; margin: 0 15px; overflow: hidden; }
-.prediction-list .bar { height: 100%; background: linear-gradient(90deg, #8A2BE2, #C71585); border-radius: 12px; transition: width 0.1s linear; }
-.prediction-list .percent { width: 60px; text-align: right; font-weight: bold; color: #FFF; }
-footer { display: none !important; }
-"""
-
-ABOUT_MARKDOWN = """
-## 🚀 About This Project
-
-This application is the culmination of a complete, end-to-end MLOps project, demonstrating the full lifecycle from research and experimentation to a final, deployed, state-of-the-art solution.
-
-**💻 [View Project on GitHub](https://github.com/YOUR-USERNAME/Emotion-Recognition-MLOps)** <!--- REPLACE WITH YOUR GITHUB REPO LINK --->
-
----
-
-### Key Technical Features:
-
-* **State-of-the-Art AI Model:** Utilizes a **Swin Transformer**, a powerful Vision Transformer (ViT) architecture, pre-trained on the massive **AffectNet** dataset. This ensures high accuracy and robust generalization to real-world, "in the wild" facial expressions.
-* **Reproducible MLOps Pipeline:** The original model training and data processing workflows were built using **DVC (Data Version Control)**, ensuring that every experiment is versioned and reproducible.
-* **Full-Stack & Deployment:** The application architecture evolved from a Python-only script to a decoupled **FastAPI backend** and a **React frontend**, and was ultimately deployed as this streamlined and robust **Gradio** application.
-* **Containerized & Automated:** The entire application is packaged with **Docker** and is set up for **CI/CD with GitHub Actions**, enabling automated testing and deployment to cloud platforms like Hugging Face Spaces.
-
----
-
-### 🛠️ Architecture & Tech Stack
-
-* **Machine Learning & CV:** Python, PyTorch, Hugging Face `transformers`, MTCNN, OpenCV
-* **MLOps & DevOps:** DVC, GitHub Actions, Docker, Git LFS
-* **Application & UI:** Gradio
-
-"""
-
-# --- BACKEND LOGIC ---
-
-def create_prediction_html(probabilities):
-    """Generates clean HTML for the prediction bars."""
-    if not probabilities:
-        return "<div style='padding: 2rem; text-align: center; color: #999;'>Waiting for prediction...</div>"
-    html = "<ul class='prediction-list'>"
-    sorted_preds = sorted(probabilities.items(), key=lambda item: item[1], reverse=True)
-    for emotion, prob in sorted_preds:
-        html += f"""
-        <li>
-            <strong class='label'>{emotion}</strong>
-            <div class='bar-container'><div class='bar' style='width: {prob*100:.1f}%;'></div></div>
-            <span class='percent'>{(prob*100):.1f}%</span>
-        </li>
-        """
-    html += "</ul>"
-    return html
-
-def unified_prediction_function(frame):
-    """A single, robust function that takes any frame (from webcam or upload) and returns the annotated frame and the prediction HTML."""
-    if frame is None:
-        return None, create_prediction_html({})
-
-    # The predictor class handles all annotation and prediction logic
-    annotated_frame, probabilities = predictor.process_frame(frame)
-
-    return annotated_frame, create_prediction_html(probabilities)
-
-def process_video(video_path, progress=gr.Progress(track_tqdm=True)):
-    """Processes an uploaded video file frame-by-frame."""
-    if video_path is None:
-        return None
-    try:
-        cap = cv2.VideoCapture(video_path)
-        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-        output_path = "processed_video.mp4"
-        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-        fps = cap.get(cv2.CAP_PROP_FPS)
-        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
-        for _ in progress.tqdm(range(frame_count), desc="Processing Video"):
-            ret, frame = cap.read()
-            if not ret: break
-            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-            annotated_frame, _ = predictor.process_frame(frame_rgb)
-            if annotated_frame is not None:
-                out.write(cv2.cvtColor(annotated_frame, cv2.COLOR_RGB2BGR))
-        cap.release()
-        out.release()
-        return output_path
-    except Exception as e:
-        print(f"[ERROR] Video processing failed: {e}")
-        return None
-
-# --- GRADIO UI ---
-with gr.Blocks(css=CSS, theme=gr.themes.Base()) as demo:
-    gr.Markdown("# Facial Emotion Detector", elem_id="title")
-    gr.Markdown("A real-time AI application powered by Vision Transformers", elem_id="subtitle")
-
-    with gr.Box(elem_id="main-card"):
-        with gr.Tabs():
-            with gr.TabItem("Live Detection"):
-                with gr.Row(equal_height=False):
-                    with gr.Column(scale=3):
-                        # The single, correct component for a live webcam feed.
-                        live_feed = gr.Image(source="webcam", streaming=True, type="numpy", label="Live Feed", height=550, mirror_webcam=True)
-                    with gr.Column(scale=2, elem_id="predictions-column"):
-                        gr.Markdown("### Emotion Probabilities")
-                        live_predictions = gr.HTML()
-
-            with gr.TabItem("Upload Image"):
-                with gr.Row(equal_height=False):
-                    with gr.Column(scale=3):
-                        image_input = gr.Image(type="numpy", label="Upload an Image", height=550)
-                    with gr.Column(scale=2, elem_id="predictions-column"):
-                        image_predictions = gr.HTML()
-                image_button = gr.Button("Analyze Image", variant="primary")
-
-            with gr.TabItem("Upload Video"):
-                with gr.Row(equal_height=False):
-                    video_input = gr.Video(label="Upload a Video File")
-                    video_output = gr.Video(label="Processed Video")
-                video_button = gr.Button("Analyze Video", variant="primary")
-
-            with gr.TabItem("About"):
-                gr.Markdown(ABOUT_MARKDOWN)
-
-    # --- EVENT LISTENERS ---
-    live_feed.stream(fn=unified_prediction_function, inputs=live_feed, outputs=[live_feed, live_predictions])
-    image_button.click(fn=unified_prediction_function, inputs=[image_input], outputs=[image_input, image_predictions])
-    video_button.click(fn=process_video, inputs=[video_input], outputs=[video_output])
-
-# --- LAUNCH THE APP ---
-if predictor:
-    # Enabling the queue is essential for the video processing progress bar.
-    demo.queue().launch(debug=True)
-else:
-    print("\n[FATAL ERROR] Could not start the application.")

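The `app.py` above leans entirely on `HFPredictor.process_frame(frame)` returning an annotated frame plus an `{emotion: probability}` dict, but `src/EmotionRecognition/pipeline/hf_predictor.py` is not part of this diff. A minimal, hypothetical sketch of that contract, assuming the MTCNN detector and the Hugging Face `transformers` image-classification pipeline named in the README (the model id, box colour, and largest-face heuristic are assumptions, not the author's implementation):

```python
import cv2
from mtcnn import MTCNN
from PIL import Image
from transformers import pipeline

class HFPredictor:
    """Hypothetical reconstruction of the predictor's public contract."""

    def __init__(self, model_name="PangPang/affectnet-swin-tiny-patch4-window7-224"):
        # README tech stack: MTCNN for face detection, a Swin checkpoint for emotions.
        self.detector = MTCNN()
        self.classifier = pipeline("image-classification", model=model_name)

    def process_frame(self, frame):
        """Return (annotated RGB frame, {emotion: probability}) for the largest face."""
        faces = self.detector.detect_faces(frame)
        if not faces:
            return frame, {}
        x, y, w, h = max(faces, key=lambda f: f["box"][2] * f["box"][3])["box"]
        x, y = max(x, 0), max(y, 0)
        crop = Image.fromarray(frame[y:y + h, x:x + w])
        preds = self.classifier(crop)  # list of {"label": ..., "score": ...}
        cv2.rectangle(frame, (x, y), (x + w, y + h), (138, 43, 226), 2)
        return frame, {p["label"]: p["score"] for p in preds}
```
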
huggingface-space/config/config.yaml
DELETED
@@ -1,25 +0,0 @@
-artifacts_root: artifacts
-
-data_preparation: # This is our Stage 1
-  root_dir: artifacts/data_preparation
-  # Inputs from raw data
-  ferplus_pixels_csv: data/raw/fer2013.csv
-  ferplus_labels_csv: data/raw/fer2013new.csv
-  ckplus_dir: data/raw/CK+48
-  # Outputs
-  combined_train_dir: artifacts/data_preparation/train
-  ferplus_test_dir: artifacts/data_preparation/test
-
-model_trainer:
-  root_dir: artifacts/training
-  # The trainer now takes its input directly from the preparation stage
-  train_data_dir: artifacts/data_preparation/train
-  test_data_dir: artifacts/data_preparation/test
-  trained_model_path: artifacts/training/model.keras
-
-model_evaluation:
-  root_dir: artifacts/evaluation
-  test_data_dir: artifacts/data_preparation/test
-  trained_model_path: artifacts/training/model.keras
-  metrics_file_name: artifacts/evaluation/metrics.json
-  mlflow_uri: https://dagshub.com/AlyyanAhmed21/Emotion-Recognition-MLOps.mlflow # Example for DagsHub

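These sections are consumed by `src/EmotionRecognition/config/configuration.py`, which this diff references only by hash. A minimal sketch of how such a configuration manager might read the file (class and method names here are assumptions for illustration, not the deleted code):

```python
from pathlib import Path

import yaml  # PyYAML

class ConfigurationManager:
    """Hypothetical loader for the config.yaml sections shown above."""

    def __init__(self, config_path: Path = Path("config/config.yaml")):
        with open(config_path) as f:
            self.config = yaml.safe_load(f)

    def get_data_preparation_config(self) -> dict:
        # Ensure the stage's working directory exists before returning its paths.
        cfg = self.config["data_preparation"]
        Path(cfg["root_dir"]).mkdir(parents=True, exist_ok=True)
        return cfg
```
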
huggingface-space/dvc.lock
DELETED
@@ -1,203 +0,0 @@
-schema: '2.0'
-stages:
-  data_validation:
-    cmd: python src/EmotionRecognition/pipeline/stage_02_data_validation.py
-    deps:
-    - path: artifacts/data_ingestion
-      hash: md5
-      md5: 9208f64defb6697b78bab62e943d955d.dir
-      size: 302675528
-      nfiles: 2
-    - path: src/EmotionRecognition/config/configuration.py
-      hash: md5
-      md5: dacf4230e18681185b786aa280cdec5e
-      size: 4275
-    - path: src/EmotionRecognition/pipeline/stage_02_data_validation.py
-      hash: md5
-      md5: 18a3d78c83dc5b278e14523077035e41
-      size: 1141
-    outs:
-    - path: artifacts/data_validation/status.txt
-      hash: md5
-      md5: 86e6a2f694c57a675b3e2da6b95ff9ba
-      size: 23
-  data_preparation:
-    cmd: python src/EmotionRecognition/pipeline/stage_01_data_preparation.py
-    deps:
-    - path: data/raw/CK+48
-      hash: md5
-      md5: a1559eddfd0d86b541e5df18b4b8205e.dir
-      size: 1715162
-      nfiles: 981
-    - path: data/raw/fer2013.csv
-      hash: md5
-      md5: f8428a1edbd21e88f42c73edd2a14f95
-      size: 301072766
-    - path: data/raw/fer2013new.csv
-      hash: md5
-      md5: 413eba86d6e454536b99705b8c7fc5c5
-      size: 1602762
-    - path: src/EmotionRecognition/components/data_preparation.py
-      hash: md5
-      md5: 228140227aaedb9f07b4c00462f267c6
-      size: 5776
-    - path: src/EmotionRecognition/config/configuration.py
-      hash: md5
-      md5: 8786c8d41e2e50a49b4ca6d5bf59ad44
-      size: 2910
-    - path: src/EmotionRecognition/pipeline/stage_01_data_preparation.py
-      hash: md5
-      md5: 1a324b8f1cf01e4e60e0a8529b23b577
-      size: 1110
-    params:
-      params.yaml:
-        DATA_PARAMS.CLASSES:
-        - angry
-        - disgust
-        - fear
-        - happy
-        - neutral
-        - sad
-        - surprise
-    outs:
-    - path: artifacts/data_preparation/test
-      hash: md5
-      md5: 79c105a50ccbe2557fea9fab2c743fa5.dir
-      size: 6249935
-      nfiles: 3589
-    - path: artifacts/data_preparation/train
-      hash: md5
-      md5: 750c0a305d28467341396ab591ed2731.dir
-      size: 51232879
-      nfiles: 29471
-  model_training:
-    cmd: python src/EmotionRecognition/pipeline/stage_02_model_training.py
-    deps:
-    - path: artifacts/data_preparation/test
-      hash: md5
-      md5: 79c105a50ccbe2557fea9fab2c743fa5.dir
-      size: 6249935
-      nfiles: 3589
-    - path: artifacts/data_preparation/train
-      hash: md5
-      md5: 750c0a305d28467341396ab591ed2731.dir
-      size: 51232879
-      nfiles: 29471
-    - path: src/EmotionRecognition/components/model_trainer.py
-      hash: md5
-      md5: 5192acef195c9a9b03a88490476ead1c
-      size: 3916
-    - path: src/EmotionRecognition/pipeline/stage_02_model_training.py
-      hash: md5
-      md5: 2ee36d6e30a3a262e8327a26e71a37e9
-      size: 1076
-    params:
-      params.yaml:
-        DATA_PARAMS:
-          IMAGE_SIZE:
-          - 224
-          - 224
-          CHANNELS: 3
-          BATCH_SIZE: 32
-          CLASSES:
-          - angry
-          - disgust
-          - fear
-          - happy
-          - neutral
-          - sad
-          - surprise
-          NUM_CLASSES: 7
-        TRAINING_PARAMS:
-          EPOCHS: 50
-          LEARNING_RATE: 0.0001
-          OPTIMIZER: Adam
-          LOSS_FUNCTION: CategoricalCrossentropy
-          METRICS:
-          - accuracy
-          DROPOUT_RATE: 0.5
-    outs:
-    - path: artifacts/training/model.keras
-      hash: md5
-      md5: 2c632cb4cbf3f2944145a8da1927f2cf
-      size: 11331400
-  model_evaluation:
-    cmd: python src/EmotionRecognition/pipeline/stage_03_model_evaluation.py
-    deps:
-    - path: artifacts/data_preparation/test
-      hash: md5
-      md5: 79c105a50ccbe2557fea9fab2c743fa5.dir
-      size: 6249935
-      nfiles: 3589
-    - path: artifacts/training/model.keras
-      hash: md5
-      md5: 2c632cb4cbf3f2944145a8da1927f2cf
-      size: 11331400
-    - path: src/EmotionRecognition/components/model_evaluation.py
-      hash: md5
-      md5: 8b327667db406dd7c6489937747b8537
-      size: 2429
-    params:
-      params.yaml:
-        DATA_PARAMS:
-          IMAGE_SIZE:
-          - 224
-          - 224
-          CHANNELS: 3
-          BATCH_SIZE: 32
-          CLASSES:
-          - angry
-          - disgust
-          - fear
-          - happy
-          - neutral
-          - sad
-          - surprise
-          NUM_CLASSES: 7
-    outs:
-    - path: artifacts/evaluation/metrics.json
-      hash: md5
-      md5: 3e8f938b34095f56c597110c5d86064e
-      size: 72
-  data_preprocessing:
-    cmd: python src/EmotionRecognition/pipeline/stage_02_data_preprocessing.py
-    deps:
-    - path: artifacts/data_preparation/test
-      hash: md5
-      md5: 79c105a50ccbe2557fea9fab2c743fa5.dir
-      size: 6249935
-      nfiles: 3589
-    - path: artifacts/data_preparation/train
-      hash: md5
-      md5: 750c0a305d28467341396ab591ed2731.dir
-      size: 51232879
-      nfiles: 29471
-    - path: src/EmotionRecognition/components/data_preprocessing.py
-      hash: md5
-      md5: bc85964fdf86afb289051c2498037eb8
-      size: 3903
-    - path: src/EmotionRecognition/pipeline/stage_02_data_preprocessing.py
-      hash: md5
-      md5: 5631296a6b7bace5c2f6979eda5ca081
-      size: 971
-    params:
-      params.yaml:
-        DATA_PARAMS.CLASSES:
-        - angry
-        - disgust
-        - fear
-        - happy
-        - neutral
-        - sad
-        - surprise
-    outs:
-    - path: artifacts/data_preprocessing/test
-      hash: md5
-      md5: 79c105a50ccbe2557fea9fab2c743fa5.dir
-      size: 6249935
-      nfiles: 3589
-    - path: artifacts/data_preprocessing/train
-      hash: md5
-      md5: 3dc8382a4774d1a1f1d1e5dfe3ca4c1b.dir
-      size: 18389122
-      nfiles: 10500

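Every `md5` field in this lock file pins a dependency or output: for single files it is the MD5 of the file's bytes, while `.dir` hashes cover a JSON manifest of the directory's contents (not reproduced here). A small sketch for spot-checking a file entry by hand:

```python
import hashlib

def file_md5(path: str, chunk_size: int = 1 << 20) -> str:
    """MD5 of a file's contents, matching DVC's hash for single-file entries."""
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# e.g. dvc.lock records f8428a1edbd21e88f42c73edd2a14f95 for data/raw/fer2013.csv
print(file_md5("data/raw/fer2013.csv"))
```
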
huggingface-space/dvc.yaml
DELETED
@@ -1,39 +0,0 @@
-stages:
-  data_preparation:
-    cmd: python src/EmotionRecognition/pipeline/stage_01_data_preparation.py
-    deps:
-      - src/EmotionRecognition/pipeline/stage_01_data_preparation.py
-      - src/EmotionRecognition/components/data_preparation.py
-      - data/raw/fer2013.csv
-      - data/raw/fer2013new.csv
-      - data/raw/CK+48
-    params:
-      - DATA_PARAMS.CLASSES
-    outs:
-      - artifacts/data_preparation/train
-      - artifacts/data_preparation/test
-
-  model_training:
-    cmd: python src/EmotionRecognition/pipeline/stage_02_model_training.py
-    deps:
-      - src/EmotionRecognition/pipeline/stage_02_model_training.py
-      - src/EmotionRecognition/components/model_trainer.py
-      - artifacts/data_preparation/train
-      - artifacts/data_preparation/test
-    params:
-      - DATA_PARAMS
-      - TRAINING_PARAMS
-    outs:
-      - artifacts/training/model.keras
-
-  model_evaluation:
-    cmd: python src/EmotionRecognition/pipeline/stage_03_model_evaluation.py
-    deps:
-      - src/EmotionRecognition/components/model_evaluation.py
-      - artifacts/data_preparation/test
-      - artifacts/training/model.keras
-    params:
-      - DATA_PARAMS
-    metrics:
-      - artifacts/evaluation/metrics.json:
-          cache: false

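`dvc repro` derives the stage order from these `deps`/`outs` declarations rather than from their position in the file: `model_training` consumes `data_preparation`'s outputs, and `model_evaluation` consumes the trained model. An illustrative sketch (not DVC's internals) that recovers the same chain by intersecting each stage's deps with the other stages' outs:

```python
import yaml  # PyYAML

with open("dvc.yaml") as f:
    stages = yaml.safe_load(f)["stages"]

# Map every declared output back to the stage that produces it.
outs_to_stage = {}
for name, stage in stages.items():
    for out in stage.get("outs", []):
        outs_to_stage[out] = name

# A stage's upstream set is every dep that some other stage produces.
for name, stage in stages.items():
    upstream = {outs_to_stage[d] for d in stage.get("deps", []) if d in outs_to_stage}
    print(f"{name}: depends on {sorted(upstream) or 'raw inputs only'}")
```
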
huggingface-space/gpuCheck.py
DELETED
@@ -1,42 +0,0 @@
-import os
-
-
-# --- THE WORKAROUND ---
-# Define the full path to the CUDA bin directory
-cuda_bin_path = r"E:\Nvidia\CUDA\v11.2\bin"
-
-# Add this path to the OS environment's DLL search path
-# This MUST be done BEFORE importing tensorflow
-try:
-    os.add_dll_directory(cuda_bin_path)
-    print(f"Successfully added {cuda_bin_path} to DLL search path.")
-except AttributeError:
-    # This function was added in Python 3.8. For older versions, you might need
-    # to add the path to the system PATH environment variable manually.
-    print("os.add_dll_directory not available. Ensure CUDA bin is in the system PATH.")
-# --- END WORKAROUND ---
-
-import tensorflow as tf  # imported after the DLL workaround, as required above
-
-print(f"TensorFlow Version: {tf.__version__}")
-print("-" * 30)
-
-# Check for GPU devices
-gpu_devices = tf.config.list_physical_devices('GPU')
-print(f"Num GPUs Available: {len(gpu_devices)}")
-print("-" * 30)
-
-if gpu_devices:
-    print("GPU Device Details:")
-    for gpu in gpu_devices:
-        tf.config.experimental.set_memory_growth(gpu, True)
-        print(f"- {gpu.name}, Type: {gpu.device_type}")
-    print("\nSUCCESS: TensorFlow is configured to use the GPU!")
-else:
-    print("\nFAILURE: TensorFlow did not detect a GPU.")
-
-
-from tensorflow.python.client import device_lib
-
-print("Verbose device list:")
-print(device_lib.list_local_devices())

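`gpuCheck.py` verifies the TensorFlow/CUDA setup used for training. For the PyTorch side of the stack that the app's About text lists, the analogous check is much shorter; a small sketch:

```python
import torch

# No DLL-path workaround is needed on a typical PyTorch install;
# the CUDA runtime ships inside the wheel.
print(f"PyTorch Version: {torch.__version__}")
if torch.cuda.is_available():
    print(f"SUCCESS: {torch.cuda.get_device_name(0)} detected.")
else:
    print("FAILURE: PyTorch did not detect a GPU; falling back to CPU.")
```
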
huggingface-space/huggingface-space/.dvc/.gitignore
DELETED
@@ -1,2 +0,0 @@
-/config.local
-/tmp

huggingface-space/huggingface-space/.dvc/config
DELETED
File without changes

huggingface-space/huggingface-space/.dvcignore
DELETED
File without changes

huggingface-space/huggingface-space/.gitattributes
DELETED
@@ -1 +0,0 @@
-sota_model/model.safetensors filter=lfs diff=lfs merge=lfs -text

huggingface-space/huggingface-space/.gitignore
DELETED
@@ -1,112 +0,0 @@
(content identical to huggingface-space/.gitignore above)

huggingface-space/huggingface-space/Dockerfile
DELETED
File without changes

huggingface-space/huggingface-space/LICENSE
DELETED
@@ -1,21 +0,0 @@
(content identical to huggingface-space/LICENSE above)

huggingface-space/huggingface-space/README.md
DELETED
@@ -1,42 +0,0 @@
(content identical to huggingface-space/README.md above, minus its ten-line YAML front matter)

huggingface-space/huggingface-space/app.py
DELETED
@@ -1,195 +0,0 @@
-import gradio as gr
-import os
-import cv2
-import time
-
-from src.EmotionRecognition.pipeline.hf_predictor import HFPredictor
-
-# --- INITIALIZE THE MODEL ---
-print("[INFO] Initializing predictor...")
-try:
-    predictor = HFPredictor()
-    print("[INFO] Predictor initialized successfully.")
-except Exception as e:
-    predictor = None
-    print(f"[FATAL ERROR] Failed to initialize predictor: {e}")
-
-# --- UI CONTENT & STYLING ---
-# In app.py
-
-CSS = """
-/* Animated Gradient Background */
-body {
-    background: linear-gradient(-45deg, #0b0f19, #131a2d, #2a2a72, #522a72);
-    background-size: 400% 400%;
-    animation: gradient 15s ease infinite;
-    color: #e0e0e0;
-}
-@keyframes gradient {
-    0% { background-position: 0% 50%; }
-    50% { background-position: 100% 50%; }
-    100% { background-position: 0% 50%; }
-}
-
-/* General Layout & Typography */
-.gradio-container { max-width: 1320px !important; margin: auto !important; }
-#title { text-align: center; font-size: 3rem !important; font-weight: 700; color: #FFF; margin-bottom: 0.5rem; }
-#subtitle { text-align: center; color: #bebebe; margin-top: 0; margin-bottom: 40px; font-size: 1.2rem; font-weight: 300; }
-.gr-button { font-weight: bold !important; }
-
-/* --- NEW: The "Glass Card" effect --- */
-#main-card {
-    background: rgba(22, 22, 34, 0.65); /* Semi-transparent dark background */
-    border-radius: 16px;
-    box-shadow: 0 8px 32px 0 rgba(0, 0, 0, 0.37);
-    backdrop-filter: blur(12px); /* The "frosted glass" effect */
-    -webkit-backdrop-filter: blur(12px); /* For Safari */
-    border: 1px solid rgba(255, 255, 255, 0.18);
-    padding: 1rem;
-}
-/* --- END NEW --- */
-
-/* Prediction Bar Styling - now inside the card */
-#predictions-column { background-color: transparent !important; border-radius: 12px; padding: 1.5rem; }
-#predictions-column > .gr-label { display: none; }
-.prediction-list { list-style-type: none; padding: 0; margin-top: 0; }
-.prediction-list li { display: flex; align-items: center; margin-bottom: 12px; font-size: 1.1rem; }
-.prediction-list .label { width: 100px; text-transform: capitalize; color: #e0e0e0; }
-.prediction-list .bar-container { flex-grow: 1; height: 24px; background-color: rgba(255,255,255,0.1); border-radius: 12px; margin: 0 15px; overflow: hidden; }
-.prediction-list .bar { height: 100%; background: linear-gradient(90deg, #8A2BE2, #C71585); border-radius: 12px; transition: width 0.2s ease-in-out; }
-.prediction-list .percent { width: 60px; text-align: right; font-weight: bold; color: #FFF; }
-footer { display: none !important; }
-"""
-
-ABOUT_MARKDOWN = """
-### Model: Vision Transformer (ViT)
-This application uses a Vision Transformer model, fine-tuned for facial emotion recognition.
-### Dataset
-The model was fine-tuned on the **Emotion Recognition Dataset** from Kaggle, a large, curated collection of labeled facial images. This diverse dataset allows the model to generalize to a wide variety of real-world faces and expressions.
-*Dataset Link:* [https://www.kaggle.com/datasets/sujaykapadnis/emotion-recognition-dataset](https://www.kaggle.com/datasets/sujaykapadnis/emotion-recognition-dataset)
-### MLOps Pipeline
-This entire application, from data processing to training and deployment, was built using a reproducible MLOps pipeline, ensuring consistency and quality at every step.
-"""
-
-# --- BACKEND LOGIC ---
-def create_prediction_html(probabilities):
-    if not probabilities:
-        return "<div style='padding: 2rem; text-align: center; color: #999;'>Waiting for prediction...</div>"
-    html = "<ul class='prediction-list'>"
-    sorted_preds = sorted(probabilities.items(), key=lambda item: item[1], reverse=True)
-    for emotion, prob in sorted_preds:
-        html += f"""
-        <li>
-            <strong class='label'>{emotion}</strong>
-            <div class='bar-container'><div class='bar' style='width: {prob*100:.1f}%;'></div></div>
-            <span class='percent'>{(prob*100):.1f}%</span>
-        </li>
-        """
-    html += "</ul>"
-    return html
-
-def live_detection_stream(state):  # receives stream_state from the .then() call below
-    """A generator function that runs the live feed loop. This is the definitive fix."""
-    cap = cv2.VideoCapture(0)
-    if not cap.isOpened():
-        print("[ERROR] Cannot open webcam")
-        return
-    try:
-        while True:
-            ret, frame = cap.read()
-            if not ret:
-                time.sleep(0.01)
-                continue
-
-            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-            annotated_frame, probabilities = predictor.process_frame(frame_rgb)
-            yield annotated_frame, create_prediction_html(probabilities)
-            time.sleep(0.05) # Controls FPS. 0.05 = ~20 FPS target. The model inference will be the main bottleneck.
-    finally:
-        print("[INFO] Live feed stopped. Releasing webcam.")
-        cap.release()
-
-def process_image(image):
-    if image is None: return None, create_prediction_html({})
-    annotated_frame, probabilities = predictor.process_frame(image)
-    return annotated_frame, create_prediction_html(probabilities)
-
-def process_video(video_path, progress=gr.Progress(track_tqdm=True)):
-    if video_path is None: return None
-    try:
-        cap = cv2.VideoCapture(video_path)
-        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-        output_path = "processed_video.mp4"
-        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-        fps = cap.get(cv2.CAP_PROP_FPS)
-        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
-        for _ in progress.tqdm(range(frame_count), desc="Processing Video"):
-            ret, frame = cap.read()
-            if not ret: break
-            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-            annotated_frame, _ = predictor.process_frame(frame_rgb)
-            if annotated_frame is not None:
-                out.write(cv2.cvtColor(annotated_frame, cv2.COLOR_RGB2BGR))
-        cap.release()
-        out.release()
-        return output_path
-    except Exception as e:
-        print(f"[ERROR] Video processing failed: {e}")
-        return None
-
-# --- GRADIO UI ---
-with gr.Blocks(css=CSS, theme=gr.themes.Base()) as demo:
-    gr.Markdown("# Facial Emotion Detector", elem_id="title")
-    gr.Markdown("A real-time AI application powered by Vision Transformers", elem_id="subtitle")
-
-    # --- NEW: Wrapper for the glass card effect ---
-    with gr.Box(elem_id="main-card"):
-        with gr.Tabs():
-            with gr.TabItem("Live Detection"):
-                with gr.Row(equal_height=True):
-                    with gr.Column(scale=3):
-                        live_output = gr.Image(label="Live Feed", interactive=False, height=550)
-                    with gr.Column(scale=2, elem_id="predictions-column"):
-                        gr.Markdown("### Emotion Probabilities") # Title for the panel
-                        live_predictions = gr.HTML()
-                with gr.Row():
-                    start_button = gr.Button("Start Webcam", variant="primary", scale=1)
-                    stop_button = gr.Button("Stop Webcam", variant="secondary", scale=1)
-
-                stream_state = gr.State("Stop")
-
-            with gr.TabItem("Upload Image"):
-                with gr.Row(equal_height=True):
-                    with gr.Column(scale=3):
-                        image_input = gr.Image(type="numpy", label="Upload an Image", height=550)
-                    with gr.Column(scale=2, elem_id="predictions-column"):
-                        gr.Markdown("### Emotion Probabilities")
-                        image_predictions = gr.HTML()
-                image_button = gr.Button("Analyze Image", variant="primary")
-
-            with gr.TabItem("Upload Video"):
-                with gr.Row(equal_height=True):
-                    video_input = gr.Video(label="Upload a Video File")
-                    video_output = gr.Video(label="Processed Video")
-                video_button = gr.Button("Analyze Video", variant="primary")
-
-            with gr.TabItem("About"):
-                gr.Markdown(ABOUT_MARKDOWN)
-    # --- END WRAPPER ---
-
-    # --- EVENT LISTENERS (No changes needed here) ---
-    start_event = start_button.click(lambda: "Start", None, stream_state, queue=False)
-    live_stream = start_event.then(live_detection_stream, stream_state, [live_output, live_predictions])
-
-    stop_button.click(fn=None, inputs=None, outputs=None, cancels=[live_stream])
-
-    image_button.click(process_image, [image_input], [image_input, image_predictions])
-    video_button.click(process_video, [video_input], [video_output])
-
-# --- LAUNCH THE APP ---
-if predictor:
-    demo.queue().launch(debug=True, share=True)
-else:
-    print("\n[FATAL ERROR] Could not start the application.")

huggingface-space/huggingface-space/config/config.yaml
DELETED
@@ -1,25 +0,0 @@
artifacts_root: artifacts

data_preparation: # This is our Stage 1
  root_dir: artifacts/data_preparation
  # Inputs from raw data
  ferplus_pixels_csv: data/raw/fer2013.csv
  ferplus_labels_csv: data/raw/fer2013new.csv
  ckplus_dir: data/raw/CK+48
  # Outputs
  combined_train_dir: artifacts/data_preparation/train
  ferplus_test_dir: artifacts/data_preparation/test

model_trainer:
  root_dir: artifacts/training
  # The trainer now takes its input directly from the preparation stage
  train_data_dir: artifacts/data_preparation/train
  test_data_dir: artifacts/data_preparation/test
  trained_model_path: artifacts/training/model.keras

model_evaluation:
  root_dir: artifacts/evaluation
  test_data_dir: artifacts/data_preparation/test
  trained_model_path: artifacts/training/model.keras
  metrics_file_name: artifacts/evaluation/metrics.json
  mlflow_uri: https://dagshub.com/AlyyanAhmed21/Emotion-Recognition-MLOps.mlflow # Example for DagsHub
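For context, configs like this are typically read into dot-accessible objects; `PyYAML` and `python-box` both appear in requirements.txt, which suggests a pattern along these lines. The `read_yaml` helper below is an illustrative sketch, not necessarily the project's actual utility:

```python
import yaml
from box import ConfigBox  # from the python-box package

def read_yaml(path: str) -> ConfigBox:
    # ConfigBox wraps the parsed dict so nested keys support dot access.
    with open(path) as f:
        return ConfigBox(yaml.safe_load(f))

config = read_yaml("config/config.yaml")
print(config.model_trainer.trained_model_path)  # -> artifacts/training/model.keras
```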
huggingface-space/huggingface-space/dvc.lock
DELETED
@@ -1,203 +0,0 @@
schema: '2.0'
stages:
  data_validation:
    cmd: python src/EmotionRecognition/pipeline/stage_02_data_validation.py
    deps:
    - path: artifacts/data_ingestion
      hash: md5
      md5: 9208f64defb6697b78bab62e943d955d.dir
      size: 302675528
      nfiles: 2
    - path: src/EmotionRecognition/config/configuration.py
      hash: md5
      md5: dacf4230e18681185b786aa280cdec5e
      size: 4275
    - path: src/EmotionRecognition/pipeline/stage_02_data_validation.py
      hash: md5
      md5: 18a3d78c83dc5b278e14523077035e41
      size: 1141
    outs:
    - path: artifacts/data_validation/status.txt
      hash: md5
      md5: 86e6a2f694c57a675b3e2da6b95ff9ba
      size: 23
  data_preparation:
    cmd: python src/EmotionRecognition/pipeline/stage_01_data_preparation.py
    deps:
    - path: data/raw/CK+48
      hash: md5
      md5: a1559eddfd0d86b541e5df18b4b8205e.dir
      size: 1715162
      nfiles: 981
    - path: data/raw/fer2013.csv
      hash: md5
      md5: f8428a1edbd21e88f42c73edd2a14f95
      size: 301072766
    - path: data/raw/fer2013new.csv
      hash: md5
      md5: 413eba86d6e454536b99705b8c7fc5c5
      size: 1602762
    - path: src/EmotionRecognition/components/data_preparation.py
      hash: md5
      md5: 228140227aaedb9f07b4c00462f267c6
      size: 5776
    - path: src/EmotionRecognition/config/configuration.py
      hash: md5
      md5: 8786c8d41e2e50a49b4ca6d5bf59ad44
      size: 2910
    - path: src/EmotionRecognition/pipeline/stage_01_data_preparation.py
      hash: md5
      md5: 1a324b8f1cf01e4e60e0a8529b23b577
      size: 1110
    params:
      params.yaml:
        DATA_PARAMS.CLASSES:
        - angry
        - disgust
        - fear
        - happy
        - neutral
        - sad
        - surprise
    outs:
    - path: artifacts/data_preparation/test
      hash: md5
      md5: 79c105a50ccbe2557fea9fab2c743fa5.dir
      size: 6249935
      nfiles: 3589
    - path: artifacts/data_preparation/train
      hash: md5
      md5: 750c0a305d28467341396ab591ed2731.dir
      size: 51232879
      nfiles: 29471
  model_training:
    cmd: python src/EmotionRecognition/pipeline/stage_02_model_training.py
    deps:
    - path: artifacts/data_preparation/test
      hash: md5
      md5: 79c105a50ccbe2557fea9fab2c743fa5.dir
      size: 6249935
      nfiles: 3589
    - path: artifacts/data_preparation/train
      hash: md5
      md5: 750c0a305d28467341396ab591ed2731.dir
      size: 51232879
      nfiles: 29471
    - path: src/EmotionRecognition/components/model_trainer.py
      hash: md5
      md5: 5192acef195c9a9b03a88490476ead1c
      size: 3916
    - path: src/EmotionRecognition/pipeline/stage_02_model_training.py
      hash: md5
      md5: 2ee36d6e30a3a262e8327a26e71a37e9
      size: 1076
    params:
      params.yaml:
        DATA_PARAMS:
          IMAGE_SIZE:
          - 224
          - 224
          CHANNELS: 3
          BATCH_SIZE: 32
          CLASSES:
          - angry
          - disgust
          - fear
          - happy
          - neutral
          - sad
          - surprise
          NUM_CLASSES: 7
        TRAINING_PARAMS:
          EPOCHS: 50
          LEARNING_RATE: 0.0001
          OPTIMIZER: Adam
          LOSS_FUNCTION: CategoricalCrossentropy
          METRICS:
          - accuracy
          DROPOUT_RATE: 0.5
    outs:
    - path: artifacts/training/model.keras
      hash: md5
      md5: 2c632cb4cbf3f2944145a8da1927f2cf
      size: 11331400
  model_evaluation:
    cmd: python src/EmotionRecognition/pipeline/stage_03_model_evaluation.py
    deps:
    - path: artifacts/data_preparation/test
      hash: md5
      md5: 79c105a50ccbe2557fea9fab2c743fa5.dir
      size: 6249935
      nfiles: 3589
    - path: artifacts/training/model.keras
      hash: md5
      md5: 2c632cb4cbf3f2944145a8da1927f2cf
      size: 11331400
    - path: src/EmotionRecognition/components/model_evaluation.py
      hash: md5
      md5: 8b327667db406dd7c6489937747b8537
      size: 2429
    params:
      params.yaml:
        DATA_PARAMS:
          IMAGE_SIZE:
          - 224
          - 224
          CHANNELS: 3
          BATCH_SIZE: 32
          CLASSES:
          - angry
          - disgust
          - fear
          - happy
          - neutral
          - sad
          - surprise
          NUM_CLASSES: 7
    outs:
    - path: artifacts/evaluation/metrics.json
      hash: md5
      md5: 3e8f938b34095f56c597110c5d86064e
      size: 72
  data_preprocessing:
    cmd: python src/EmotionRecognition/pipeline/stage_02_data_preprocessing.py
    deps:
    - path: artifacts/data_preparation/test
      hash: md5
      md5: 79c105a50ccbe2557fea9fab2c743fa5.dir
      size: 6249935
      nfiles: 3589
    - path: artifacts/data_preparation/train
      hash: md5
      md5: 750c0a305d28467341396ab591ed2731.dir
      size: 51232879
      nfiles: 29471
    - path: src/EmotionRecognition/components/data_preprocessing.py
      hash: md5
      md5: bc85964fdf86afb289051c2498037eb8
      size: 3903
    - path: src/EmotionRecognition/pipeline/stage_02_data_preprocessing.py
      hash: md5
      md5: 5631296a6b7bace5c2f6979eda5ca081
      size: 971
    params:
      params.yaml:
        DATA_PARAMS.CLASSES:
        - angry
        - disgust
        - fear
        - happy
        - neutral
        - sad
        - surprise
    outs:
    - path: artifacts/data_preprocessing/test
      hash: md5
      md5: 79c105a50ccbe2557fea9fab2c743fa5.dir
      size: 6249935
      nfiles: 3589
    - path: artifacts/data_preprocessing/train
      hash: md5
      md5: 3dc8382a4774d1a1f1d1e5dfe3ca4c1b.dir
      size: 18389122
      nfiles: 10500
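dvc.lock pins every stage's inputs and outputs by MD5 hash (with a `.dir` suffix for directory checksums), which is how `dvc repro` decides whether a stage is up to date. An illustrative sketch of the per-file hashing involved:

```python
import hashlib

def file_md5(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in chunks so large artifacts (e.g. the ~300 MB
    # fer2013.csv recorded above) don't have to fit in memory.
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# If this no longer matches the md5 recorded in dvc.lock,
# the stages depending on the file are considered stale.
print(file_md5("data/raw/fer2013new.csv"))
```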
huggingface-space/huggingface-space/dvc.yaml
DELETED
@@ -1,39 +0,0 @@
stages:
  data_preparation:
    cmd: python src/EmotionRecognition/pipeline/stage_01_data_preparation.py
    deps:
    - src/EmotionRecognition/pipeline/stage_01_data_preparation.py
    - src/EmotionRecognition/components/data_preparation.py
    - data/raw/fer2013.csv
    - data/raw/fer2013new.csv
    - data/raw/CK+48
    params:
    - DATA_PARAMS.CLASSES
    outs:
    - artifacts/data_preparation/train
    - artifacts/data_preparation/test

  model_training:
    cmd: python src/EmotionRecognition/pipeline/stage_02_model_training.py
    deps:
    - src/EmotionRecognition/pipeline/stage_02_model_training.py
    - src/EmotionRecognition/components/model_trainer.py
    - artifacts/data_preparation/train
    - artifacts/data_preparation/test
    params:
    - DATA_PARAMS
    - TRAINING_PARAMS
    outs:
    - artifacts/training/model.keras

  model_evaluation:
    cmd: python src/EmotionRecognition/pipeline/stage_03_model_evaluation.py
    deps:
    - src/EmotionRecognition/components/model_evaluation.py
    - artifacts/data_preparation/test
    - artifacts/training/model.keras
    params:
    - DATA_PARAMS
    metrics:
    - artifacts/evaluation/metrics.json:
        cache: false
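This pipeline definition is what `dvc repro` walks: each stage re-runs only when one of its `deps` or tracked `params` changes, and the `outs` of one stage feed the `deps` of the next. A small sketch of inspecting that wiring programmatically (assuming PyYAML):

```python
import yaml

with open("dvc.yaml") as f:
    pipeline = yaml.safe_load(f)

# Print each stage with the dependencies that trigger a re-run.
for name, stage in pipeline["stages"].items():
    print(f"{name}:")
    print("  deps:", stage.get("deps", []))
    print("  outs:", stage.get("outs", []))
```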
huggingface-space/huggingface-space/gpuCheck.py
DELETED
@@ -1,42 +0,0 @@
import os
import tensorflow as tf

# --- THE WORKAROUND ---
# Define the full path to the CUDA bin directory
cuda_bin_path = r"E:\Nvidia\CUDA\v11.2\bin"

# Add this path to the OS environment's DLL search path
# This MUST be done BEFORE importing tensorflow
try:
    os.add_dll_directory(cuda_bin_path)
    print(f"Successfully added {cuda_bin_path} to DLL search path.")
except AttributeError:
    # This function was added in Python 3.8. For older versions, you might need
    # to add the path to the system PATH environment variable manually.
    print("os.add_dll_directory not available. Ensure CUDA bin is in the system PATH.")
# --- END WORKAROUND ---


print(f"TensorFlow Version: {tf.__version__}")
print("-" * 30)

# Check for GPU devices
gpu_devices = tf.config.list_physical_devices('GPU')
print(f"Num GPUs Available: {len(gpu_devices)}")
print("-" * 30)

if gpu_devices:
    print("GPU Device Details:")
    for gpu in gpu_devices:
        tf.config.experimental.set_memory_growth(gpu, True)
        print(f"- {gpu.name}, Type: {gpu.device_type}")
    print("\nSUCCESS: TensorFlow is configured to use the GPU!")
else:
    print("\nFAILURE: TensorFlow did not detect a GPU.")


import tensorflow as tf
from tensorflow.python.client import device_lib

print("Verbose device list:")
print(device_lib.list_local_devices())
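Two small issues in the script above: `import tensorflow as tf` sits at the top, so TensorFlow is already loaded before `os.add_dll_directory()` runs, which defeats the workaround the comment describes; and TensorFlow is imported a second time near the bottom. A corrected ordering would look like this sketch:

```python
import os

# The DLL search path must be extended before TensorFlow is imported,
# otherwise the CUDA DLLs are resolved (or not) at import time.
os.add_dll_directory(r"E:\Nvidia\CUDA\v11.2\bin")

import tensorflow as tf  # noqa: E402  (deliberately after the workaround)
from tensorflow.python.client import device_lib  # noqa: E402

print(tf.config.list_physical_devices("GPU"))
print(device_lib.list_local_devices())
```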
huggingface-space/huggingface-space/huggingface-space/.dvc/.gitignore
DELETED
@@ -1,2 +0,0 @@
/config.local
/tmp
huggingface-space/huggingface-space/huggingface-space/.dvc/config
DELETED
File without changes
huggingface-space/huggingface-space/huggingface-space/.dvcignore
DELETED
File without changes
huggingface-space/huggingface-space/huggingface-space/.gitattributes
DELETED
@@ -1 +0,0 @@
sota_model/model.safetensors filter=lfs diff=lfs merge=lfs -text
huggingface-space/huggingface-space/huggingface-space/.gitignore
DELETED
@@ -1,112 +0,0 @@
# MLOps & Data Science Artifacts
# -------------------------------------------------------------------
# Ignore all data, models, and artifacts. These should be tracked by DVC.
/artifacts/
/data/
/sota_model/
# Ignore the DVC local cache. This is where the actual data files are stored.
.dvc/cache

# Ignore MLflow experiment tracking output
/mlruns/

# Ignore logs
/logs/
*.log

# Ignore common model file extensions, just in case
*.h5
*.pkl
*.model
*.onnx


# Python Virtual Environments
# -------------------------------------------------------------------
/venv/
/myenv/
/.venv/
/env/
/ENV/
*/.venv/
*/venv/
*/myenv/


# Python Byte-code and Caches
# -------------------------------------------------------------------
__pycache__/
*.py[cod]
*$py.class


# Python Packaging & Distribution
# -------------------------------------------------------------------
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST


# IDE and Editor Configuration
# -------------------------------------------------------------------
# PyCharm
.idea/

# Visual Studio Code (allow sharing of recommended extensions)
.vscode/*
!.vscode/extensions.json

# Sublime Text
*.sublime-project
*.sublime-workspace


# Secrets and Environment Variables
# -------------------------------------------------------------------
# NEVER commit secrets or environment variables
.env
*.env
secrets.yaml
secrets.json


# Operating System Files
# -------------------------------------------------------------------
# macOS
.DS_Store

# Windows
Thumbs.db
desktop.ini


# Jupyter Notebook Checkpoints
# -------------------------------------------------------------------
.ipynb_checkpoints/


# Other
# -------------------------------------------------------------------
# Temporary files
*.tmp
*.bak
*.swp

.env
*.env
secrets.yaml
secrets.json
processed_video.mp4
huggingface-space/huggingface-space/huggingface-space/Dockerfile
DELETED
File without changes
huggingface-space/huggingface-space/huggingface-space/LICENSE
DELETED
@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2025 ALYYAN

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
huggingface-space/huggingface-space/huggingface-space/README.md
DELETED
@@ -1,42 +0,0 @@
# 🎭 End-to-End Facial Emotion Recognition

<!-- Replace with a link to your final app screenshot -->

This repository contains a complete, end-to-end MLOps pipeline and a production-ready web application for real-time facial emotion recognition. The project leverages a state-of-the-art Vision Transformer model and is deployed as a user-friendly Gradio application on Hugging Face Spaces.

**Live Demo:** [🚀 Click here to try the application on Hugging Face Spaces!](https://huggingface.co/spaces/ALYYAN/Emotion-Recognition) <!-- Replace with your HF Space URL -->

---

## ✨ Features

- **Real-time Emotion Detection:** Analyzes your webcam feed to predict emotions in real-time.
- **High Accuracy:** Powered by a pre-trained Swin Transformer model fine-tuned on the massive AffectNet dataset for superior performance on "in the wild" faces.
- **Static Image & Video Analysis:** Upload your own images or videos for emotion prediction.
- **Polished UI:** A professional and responsive user interface with an animated background, built with Gradio.
- **Reproducible MLOps Pipeline:** The entire model training and data processing workflow is managed by DVC, ensuring 100% reproducibility.
- **Containerized for Deployment:** The application is packaged with Docker for easy and consistent deployment anywhere.

## 🛠️ Tech Stack

- **Model:** Swin Transformer (`PangPang/affectnet-swin-tiny-patch4-window7-224`)
- **ML/Ops:** Python, TensorFlow/Keras, DVC, MLflow, Hugging Face `transformers`
- **Backend & UI:** Gradio
- **Face Detection:** MTCNN
- **Deployment:** Hugging Face Spaces, Docker

## 🚀 Getting Started

Follow these steps to run the project locally.

### Prerequisites

- Python 3.10+
- Git and Git LFS ([installation guide](https://git-lfs.github.com))
- An NVIDIA GPU with CUDA drivers is recommended for the training pipeline, but the deployed app runs on CPU.

### 1. Clone the Repository

```bash
git clone https://github.com/YOUR-USERNAME/Emotion-Recognition-MLOps.git
cd Emotion-Recognition-MLOps
```
huggingface-space/huggingface-space/huggingface-space/app.py
DELETED
@@ -1,195 +0,0 @@
import gradio as gr
import os
import cv2
import time

from src.EmotionRecognition.pipeline.hf_predictor import HFPredictor

# --- INITIALIZE THE MODEL ---
print("[INFO] Initializing predictor...")
try:
    predictor = HFPredictor()
    print("[INFO] Predictor initialized successfully.")
except Exception as e:
    predictor = None
    print(f"[FATAL ERROR] Failed to initialize predictor: {e}")

# --- UI CONTENT & STYLING ---
# In app.py

CSS = """
/* Animated Gradient Background */
body {
    background: linear-gradient(-45deg, #0b0f19, #131a2d, #2a2a72, #522a72);
    background-size: 400% 400%;
    animation: gradient 15s ease infinite;
    color: #e0e0e0;
}
@keyframes gradient {
    0% { background-position: 0% 50%; }
    50% { background-position: 100% 50%; }
    100% { background-position: 0% 50%; }
}

/* General Layout & Typography */
.gradio-container { max-width: 1320px !important; margin: auto !important; }
#title { text-align: center; font-size: 3rem !important; font-weight: 700; color: #FFF; margin-bottom: 0.5rem; }
#subtitle { text-align: center; color: #bebebe; margin-top: 0; margin-bottom: 40px; font-size: 1.2rem; font-weight: 300; }
.gr-button { font-weight: bold !important; }

/* --- NEW: The "Glass Card" effect --- */
#main-card {
    background: rgba(22, 22, 34, 0.65); /* Semi-transparent dark background */
    border-radius: 16px;
    box-shadow: 0 8px 32px 0 rgba(0, 0, 0, 0.37);
    backdrop-filter: blur(12px); /* The "frosted glass" effect */
    -webkit-backdrop-filter: blur(12px); /* For Safari */
    border: 1px solid rgba(255, 255, 255, 0.18);
    padding: 1rem;
}
/* --- END NEW --- */

/* Prediction Bar Styling - now inside the card */
#predictions-column { background-color: transparent !important; border-radius: 12px; padding: 1.5rem; }
#predictions-column > .gr-label { display: none; }
.prediction-list { list-style-type: none; padding: 0; margin-top: 0; }
.prediction-list li { display: flex; align-items: center; margin-bottom: 12px; font-size: 1.1rem; }
.prediction-list .label { width: 100px; text-transform: capitalize; color: #e0e0e0; }
.prediction-list .bar-container { flex-grow: 1; height: 24px; background-color: rgba(255,255,255,0.1); border-radius: 12px; margin: 0 15px; overflow: hidden; }
.prediction-list .bar { height: 100%; background: linear-gradient(90deg, #8A2BE2, #C71585); border-radius: 12px; transition: width 0.2s ease-in-out; }
.prediction-list .percent { width: 60px; text-align: right; font-weight: bold; color: #FFF; }
footer { display: none !important; }
"""

ABOUT_MARKDOWN = """
### Model: Vision Transformer (ViT)
This application uses a Vision Transformer model, fine-tuned for facial emotion recognition.
### Dataset
The model was fine-tuned on the **Emotion Recognition Dataset** from Kaggle, a large, curated collection of labeled facial images. This diverse dataset allows the model to generalize to a wide variety of real-world faces and expressions.
*Dataset Link:* [https://www.kaggle.com/datasets/sujaykapadnis/emotion-recognition-dataset](https://www.kaggle.com/datasets/sujaykapadnis/emotion-recognition-dataset)
### MLOps Pipeline
This entire application, from data processing to training and deployment, was built using a reproducible MLOps pipeline, ensuring consistency and quality at every step.
"""

# --- BACKEND LOGIC ---
def create_prediction_html(probabilities):
    if not probabilities:
        return "<div style='padding: 2rem; text-align: center; color: #999;'>Waiting for prediction...</div>"
    html = "<ul class='prediction-list'>"
    sorted_preds = sorted(probabilities.items(), key=lambda item: item[1], reverse=True)
    for emotion, prob in sorted_preds:
        html += f"""
        <li>
            <strong class='label'>{emotion}</strong>
            <div class='bar-container'><div class='bar' style='width: {prob*100:.1f}%;'></div></div>
            <span class='percent'>{(prob*100):.1f}%</span>
        </li>
        """
    html += "</ul>"
    return html

def live_detection_stream():
    """A generator function that runs the live feed loop. This is the definitive fix."""
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("[ERROR] Cannot open webcam")
        return
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                time.sleep(0.01)
                continue

            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            annotated_frame, probabilities = predictor.process_frame(frame_rgb)
            yield annotated_frame, create_prediction_html(probabilities)
            time.sleep(0.05)  # Controls FPS. 0.05 = ~20 FPS target. The model inference will be the main bottleneck.
    finally:
        print("[INFO] Live feed stopped. Releasing webcam.")
        cap.release()

def process_image(image):
    if image is None: return None, create_prediction_html({})
    annotated_frame, probabilities = predictor.process_frame(image)
    return annotated_frame, create_prediction_html(probabilities)

def process_video(video_path, progress=gr.Progress(track_tqdm=True)):
    if video_path is None: return None
    try:
        cap = cv2.VideoCapture(video_path)
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        output_path = "processed_video.mp4"
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
        for _ in progress.tqdm(range(frame_count), desc="Processing Video"):
            ret, frame = cap.read()
            if not ret: break
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            annotated_frame, _ = predictor.process_frame(frame_rgb)
            if annotated_frame is not None:
                out.write(cv2.cvtColor(annotated_frame, cv2.COLOR_RGB2BGR))
        cap.release()
        out.release()
        return output_path
    except Exception as e:
        print(f"[ERROR] Video processing failed: {e}")
        return None

# --- GRADIO UI ---
with gr.Blocks(css=CSS, theme=gr.themes.Base()) as demo:
    gr.Markdown("# Facial Emotion Detector", elem_id="title")
    gr.Markdown("A real-time AI application powered by Vision Transformers", elem_id="subtitle")

    # --- NEW: Wrapper for the glass card effect ---
    with gr.Box(elem_id="main-card"):
        with gr.Tabs():
            with gr.TabItem("Live Detection"):
                with gr.Row(equal_height=True):
                    with gr.Column(scale=3):
                        live_output = gr.Image(label="Live Feed", interactive=False, height=550)
                    with gr.Column(scale=2, elem_id="predictions-column"):
                        gr.Markdown("### Emotion Probabilities") # Title for the panel
                        live_predictions = gr.HTML()
                with gr.Row():
                    start_button = gr.Button("Start Webcam", variant="primary", scale=1)
                    stop_button = gr.Button("Stop Webcam", variant="secondary", scale=1)

                stream_state = gr.State("Stop")

            with gr.TabItem("Upload Image"):
                with gr.Row(equal_height=True):
                    with gr.Column(scale=3):
                        image_input = gr.Image(type="numpy", label="Upload an Image", height=550)
                    with gr.Column(scale=2, elem_id="predictions-column"):
                        gr.Markdown("### Emotion Probabilities")
                        image_predictions = gr.HTML()
                image_button = gr.Button("Analyze Image", variant="primary")

            with gr.TabItem("Upload Video"):
                with gr.Row(equal_height=True):
                    video_input = gr.Video(label="Upload a Video File")
                    video_output = gr.Video(label="Processed Video")
                video_button = gr.Button("Analyze Video", variant="primary")

            with gr.TabItem("About"):
                gr.Markdown(ABOUT_MARKDOWN)
    # --- END WRAPPER ---

    # --- EVENT LISTENERS (No changes needed here) ---
    start_event = start_button.click(lambda: "Start", None, stream_state, queue=False)
    live_stream = start_event.then(live_detection_stream, stream_state, [live_output, live_predictions])

    stop_button.click(fn=None, inputs=None, outputs=None, cancels=[live_stream])

    image_button.click(process_image, [image_input], [image_input, image_predictions])
    video_button.click(process_video, [video_input], [video_output])

# --- LAUNCH THE APP ---
if predictor:
    demo.queue().launch(debug=True, share=True)
else:
    print("\n[FATAL ERROR] Could not start the application.")
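The heavy lifting in app.py is delegated to `HFPredictor.process_frame`, whose source is not shown in this diff. Based on the dependencies pinned in requirements.txt (MTCNN for face detection, `transformers` for the Swin model named in the README), a plausible sketch of such a method follows; every name and detail here is an assumption, not the project's actual implementation:

```python
import cv2
import numpy as np
from PIL import Image
from mtcnn import MTCNN
from transformers import pipeline

detector = MTCNN()
classifier = pipeline(
    "image-classification",
    model="PangPang/affectnet-swin-tiny-patch4-window7-224",  # model named in the README's Tech Stack
)

def process_frame(frame_rgb: np.ndarray):
    """Detect faces, classify each crop, and annotate the frame (illustrative only)."""
    probabilities = {}
    for face in detector.detect_faces(frame_rgb):
        x, y, w, h = face["box"]
        crop = Image.fromarray(frame_rgb[y:y + h, x:x + w])
        for pred in classifier(crop):
            probabilities[pred["label"]] = pred["score"]
        cv2.rectangle(frame_rgb, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return frame_rgb, probabilities
```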
huggingface-space/huggingface-space/huggingface-space/config/config.yaml
DELETED
@@ -1,25 +0,0 @@
(Contents identical to huggingface-space/huggingface-space/config/config.yaml above; this is one of the accidentally nested duplicate copies being removed.)
huggingface-space/huggingface-space/huggingface-space/dvc.lock
DELETED
@@ -1,203 +0,0 @@
(Contents identical to huggingface-space/huggingface-space/dvc.lock above; nested duplicate copy.)
huggingface-space/huggingface-space/huggingface-space/dvc.yaml
DELETED
@@ -1,39 +0,0 @@
(Contents identical to huggingface-space/huggingface-space/dvc.yaml above; nested duplicate copy.)
huggingface-space/huggingface-space/huggingface-space/gpuCheck.py
DELETED
@@ -1,42 +0,0 @@
(Contents identical to huggingface-space/huggingface-space/gpuCheck.py above, including the import-ordering issue noted there; nested duplicate copy.)
huggingface-space/huggingface-space/huggingface-space/huggingface-space/.gitattributes
DELETED
@@ -1 +0,0 @@
sota_model/model.safetensors filter=lfs diff=lfs merge=lfs -text
huggingface-space/huggingface-space/huggingface-space/huggingface-space/README.md
DELETED
@@ -1,42 +0,0 @@
(Contents identical to the README.md above, except that the Live Demo link here still points at the https://huggingface.co/spaces/YOUR-USERNAME/YOUR-SPACE-NAME placeholder; nested duplicate copy.)
huggingface-space/huggingface-space/huggingface-space/main.py
DELETED
@@ -1,70 +0,0 @@
from EmotionRecognition import logger
from EmotionRecognition.pipeline.stage_01_data_ingestion import DataIngestionTrainingPipeline
from EmotionRecognition.pipeline.stage_02_data_validation import DataValidationTrainingPipeline
from EmotionRecognition.pipeline.stage_01_data_preparation import DataPreparationPipeline
from EmotionRecognition.pipeline.stage_02_model_training import ModelTrainingPipeline
from EmotionRecognition.pipeline.stage_03_model_evaluation import ModelEvaluationPipeline

# Data Ingestion Stage
STAGE_NAME = "Data Ingestion Stage"
try:
    logger.info(f">>>>>> Stage '{STAGE_NAME}' started <<<<<<")
    obj = DataIngestionTrainingPipeline()
    obj.main()
    logger.info(f">>>>>> Stage '{STAGE_NAME}' completed successfully <<<<<<\n\nx==========x")
except Exception as e:
    logger.exception(e)
    raise e

# Data Validation Stage
STAGE_NAME = "Data Validation Stage"
try:
    logger.info(f">>>>>> Stage '{STAGE_NAME}' started <<<<<<")
    obj = DataValidationTrainingPipeline()
    obj.main()
    logger.info(f">>>>>> Stage '{STAGE_NAME}' completed successfully <<<<<<\n\nx==========x")
except Exception as e:
    logger.exception(e)
    raise e

# Data Preprocessing Stage
#STAGE_NAME = "Data Preprocessing Stage"
#try:
#    logger.info(f">>>>>> Stage '{STAGE_NAME}' started <<<<<<")
#    obj = DataPreprocessingTrainingPipeline()
#    obj.main()
#    logger.info(f">>>>>> Stage '{STAGE_NAME}' completed successfully <<<<<<\n\nx==========x")
#except Exception as e:
#    logger.exception(e)
#    raise e

STAGE_NAME = "Data Preparation Stage"
try:
    logger.info(f">>>>>> Stage '{STAGE_NAME}' started <<<<<<")
    obj = DataPreparationPipeline()
    obj.main()
    logger.info(f">>>>>> Stage '{STAGE_NAME}' completed successfully <<<<<<\n\nx==========x")
except Exception as e:
    logger.exception(e)
    raise e

STAGE_NAME = "Model Training Stage"
try:
    logger.info(f">>>>>> Stage '{STAGE_NAME}' started <<<<<<")
    obj = ModelTrainingPipeline()
    obj.main()
    logger.info(f">>>>>> Stage '{STAGE_NAME}' completed successfully <<<<<<\n\nx==========x")
except Exception as e:
    logger.exception(e)
    raise e

# Model Evaluation Stage
STAGE_NAME = "Model Evaluation Stage"
try:
    logger.info(f">>>>>> Stage '{STAGE_NAME}' started <<<<<<")
    obj = ModelEvaluationPipeline()
    obj.main()
    logger.info(f">>>>>> Stage '{STAGE_NAME}' completed successfully <<<<<<\n\nx==========x")
except Exception as e:
    logger.exception(e)
    raise e
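Since the five stage blocks above are structurally identical, an equivalent and less repetitive formulation is a simple table-driven loop; this is an editorial sketch, not code from the repository:

```python
STAGES = [
    ("Data Ingestion Stage", DataIngestionTrainingPipeline),
    ("Data Validation Stage", DataValidationTrainingPipeline),
    ("Data Preparation Stage", DataPreparationPipeline),
    ("Model Training Stage", ModelTrainingPipeline),
    ("Model Evaluation Stage", ModelEvaluationPipeline),
]

for stage_name, pipeline_cls in STAGES:
    try:
        logger.info(f">>>>>> Stage '{stage_name}' started <<<<<<")
        pipeline_cls().main()
        logger.info(f">>>>>> Stage '{stage_name}' completed successfully <<<<<<\n\nx==========x")
    except Exception as e:
        logger.exception(e)
        raise e
```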
huggingface-space/huggingface-space/huggingface-space/params.yaml
DELETED
@@ -1,15 +0,0 @@
DATA_PARAMS:
  IMAGE_SIZE: [224, 224]
  CHANNELS: 3
  BATCH_SIZE: 32
  # Our final 7 classes (Contempt is merged into Disgust)
  CLASSES: ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
  NUM_CLASSES: 7

TRAINING_PARAMS:
  EPOCHS: 50 # A solid number for a baseline run
  LEARNING_RATE: 0.0001 # A small, stable learning rate
  OPTIMIZER: Adam
  LOSS_FUNCTION: CategoricalCrossentropy
  METRICS: ['accuracy']
  DROPOUT_RATE: 0.5 # Strong regularization is good
huggingface-space/huggingface-space/huggingface-space/requirements.txt
DELETED
@@ -1,44 +0,0 @@
# -----------------------------------------------------------
# MLOps Pipeline & Data Versioning
# -----------------------------------------------------------
dvc[s3] # For data versioning. Change [s3] to your remote type or remove.
kaggle # For downloading datasets from Kaggle.


# -----------------------------------------------------------
# Core Machine Learning & Deep Learning
# -----------------------------------------------------------
# Using the stable TensorFlow 2.10 for GPU support on native Windows
tensorflow==2.10.0
scikit-learn # For evaluation metrics.

# Hugging Face SOTA Model Dependencies
transformers # For loading models and processors from the Hub.
torch # PyTorch is a backend dependency for many HF vision models.
torchvision
timm # Another common dependency for HF vision transformers.

# Computer Vision
opencv-python # For image/video processing and drawing.
mtcnn # For fast and effective face detection.
Pillow # For basic image manipulation.


# -----------------------------------------------------------
# Web Application & User Interface
# -----------------------------------------------------------
gradio==3.50.2 # Locked to a stable version for consistent UI behavior.


# -----------------------------------------------------------
# Utilities
# -----------------------------------------------------------
numpy
pandas # For data manipulation in the preparation stage.
PyYAML # For reading .yaml configuration files.
python-box # For dot-notation access to config dictionaries.
tqdm # For progress bars in scripts.
ensure # For runtime type checking.
matplotlib # For plotting (useful in research).
seaborn # For advanced plotting (useful in research).
notebook # For running Jupyter notebooks.
huggingface-space/huggingface-space/huggingface-space/research/01_data_exploration.ipynb
DELETED
The diff for this file is too large to render. See raw diff.
huggingface-space/huggingface-space/huggingface-space/setup.py
DELETED

@@ -1,28 +0,0 @@
-import setuptools
-
-with open("README.md", "r", encoding="utf-8") as f:
-    long_description = f.read()
-
-__version__ = "0.0.1"
-
-REPO_NAME = "Emotion-Recognition-MLOps"
-AUTHOR_USER_NAME = "AlyyanAhmed21"
-SRC_REPO = "EmotionRecognition"
-AUTHOR_EMAIL = "alyyanawan19@gmail.com"
-
-
-setuptools.setup(
-    name=SRC_REPO,
-    version=__version__,
-    author=AUTHOR_USER_NAME,
-    author_email=AUTHOR_EMAIL,
-    description="A small python package for MLOps based facial emotion detection app",
-    long_description=long_description,
-    long_description_content_type="text/markdown",
-    url=f"https://github.com/{AUTHOR_USER_NAME}/{REPO_NAME}",
-    project_urls={
-        "Bug Tracker": f"https://github.com/{AUTHOR_USER_NAME}/{REPO_NAME}/issues",
-    },
-    package_dir={"": "src"},
-    packages=setuptools.find_packages(where="src")
-)
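
Because package_dir={"": "src"} points setuptools at a src/ layout, the package would typically be installed in editable mode during development, after which a plain import verifies the wiring. A sketch under that assumption (the install command is the standard setuptools workflow, not shown in the deleted files):

    # After running `pip install -e .` from the repo root (standard
    # setuptools editable-install workflow; assumed, not shown here):
    import EmotionRecognition

    # The package __init__ configures logging on import (next deleted file).
    EmotionRecognition.logger.info("EmotionRecognition package imported successfully.")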
huggingface-space/huggingface-space/huggingface-space/src/EmotionRecognition/__init__.py
DELETED

@@ -1,25 +0,0 @@
-import os
-import sys
-import logging
-
-# Define the logging format
-logging_str = "[%(asctime)s: %(levelname)s: %(module)s: %(message)s]"
-
-# Define the directory for log files
-log_dir = "logs"
-log_filepath = os.path.join(log_dir, "running_logs.log")
-os.makedirs(log_dir, exist_ok=True)
-
-# Configure the logging
-logging.basicConfig(
-    level=logging.INFO,
-    format=logging_str,
-
-    handlers=[
-        logging.FileHandler(log_filepath),  # Log to a file
-        logging.StreamHandler(sys.stdout)   # Log to the console
-    ]
-)
-
-# Create a logger object that can be imported by other modules
-logger = logging.getLogger("EmotionRecognitionLogger")
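
Every other module in the package imports this shared logger, so a single import gives dual file-and-console output. A minimal sketch of the consumption pattern (the message text is illustrative; the import mirrors the components below):

    # Illustrative consumer of the shared logger configured in __init__.py.
    from EmotionRecognition import logger

    logger.info("This message goes to logs/running_logs.log and to stdout.")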
huggingface-space/huggingface-space/huggingface-space/src/EmotionRecognition/components/__init__.py
DELETED

File without changes
huggingface-space/huggingface-space/huggingface-space/src/EmotionRecognition/components/data_ingestion.py
DELETED

@@ -1,27 +0,0 @@
-# File: src/EmotionRecognition/components/data_ingestion.py
-import os
-from EmotionRecognition import logger
-from EmotionRecognition.entity.config_entity import DataIngestionConfig
-
-class DataIngestion:
-    def __init__(self, config: DataIngestionConfig):
-        self.config = config
-
-    def validate_source_data(self):
-        """
-        Validates the existence of all raw source data files and folders.
-        """
-        logger.info("Validating source data files and folders...")
-
-        all_paths = [
-            self.config.root_dir,
-            self.config.ferplus_pixels_csv,
-            self.config.ferplus_labels_csv,
-            self.config.ckplus_dir
-        ]
-
-        for path in all_paths:
-            if not os.path.exists(path):
-                raise FileNotFoundError(f"Missing required raw data source: {path}")
-
-        logger.info("All raw data sources found successfully.")
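
A pipeline stage would typically construct the DataIngestionConfig and hand it to DataIngestion. A hypothetical wiring sketch; the ConfigurationManager import path and method name are assumptions, since those files are not part of this diff section:

    # Hypothetical stage wiring; ConfigurationManager and
    # get_data_ingestion_config() are assumed names, not confirmed here.
    from EmotionRecognition.config.configuration import ConfigurationManager  # assumed
    from EmotionRecognition.components.data_ingestion import DataIngestion

    ingestion_config = ConfigurationManager().get_data_ingestion_config()  # assumed API
    ingestion = DataIngestion(config=ingestion_config)
    ingestion.validate_source_data()  # raises FileNotFoundError if any raw source is missing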
huggingface-space/huggingface-space/huggingface-space/src/EmotionRecognition/components/data_preparation.py
DELETED

@@ -1,118 +0,0 @@
-# File: src/EmotionRecognition/components/data_preparation.py
-import os
-import pandas as pd
-import numpy as np
-from PIL import Image
-from tqdm import tqdm
-import shutil
-from EmotionRecognition import logger
-from EmotionRecognition.entity.config_entity import DataPreparationConfig
-from pathlib import Path
-import glob
-
-class DataPreparation:
-    def __init__(self, config: DataPreparationConfig, params: dict):
-        self.config = config
-        self.params = params.DATA_PARAMS
-
-    def _process_and_save(self, best_emotion_name, usage, index, pixels, dataset_prefix):
-        """Helper function to handle the merging logic and save images."""
-
-        # --- MERGING LOGIC ---
-        # If the emotion is 'contempt', we re-label it as 'disgust'.
-        if best_emotion_name == 'contempt':
-            final_emotion_name = 'disgust'
-        else:
-            final_emotion_name = best_emotion_name
-        # --- END MERGING LOGIC ---
-
-        # Check if this emotion is one of our final target classes
-        if final_emotion_name in self.params.CLASSES:
-            if usage == 'Training':
-                output_dir = self.config.combined_train_dir
-            elif usage == 'PublicTest':
-                output_dir = self.config.ferplus_test_dir
-            else:
-                return  # Skip other usages like PrivateTest
-
-            image = Image.fromarray(pixels)
-            emotion_folder = Path(output_dir) / final_emotion_name
-            emotion_folder.mkdir(parents=True, exist_ok=True)
-            image.save(emotion_folder / f"{dataset_prefix}_{index}.png")
-
-    def _prepare_ferplus(self):
-        logger.info("Starting preparation of FER+ dataset...")
-        pixels_df = pd.read_csv(self.config.ferplus_pixels_csv)
-        labels_df = pd.read_csv(self.config.ferplus_labels_csv)
-
-        ferplus_emotion_columns = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt']
-
-        for index, row in tqdm(pixels_df.iterrows(), total=len(pixels_df), desc="Processing FER+ Images"):
-            label_votes = labels_df.iloc[index][ferplus_emotion_columns].values
-            source_emotion_name = ferplus_emotion_columns[np.argmax(label_votes)]
-
-            # --- STANDARDIZE THE NAME ---
-            # Default to the source name
-            our_emotion_name = source_emotion_name
-            if source_emotion_name == 'happiness': our_emotion_name = 'happy'
-            if source_emotion_name == 'sadness': our_emotion_name = 'sad'
-            if source_emotion_name == 'anger': our_emotion_name = 'angry'
-            if source_emotion_name == 'contempt': our_emotion_name = 'disgust'  # MERGE
-
-            if our_emotion_name in self.params.CLASSES:
-                usage = row['Usage']
-                if usage == 'Training': output_dir = self.config.combined_train_dir
-                elif usage == 'PublicTest': output_dir = self.config.ferplus_test_dir
-                else: continue
-
-                pixels = np.array(row['pixels'].split(), 'uint8').reshape((48, 48))
-                image = Image.fromarray(pixels)
-                emotion_folder = Path(output_dir) / our_emotion_name
-                emotion_folder.mkdir(parents=True, exist_ok=True)
-                image.save(emotion_folder / f"ferplus_{index}.png")
-
-        logger.info("FER+ dataset preparation complete.")
-
-    def _prepare_ckplus(self):
-        logger.info("Starting preparation of CK+ dataset...")
-
-        for ckplus_folder_name in tqdm(os.listdir(self.config.ckplus_dir), desc="Processing CK+ Folders"):
-            source_emotion_dir = Path(self.config.ckplus_dir) / ckplus_folder_name
-
-            # --- STANDARDIZE THE NAME ---
-            our_emotion_name = ckplus_folder_name  # Default
-            if ckplus_folder_name == 'contempt': our_emotion_name = 'disgust'  # MERGE
-
-            if our_emotion_name in self.params.CLASSES and source_emotion_dir.is_dir():
-                dest_emotion_dir = Path(self.config.combined_train_dir) / our_emotion_name
-                dest_emotion_dir.mkdir(parents=True, exist_ok=True)
-
-                for img_file in os.listdir(source_emotion_dir):
-                    shutil.copy(source_emotion_dir / img_file, dest_emotion_dir / f"ckplus_{img_file}")
-
-        logger.info("CK+ dataset preparation complete.")
-
-    def _log_dataset_statistics(self):
-        logger.info("--- Final Dataset Statistics ---")
-        logger.info("Training Set:")
-        for emotion in sorted(self.params.CLASSES):
-            count = len(glob.glob(str(self.config.combined_train_dir / emotion / '*.png')))
-            logger.info(f"- {emotion}: {count} images")
-
-        logger.info("\nTest Set:")
-        for emotion in sorted(self.params.CLASSES):
-            count = len(glob.glob(str(self.config.ferplus_test_dir / emotion / '*.png')))
-            logger.info(f"- {emotion}: {count} images")
-        logger.info("---------------------------------")
-
-    def combine_and_prepare_data(self):
-        logger.info("--- Starting Data Preparation Stage ---")
-        if os.path.exists(self.config.combined_train_dir): shutil.rmtree(self.config.combined_train_dir)
-        if os.path.exists(self.config.ferplus_test_dir): shutil.rmtree(self.config.ferplus_test_dir)
-        os.makedirs(self.config.combined_train_dir, exist_ok=True)
-        os.makedirs(self.config.ferplus_test_dir, exist_ok=True)
-
-        self._prepare_ferplus()
-        self._prepare_ckplus()
-        self._log_dataset_statistics()
-        logger.info("--- Data Preparation Stage Complete ---")
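
The label standardization in _prepare_ferplus and _prepare_ckplus is the core of the merge: FER+ emotion names are mapped onto the project's class names, and 'contempt' is folded into 'disgust'. A behaviorally equivalent sketch of that mapping as a single lookup table; the names come from the code above, while the table form itself is an editorial restatement rather than the file's own structure:

    # Same renames as the chained if-statements in _prepare_ferplus;
    # unmapped names (neutral, surprise, disgust, fear) pass through unchanged.
    FERPLUS_TO_PROJECT = {
        'happiness': 'happy',
        'sadness': 'sad',
        'anger': 'angry',
        'contempt': 'disgust',  # merged into 'disgust'
    }

    def standardize(source_emotion_name: str) -> str:
        return FERPLUS_TO_PROJECT.get(source_emotion_name, source_emotion_name)

    assert standardize('contempt') == 'disgust'
    assert standardize('neutral') == 'neutral'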