# NOTE(review): this file was recovered from a Hugging Face Spaces page that
# showed "Build error"; the pasted table formatting has been stripped below.
# ============================================================
# app.py : Gradio application — facial emotion detection
# Deployed on Hugging Face Spaces
# ============================================================
import json

import cv2
import gradio as gr
import numpy as np
import tensorflow as tf
from huggingface_hub import hf_hub_download

# --- Download and load the trained model from our Hugging Face repo ---
print("⏳ Chargement du modèle...")
model_path = hf_hub_download(
    repo_id="Amadoudllo/emotion-detection-cnn",
    filename="best_cnn_model.keras",
)
model = tf.keras.models.load_model(model_path)
print("✅ Modèle chargé !")

# --- Load the class mapping (stringified index -> emotion label) ---
class_path = hf_hub_download(
    repo_id="Amadoudllo/emotion-detection-cnn",
    filename="class_indices.json",
)
with open(class_path, "r") as f:
    idx_to_class = json.load(f)

# --- Haar-cascade face detector shipped with OpenCV ---
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
)

# --- Parameters ---
IMG_SIZE = 48  # the CNN expects 48x48 grayscale inputs

# Emoji shown next to each emotion in the Gradio score labels.
EMOJIS = {
    "Angry": "😠",
    "Fear": "😨",
    "Happy": "😄",
    "Neutral": "😐",
    "Sad": "😢",
    "Surprise": "😲",
}
def clean_label(label):
    """Fix the training-set typo 'Suprise' -> 'Surprise'; all other labels pass through unchanged."""
    return "Surprise" if label == "Suprise" else label
def _display_label(raw_label):
    """Return the emoji-prefixed label used as a key in the Gradio score dict."""
    name = clean_label(raw_label)
    return f"{EMOJIS.get(name, '')} {name}"


def _score_dict(values):
    """Map every known class to its probability (plain floats for gr.Label)."""
    return {
        _display_label(idx_to_class[str(i)]): float(values[i])
        for i in range(len(idx_to_class))
    }


def detect_emotion(image):
    """
    Main callback for each webcam frame or uploaded image.

    Parameters
    ----------
    image : np.ndarray | None
        RGB image (H, W, 3) supplied by Gradio, or None when no frame exists.

    Returns
    -------
    tuple
        (annotated RGB image, {label: probability} dict for gr.Label).
        When several faces are detected, the scores of the last face win.
    """
    if image is None:
        return None, {}

    # RGB -> BGR for OpenCV drawing; grayscale for detection + the model.
    frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # --- Face detection ---
    faces = face_cascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
    )

    # Default: all-zero scores when no face is found.
    scores = _score_dict([0.0] * len(idx_to_class))

    for (x, y, w, h) in faces:
        # --- Prepare the face crop for the model: 48x48 grayscale in [0, 1] ---
        roi = cv2.resize(gray[y:y + h, x:x + w], (IMG_SIZE, IMG_SIZE))
        inp = roi.reshape(1, IMG_SIZE, IMG_SIZE, 1).astype("float32") / 255.0

        # --- Prediction ---
        preds = model.predict(inp, verbose=0)[0]
        idx = int(np.argmax(preds))
        emotion = clean_label(idx_to_class[str(idx)])
        confidence = preds[idx] * 100

        # Scores for the Gradio label component.
        scores = _score_dict(preds)

        # --- Draw the annotation on the frame ---
        cv2.rectangle(frame, (x, y), (x + w, y + h), (102, 126, 234), 2)
        # Hershey fonts cannot render emoji (they would draw as '?'), so the
        # on-image label is text-only; emojis stay in the score dict keys.
        label = f"{emotion} ({confidence:.1f}%)"
        (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
        # Clamp the label background so it never extends above the frame.
        top = max(y - th - 12, 0)
        cv2.rectangle(frame, (x, top), (x + tw + 10, y), (102, 126, 234), -1)
        cv2.putText(frame, label, (x + 5, y - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

    # BGR -> RGB so Gradio displays the colors correctly.
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    return frame_rgb, scores
# ============================================================
# Gradio interface
# ============================================================
with gr.Blocks(
    title="🎭 Détection d'Émotions Faciales",
    theme=gr.themes.Soft()
) as demo:
    # --- Header ---
    gr.Markdown("""
# 🎭 Détection d'Émotions Faciales en Temps Réel
**Modèle CNN from Scratch — 64.44% accuracy — 6 émotions détectées**
👉 Utilise ta **webcam** ou **uploade une photo** de visage !
> Modèle entraîné sur Kaggle · Code sur [GitHub](https://github.com/amadoudllo/emotion-detection-cnn)
""")

    with gr.Tabs():
        # ── Tab 1: real-time webcam ──
        with gr.Tab("📷 Webcam Temps Réel"):
            gr.Markdown("### Active ta webcam et place ton visage devant la caméra")
            with gr.Row():
                with gr.Column():
                    webcam = gr.Image(
                        sources=["webcam"],
                        streaming=True,
                        label="Webcam",
                        mirror_webcam=True,
                    )
                with gr.Column():
                    webcam_output = gr.Image(
                        label="Détection",
                        streaming=True,
                    )
                    webcam_scores = gr.Label(
                        label="Scores par émotion",
                        num_top_classes=6,
                    )
            # Stream every webcam frame through the detector.
            webcam.stream(
                fn=detect_emotion,
                inputs=[webcam],
                outputs=[webcam_output, webcam_scores],
            )

        # ── Tab 2: image upload ──
        with gr.Tab("🖼️ Uploader une Image"):
            gr.Markdown("### Uploade une photo de visage pour analyser l'émotion")
            with gr.Row():
                with gr.Column():
                    img_input = gr.Image(
                        sources=["upload"],
                        label="Image à analyser",
                        type="numpy",
                    )
                    btn = gr.Button(
                        "🔍 Détecter l'émotion",
                        variant="primary",
                    )
                with gr.Column():
                    img_output = gr.Image(label="Résultat")
                    img_scores = gr.Label(
                        label="Scores par émotion",
                        num_top_classes=6,
                    )
            # The button triggers one detection run on the uploaded image.
            btn.click(
                fn=detect_emotion,
                inputs=[img_input],
                outputs=[img_output, img_scores],
            )

    # --- Footer ---
    gr.Markdown("""
---
**Émotions détectées :**
😠 Angry · 😨 Fear · 😄 Happy · 😐 Neutral · 😢 Sad · 😲 Surprise
**Défi IA — Semaine 1** · Construit avec ❤️ par Amadoudllo
""")

demo.launch()