File size: 1,969 Bytes
89b0b12
 
 
 
 
291a517
89b0b12
 
 
 
291a517
89b0b12
 
 
 
 
 
 
 
 
 
 
 
291a517
89b0b12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
291a517
89b0b12
 
 
 
 
 
 
 
291a517
89b0b12
291a517
 
89b0b12
291a517
 
89b0b12
 
 
 
291a517
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import gradio as gr
import cv2
import numpy as np
from keras.models import load_model

# Load face cascade
# Haar cascade detector for frontal faces, shipped with the OpenCV install
# (cv2.data.haarcascades points at the bundled XML directory).
face_classifier = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
)

# Load emotion model
# Custom CNN loaded from the working directory; detect_emotion feeds it
# 48x48 grayscale patches scaled to [0, 1] — NOTE(review): assumes the model
# was trained on that exact input shape/scaling; confirm against training code.
classifier = load_model("Custom_CNN_model.keras")

# Class names indexed by the model's output neuron; order must match the
# training label encoding — presumably alphabetical FER-style classes, verify.
emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']


def detect_emotion(image):
    """Annotate every detected face in an RGB image with its predicted emotion.

    Faces are located with the module-level Haar cascade; each face patch is
    converted to a 48x48 grayscale tensor and classified by the CNN.

    Args:
        image: RGB image as a numpy array, or None (e.g. cleared widget).

    Returns:
        The annotated image in RGB order, or None when no image was given.
    """
    if image is None:
        return None

    # Work in BGR because that is what the cv2 drawing helpers expect.
    frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    detections = face_classifier.detectMultiScale(grayscale, 1.1, 5)

    for (left, top, width, height) in detections:
        right, bottom = left + width, top + height

        # Yellow bounding box around the detected face.
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 255), 2)

        # Crop the face region and shape it into the model's expected
        # (batch, height, width, channels) = (1, 48, 48, 1) float input.
        face_patch = cv2.resize(grayscale[top:bottom, left:right], (48, 48))
        model_input = (face_patch.astype("float32") / 255.0).reshape(1, 48, 48, 1)

        scores = classifier.predict(model_input)[0]
        emotion = emotion_labels[int(np.argmax(scores))]

        # Green label just above the bounding box.
        cv2.putText(frame, emotion, (left, top - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    # Hand the result back in RGB order for display.
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)


# Build the Gradio UI: inputs (upload + webcam) on the left, result on the
# right. Both the button click and any webcam change feed detect_emotion.
with gr.Blocks() as app:
    gr.Markdown(
        """
        <h1 style='text-align:center; color:#2D99FF;'>Emotion Detection App</h1>
        <p style='text-align:center;'>Upload an image or use your webcam to detect emotions using a custom CNN model.</p>
        """
    )

    with gr.Row():
        with gr.Column():
            upload_input = gr.Image(type="numpy", label="Upload Image")
            camera_input = gr.Image(type="numpy", sources=["webcam"], label="Webcam")
            detect_button = gr.Button("Detect Emotion")

        with gr.Column():
            result_output = gr.Image(type="numpy", label="Result")

    # Uploaded images run only when the button is pressed; webcam frames
    # re-run detection automatically whenever the capture changes.
    detect_button.click(fn=detect_emotion, inputs=upload_input, outputs=result_output)
    camera_input.change(fn=detect_emotion, inputs=camera_input, outputs=result_output)

app.launch()