"""Gradio app: detect facial emotions in an image with a custom CNN.

Faces are located with OpenCV's Haar cascade, cropped to 48x48 grayscale,
scaled to [0, 1], and classified by a pre-trained Keras model into one of
seven emotion categories.
"""

import cv2
import gradio as gr
import numpy as np
from keras.models import load_model

# Haar cascade for frontal-face detection (bundled with OpenCV).
face_classifier = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
)

# Pre-trained emotion classifier; fed (1, 48, 48, 1) float32 inputs below.
classifier = load_model("Custom_CNN_model.keras")

# Index order must match the model's output layer.
emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']


def detect_emotion(image):
    """Annotate every detected face in *image* with its predicted emotion.

    Parameters
    ----------
    image : np.ndarray | None
        Image as delivered by a Gradio ``Image`` component (RGB, RGBA, or
        grayscale), or ``None`` when the component is empty.

    Returns
    -------
    np.ndarray | None
        RGB image with bounding boxes and emotion labels drawn, or ``None``
        when no image was supplied.
    """
    if image is None:
        return None

    # Normalize to 3-channel RGB first: Gradio uploads/webcam frames can
    # arrive as RGBA or grayscale, which would make RGB2BGR raise.
    if image.ndim == 2:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    elif image.shape[2] == 4:
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)

    img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)

        roi_gray = cv2.resize(gray[y:y + h, x:x + w], (48, 48))
        roi = roi_gray.astype("float32") / 255.0
        roi = roi[np.newaxis, :, :, np.newaxis]  # -> (1, 48, 48, 1)

        # verbose=0 keeps Keras from printing a progress bar per request.
        prediction = classifier.predict(roi, verbose=0)[0]
        label = emotion_labels[int(prediction.argmax())]
        cv2.putText(img, label, (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)


with gr.Blocks() as app:
    gr.Markdown(
        """
Upload an image or use your webcam to detect emotions using a custom CNN model.
"""
    )
    with gr.Row():
        with gr.Column():
            input_img = gr.Image(type="numpy", label="Upload Image")
            webcam = gr.Image(type="numpy", sources=["webcam"], label="Webcam")
            run_btn = gr.Button("Detect Emotion")
        with gr.Column():
            output_img = gr.Image(type="numpy", label="Result")

    run_btn.click(fn=detect_emotion, inputs=input_img, outputs=output_img)
    # Webcam snapshots run inference automatically on change.
    webcam.change(fn=detect_emotion, inputs=webcam, outputs=output_img)

# Guard so importing this module (e.g. for tests) doesn't start a server.
if __name__ == "__main__":
    app.launch()