| | import streamlit as st |
| | import cv2 |
| | import numpy as np |
| | from tensorflow.keras.models import load_model |
| |
|
| |
|
# Load the pre-trained facial-expression-recognition CNN (Keras HDF5 format).
# NOTE(review): path is relative to the current working directory — confirm
# the deployment layout places FER-CNN.h5 next to this script.
model = load_model("FER-CNN.h5")

# Class labels, in the order of the model's output units.
# NOTE(review): many FER datasets use 7 classes (including 'Disgust') — confirm
# this 6-label list matches the model's actual output dimension.
emotion_labels = ['Angry', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']
| |
|
def detect_emotion(frame):
    """Detect faces in *frame*, classify each face's emotion, and annotate the frame.

    Draws a bounding box and the predicted emotion label onto *frame* in place
    for every face found, and returns the (mutated) frame.

    Parameters
    ----------
    frame : np.ndarray
        A 3-channel image. NOTE(review): the caller passes an RGB frame but the
        conversion below uses COLOR_BGR2GRAY — the channel-weighting is slightly
        off for RGB input; confirm which order the model was trained on.

    Returns
    -------
    np.ndarray
        The same frame, annotated.
    """
    # Cache the Haar cascade on the function object so the XML file is loaded
    # from disk once, not on every video frame (this runs inside a camera loop).
    cascade = getattr(detect_emotion, "_cascade", None)
    if cascade is None:
        cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        detect_emotion._cascade = cascade

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

    for (x, y, w, h) in faces:
        # Crop the face region and prepare it as a (1, 48, 48, 1) float batch,
        # normalized to [0, 1], matching the CNN's expected input.
        roi_gray = gray[y:y + h, x:x + w]
        roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
        roi_gray = roi_gray.astype('float32') / 255.0  # float32: standard Keras input dtype
        roi_gray = np.expand_dims(roi_gray, axis=-1)   # add channel axis
        roi_gray = np.expand_dims(roi_gray, axis=0)    # add batch axis

        prediction = model.predict(roi_gray)[0]
        emotion = emotion_labels[prediction.argmax()]

        # Annotate: yellow box around the face, green label above it.
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
        cv2.putText(frame, emotion, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    return frame
| |
|
st.title("Real-Time Face Emotion Recognition")

run = st.checkbox("Start Webcam")

# Placeholder that is updated in place with each processed frame.
FRAME_WINDOW = st.image([])

if run:
    # Open the camera only while the checkbox is ticked, so the device is not
    # held while the app is idle. (Streamlit re-runs this whole script on every
    # widget interaction, which is what eventually stops the loop below.)
    camera = cv2.VideoCapture(0)
    try:
        while True:
            ret, frame = camera.read()
            if not ret:
                st.error("Failed to capture video.")
                break

            # OpenCV delivers BGR; convert so Streamlit displays colors correctly.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = detect_emotion(frame)
            FRAME_WINDOW.image(frame, channels="RGB")
    finally:
        # Guarantee the camera is released even if the loop raises.
        # (cv2.destroyAllWindows() was dropped: no HighGUI windows are ever
        # created in a Streamlit app, so it was a no-op.)
        camera.release()