import cv2
import numpy as np
import streamlit as st
from fer import FER
from PIL import Image

# Initialize emotion detector
emotion_detector = FER()

# Function to process an image and detect emotions
def process_image(image):
    frame = np.array(image.convert("RGB"))  # Ensure 3 channels (e.g., strips alpha from PNGs)
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)  # Convert from PIL to OpenCV format
    emotions = emotion_detector.detect_emotions(frame)

    # Blur the whole frame; detected face regions are pasted back in sharp below
    blurred_frame = cv2.GaussianBlur(frame, (51, 51), 0)

    for face in emotions:
        (x, y, w, h) = face["box"]
        x, y = max(x, 0), max(y, 0)  # Guard against negative box coordinates from the detector
        emotion, score = max(face["emotions"].items(), key=lambda item: item[1])

        # Draw a semi-transparent green rectangle around the face
        overlay = frame.copy()
        alpha = 0.4
        cv2.rectangle(overlay, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)

        # Keep the face region sharp inside the blurred frame
        blurred_frame[y:y + h, x:x + w] = frame[y:y + h, x:x + w]

        # Label the face with its top emotion and confidence score
        font = cv2.FONT_HERSHEY_SIMPLEX
        font_scale = 0.7
        font_thickness = 2
        text_color = (255, 255, 255)
        bg_color = (0, 0, 0)
        text = f"{emotion}: {score:.2f}"
        (text_width, text_height), _ = cv2.getTextSize(text, font, font_scale, font_thickness)

        # Place the label above the face if there is room, otherwise below it
        text_x = x + 10
        text_y = y - 10 if y - 10 > text_height else y + h + text_height
        cv2.rectangle(blurred_frame, (text_x - 5, text_y - text_height - 5),
                      (text_x + text_width + 5, text_y + 5), bg_color, -1)
        cv2.putText(blurred_frame, text, (text_x, text_y), font, font_scale,
                    text_color, font_thickness)

    return cv2.cvtColor(blurred_frame, cv2.COLOR_BGR2RGB)  # Convert back to RGB for Streamlit

# Streamlit UI
st.title("Real-Time Emotion Recognition")
st.write("Use the camera or upload an image to detect emotions.")

# Camera input
camera_image = st.camera_input("Take a picture")

# File upload
uploaded_file = st.file_uploader("Or upload an image...", type=["jpg", "png", "jpeg"])

# Process the image if one was uploaded or captured via the camera
if camera_image or uploaded_file:
    image = Image.open(camera_image if camera_image else uploaded_file)
    st.image(image, caption="Captured Image", use_column_width=True)
    processed_image = process_image(image)
    st.image(processed_image, caption="Processed Image with Emotions", use_column_width=True)
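
# To try the app locally, a minimal sketch (assuming the script above is saved
# as app.py; note that the fer package requires a TensorFlow backend):
#
#   pip install streamlit opencv-python fer pillow numpy
#   streamlit run app.py
#
# Streamlit serves the app in the browser, where the camera widget and file
# uploader above feed process_image().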