Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,16 +1,13 @@
|
|
| 1 |
import cv2
|
| 2 |
import streamlit as st
|
| 3 |
from fer import FER
|
| 4 |
-
from PIL import Image
|
| 5 |
import numpy as np
|
| 6 |
|
| 7 |
-
# Initialize
|
| 8 |
emotion_detector = FER()
|
| 9 |
|
| 10 |
-
# Function to process
|
| 11 |
-
def
|
| 12 |
-
frame = np.array(image)
|
| 13 |
-
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) # Convert from PIL to OpenCV format
|
| 14 |
emotions = emotion_detector.detect_emotions(frame)
|
| 15 |
blurred_frame = cv2.GaussianBlur(frame, (51, 51), 0)
|
| 16 |
|
|
@@ -18,13 +15,16 @@ def process_image(image):
|
|
| 18 |
(x, y, w, h) = face["box"]
|
| 19 |
emotion, score = max(face["emotions"].items(), key=lambda item: item[1])
|
| 20 |
|
|
|
|
| 21 |
overlay = frame.copy()
|
| 22 |
alpha = 0.4
|
| 23 |
cv2.rectangle(overlay, (x, y), (x + w, y + h), (0, 255, 0), 2)
|
| 24 |
cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)
|
| 25 |
|
|
|
|
| 26 |
blurred_frame[y:y + h, x:x + w] = frame[y:y + h, x:x + w]
|
| 27 |
|
|
|
|
| 28 |
font = cv2.FONT_HERSHEY_SIMPLEX
|
| 29 |
font_scale = 0.7
|
| 30 |
font_thickness = 2
|
|
@@ -39,22 +39,38 @@ def process_image(image):
|
|
| 39 |
cv2.rectangle(blurred_frame, (text_x - 5, text_y - text_height - 5), (text_x + text_width + 5, text_y + 5), bg_color, -1)
|
| 40 |
cv2.putText(blurred_frame, text, (text_x, text_y), font, font_scale, text_color, font_thickness)
|
| 41 |
|
| 42 |
-
return
|
| 43 |
|
| 44 |
# Streamlit UI
|
| 45 |
-
st.title("
|
| 46 |
-
st.write("
|
| 47 |
|
| 48 |
-
#
|
| 49 |
-
|
| 50 |
|
| 51 |
-
#
|
| 52 |
-
|
|
|
|
|
|
|
|
|
|
| 53 |
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
st.image(image, caption="Captured Image", use_column_width=True)
|
| 58 |
|
| 59 |
-
|
| 60 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import cv2
|
| 2 |
import streamlit as st
|
| 3 |
from fer import FER
|
|
|
|
| 4 |
import numpy as np
|
| 5 |
|
| 6 |
+
# Single FER detector instance, created once at import time and shared by
# every processed frame (model load is expensive; reuse avoids per-frame cost).
emotion_detector = FER()
|
| 8 |
|
| 9 |
+
# Function to process each frame and detect emotions
|
| 10 |
+
def process_frame(frame):
|
|
|
|
|
|
|
| 11 |
emotions = emotion_detector.detect_emotions(frame)
|
| 12 |
blurred_frame = cv2.GaussianBlur(frame, (51, 51), 0)
|
| 13 |
|
|
|
|
| 15 |
(x, y, w, h) = face["box"]
|
| 16 |
emotion, score = max(face["emotions"].items(), key=lambda item: item[1])
|
| 17 |
|
| 18 |
+
# Overlay rectangle on detected face
|
| 19 |
overlay = frame.copy()
|
| 20 |
alpha = 0.4
|
| 21 |
cv2.rectangle(overlay, (x, y), (x + w, y + h), (0, 255, 0), 2)
|
| 22 |
cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)
|
| 23 |
|
| 24 |
+
# Replace detected face region with unblurred version
|
| 25 |
blurred_frame[y:y + h, x:x + w] = frame[y:y + h, x:x + w]
|
| 26 |
|
| 27 |
+
# Display Emotion Text
|
| 28 |
font = cv2.FONT_HERSHEY_SIMPLEX
|
| 29 |
font_scale = 0.7
|
| 30 |
font_thickness = 2
|
|
|
|
| 39 |
cv2.rectangle(blurred_frame, (text_x - 5, text_y - text_height - 5), (text_x + text_width + 5, text_y + 5), bg_color, -1)
|
| 40 |
cv2.putText(blurred_frame, text, (text_x, text_y), font, font_scale, text_color, font_thickness)
|
| 41 |
|
| 42 |
+
return blurred_frame
|
| 43 |
|
| 44 |
# ---------------------------------------------------------------------------
# Streamlit UI
#
# Layout: title + instructions, an on/off checkbox, and two columns showing
# the raw webcam feed (left) and the emotion-annotated output (right).
# ---------------------------------------------------------------------------
st.title("Live Emotion Recognition")
st.write("Allow camera access to start real-time emotion detection.")

# Button to start webcam. Toggling the checkbox reruns the script, which is
# how the capture loop below starts and stops.
run = st.checkbox("Start Camera")

# Streamlit image containers. st.empty() creates a mutable placeholder we can
# repeatedly write frames into; the original st.image([]) passed an empty list
# as image data, which raises on current Streamlit versions.
col1, col2 = st.columns(2)
with col1:
    st.header("Live Camera Feed")
    camera_placeholder = st.empty()

with col2:
    st.header("Emotion Detection Output")
    output_placeholder = st.empty()

# Open the webcam only while the user has the camera switched on; opening
# VideoCapture unconditionally would hold the device even when idle.
if run:
    cap = cv2.VideoCapture(0)
    try:
        while run:
            ret, frame = cap.read()
            if not ret:
                st.error("Error: Unable to access webcam.")
                break

            # OpenCV captures BGR; Streamlit expects RGB.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            processed_frame = process_frame(frame)  # Detect emotions

            camera_placeholder.image(frame, channels="RGB")
            output_placeholder.image(processed_frame, channels="RGB")
    finally:
        # Guarantee the device is released even if the loop exits abnormally.
        cap.release()