charulp2499 committed on
Commit
f776016
·
verified ·
1 Parent(s): bfa1884

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -19
app.py CHANGED
@@ -1,16 +1,13 @@
1
  import cv2
2
  import streamlit as st
3
  from fer import FER
4
- from PIL import Image
5
  import numpy as np
6
 
7
- # Initialize emotion detector
8
  emotion_detector = FER()
9
 
10
- # Function to process image and detect emotions
11
- def process_image(image):
12
- frame = np.array(image)
13
- frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) # Convert from PIL to OpenCV format
14
  emotions = emotion_detector.detect_emotions(frame)
15
  blurred_frame = cv2.GaussianBlur(frame, (51, 51), 0)
16
 
@@ -18,13 +15,16 @@ def process_image(image):
18
  (x, y, w, h) = face["box"]
19
  emotion, score = max(face["emotions"].items(), key=lambda item: item[1])
20
 
 
21
  overlay = frame.copy()
22
  alpha = 0.4
23
  cv2.rectangle(overlay, (x, y), (x + w, y + h), (0, 255, 0), 2)
24
  cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)
25
 
 
26
  blurred_frame[y:y + h, x:x + w] = frame[y:y + h, x:x + w]
27
 
 
28
  font = cv2.FONT_HERSHEY_SIMPLEX
29
  font_scale = 0.7
30
  font_thickness = 2
@@ -39,22 +39,38 @@ def process_image(image):
39
  cv2.rectangle(blurred_frame, (text_x - 5, text_y - text_height - 5), (text_x + text_width + 5, text_y + 5), bg_color, -1)
40
  cv2.putText(blurred_frame, text, (text_x, text_y), font, font_scale, text_color, font_thickness)
41
 
42
- return cv2.cvtColor(blurred_frame, cv2.COLOR_BGR2RGB) # Convert back to RGB for Streamlit
43
 
44
  # Streamlit UI
45
- st.title("Real-Time Emotion Recognition")
46
- st.write("Use the camera or upload an image to detect emotions.")
47
 
48
- # Camera Input
49
- camera_image = st.camera_input("Take a picture")
50
 
51
- # File Upload
52
- uploaded_file = st.file_uploader("Or upload an image...", type=["jpg", "png", "jpeg"])
 
 
 
53
 
54
- # Process image if uploaded or captured via camera
55
- if camera_image or uploaded_file:
56
- image = Image.open(camera_image if camera_image else uploaded_file)
57
- st.image(image, caption="Captured Image", use_column_width=True)
58
 
59
- processed_image = process_image(image)
60
- st.image(processed_image, caption="Processed Image with Emotions", use_column_width=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import cv2
2
  import streamlit as st
3
  from fer import FER
 
4
  import numpy as np
5
 
6
+ # Initialize Emotion Detector: a single module-level FER() instance
+ # (default constructor arguments) shared by process_frame below.
7
  emotion_detector = FER()
8
 
9
+ # Function to process each frame and detect emotions
10
+ def process_frame(frame):
# Returns a frame where the background is Gaussian-blurred and each detected
# face is kept sharp, tinted with a translucent green rectangle, and labeled
# with its highest-scoring emotion.
# NOTE(review): this diff hunk omits line 14 (presumably the
# `for face in emotions:` loop header) and lines 31-38 (the label text,
# position, size, and color setup) — confirm against the full file; the
# statements below are the loop body.

11
  emotions = emotion_detector.detect_emotions(frame)
12
  blurred_frame = cv2.GaussianBlur(frame, (51, 51), 0)
13


15
  (x, y, w, h) = face["box"]
16
  emotion, score = max(face["emotions"].items(), key=lambda item: item[1])
17

18
+ # Overlay rectangle on detected face
19
  overlay = frame.copy()
20
  alpha = 0.4
21
  cv2.rectangle(overlay, (x, y), (x + w, y + h), (0, 255, 0), 2)
22
# NOTE(review): the dst argument below is `frame`, so the blend is written
# back into the caller's input array — process_frame mutates its argument
# in place; callers displaying `frame` afterwards will see the overlay.
  cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)
23

24
+ # Replace detected face region with unblurred version
25
  blurred_frame[y:y + h, x:x + w] = frame[y:y + h, x:x + w]
26

27
+ # Display Emotion Text
28
  font = cv2.FONT_HERSHEY_SIMPLEX
29
  font_scale = 0.7
30
  font_thickness = 2

39
  cv2.rectangle(blurred_frame, (text_x - 5, text_y - text_height - 5), (text_x + text_width + 5, text_y + 5), bg_color, -1)
40
  cv2.putText(blurred_frame, text, (text_x, text_y), font, font_scale, text_color, font_thickness)
41

42
+ return blurred_frame
43
 
44
# Streamlit UI
st.title("Live Emotion Recognition")
st.write("Allow camera access to start real-time emotion detection.")

# Button to start webcam
run = st.checkbox("Start Camera")

# Streamlit Image Containers: two side-by-side panes, raw feed vs. annotated
# output. st.empty() is the documented placeholder widget; the original
# st.image([]) passed an empty list, which is not a valid image payload.
col1, col2 = st.columns(2)
with col1:
    st.header("Live Camera Feed")
    camera_placeholder = st.empty()

with col2:
    st.header("Emotion Detection Output")
    output_placeholder = st.empty()

# OpenCV Video Capture.
# Only grab the webcam once the user has opted in (the original opened
# device 0 unconditionally, holding the camera even with the checkbox off),
# and guarantee release via try/finally even if an iteration raises or
# Streamlit interrupts the script on rerun.
if run:
    cap = cv2.VideoCapture(0)
    try:
        if not cap.isOpened():
            st.error("Error: Unable to access webcam.")
        else:
            # `run` only changes on a Streamlit rerun; the loop is broken by
            # the rerun itself or by a failed read.
            while run:
                ret, frame = cap.read()
                if not ret:
                    st.error("Error: Unable to access webcam.")
                    break

                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV yields BGR; Streamlit expects RGB

                # Render the raw feed BEFORE processing: process_frame writes
                # its overlay blend back into `frame` in place (addWeighted's
                # dst argument), so displaying afterwards would show the
                # annotated frame in the "Live Camera Feed" pane too.
                camera_placeholder.image(frame, channels="RGB")

                processed_frame = process_frame(frame)  # Detect emotions
                output_placeholder.image(processed_frame, channels="RGB")
    finally:
        cap.release()