Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -5,10 +5,11 @@ import PIL
|
|
| 5 |
from base64 import b64decode, b64encode
|
| 6 |
from keras.models import load_model
|
| 7 |
import streamlit as st
|
|
|
|
| 8 |
|
| 9 |
# Initialize the Haar Cascade face detection model
|
| 10 |
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
|
| 11 |
-
model = load_model('emotion_model.h5')
|
| 12 |
emotion_dict = {0: "Angry", 1: "Disgust", 2: "Fear", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
|
| 13 |
|
| 14 |
# Define functions to convert between JavaScript image reply and OpenCV image
|
|
@@ -54,16 +55,42 @@ def process_frame(frame):
|
|
| 54 |
|
| 55 |
return frame, emotions
|
| 56 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
# Page Title and Description
|
| 58 |
st.set_page_config(page_title="Facial Emotion Recognition", layout="wide")
|
| 59 |
st.title("Facial Emotion Recognition")
|
| 60 |
|
| 61 |
# Sidebar
|
| 62 |
st.sidebar.title("Options")
|
| 63 |
-
option = st.sidebar.radio("Select Option", ("Drag a File",
|
| 64 |
|
| 65 |
# Main Content Area
|
| 66 |
-
if option == "Drag a File"
|
| 67 |
st.subheader("Photo Processing")
|
| 68 |
|
| 69 |
# Process image or captured frame
|
|
@@ -72,16 +99,7 @@ if option == "Drag a File" or option == "Take a Picture":
|
|
| 72 |
if uploaded_file is not None:
|
| 73 |
file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
|
| 74 |
image = cv2.imdecode(file_bytes, 1)
|
| 75 |
-
|
| 76 |
-
cap = cv2.VideoCapture(0)
|
| 77 |
-
if st.button("Take Picture"):
|
| 78 |
-
ret, image = cap.read()
|
| 79 |
-
if ret:
|
| 80 |
-
cap.release()
|
| 81 |
-
else:
|
| 82 |
-
cap.release()
|
| 83 |
-
st.warning("Click the 'Take Picture' button to capture an image.")
|
| 84 |
-
|
| 85 |
if 'image' in locals():
|
| 86 |
processed_frame, emotions = process_frame(image)
|
| 87 |
# Display processed frame and emotions
|
|
@@ -90,21 +108,7 @@ if option == "Drag a File" or option == "Take a Picture":
|
|
| 90 |
if not emotions:
|
| 91 |
st.warning("No faces detected in the image.")
|
| 92 |
elif option == "Process Video":
|
| 93 |
-
|
| 94 |
-
run = st.button('Start')
|
| 95 |
-
stop = st.button('Stop')
|
| 96 |
-
|
| 97 |
-
camera = cv2.VideoCapture(0)
|
| 98 |
-
FRAME_WINDOW = st.image([])
|
| 99 |
|
| 100 |
-
while run and not stop:
|
| 101 |
-
_, frame = camera.read()
|
| 102 |
-
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
| 103 |
-
processed_frame, emotions = process_frame(frame)
|
| 104 |
-
if emotions:
|
| 105 |
-
FRAME_WINDOW.image(processed_frame, use_column_width=True)
|
| 106 |
|
| 107 |
-
|
| 108 |
-
st.write('Stopped')
|
| 109 |
-
|
| 110 |
-
camera.release()
|
|
|
|
| 5 |
from base64 import b64decode, b64encode
|
| 6 |
from keras.models import load_model
|
| 7 |
import streamlit as st
|
| 8 |
+
from streamlit_webrtc import webrtc_streamer, VideoProcessorBase
|
| 9 |
|
| 10 |
# Initialize the Haar Cascade face detection model shipped with OpenCV.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Emotion classifier. compile=False loads weights/architecture only and skips
# restoring the training configuration (optimizer/loss) — we only run inference.
model = load_model('emotion_model.h5', compile=False)

# Class index -> human-readable label for the model's 7 output classes.
emotion_dict = dict(enumerate(("Angry", "Disgust", "Fear", "Happy", "Neutral", "Sad", "Surprised")))
|
| 14 |
|
| 15 |
# Define functions to convert between JavaScript image reply and OpenCV image
|
|
|
|
| 55 |
|
| 56 |
return frame, emotions
|
| 57 |
|
| 58 |
+
|
| 59 |
+
class VideoProcessor(VideoProcessorBase):
    """Per-frame callback for streamlit-webrtc live video.

    For each incoming WebRTC frame: detect faces with the module-level Haar
    cascade, classify each face crop with the module-level Keras ``model``,
    and draw a bounding box plus an "<emotion> (<confidence>)" caption onto
    the frame before sending it back to the browser.
    """

    def recv(self, frame):
        """Annotate one video frame with detected faces and emotions.

        ``frame`` is an av.VideoFrame; the annotated frame is returned in
        the same ``bgr24`` pixel format.
        """
        bgr = frame.to_ndarray(format="bgr24")
        grayscale = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)

        for (x, y, w, h) in face_cascade.detectMultiScale(grayscale):
            # Crop the face and resize to the 48x48 grayscale input the
            # model expects.
            roi = cv2.resize(grayscale[y:y + h, x:x + w], (48, 48))
            # Add batch and channel axes -> shape (1, 48, 48, 1).
            batch = roi[np.newaxis, :, :, np.newaxis]

            scores = model.predict(batch)
            best = np.argmax(scores)
            label = emotion_dict[best]
            score = scores[0][best]

            # Box the face and caption it with the predicted emotion.
            cv2.rectangle(bgr, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.putText(bgr, f"{label} ({score:.2f})", (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 2)

        return frame.from_ndarray(bgr, format="bgr24")
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
|
| 84 |
# Page Title and Description
st.set_page_config(page_title="Facial Emotion Recognition", layout="wide")
st.title("Facial Emotion Recognition")

# Sidebar: choose between still-image upload and live webcam processing.
st.sidebar.title("Options")
option = st.sidebar.radio("Select Option", ("Drag a File", "Process Video"))
|
| 91 |
|
| 92 |
# Main Content Area
|
| 93 |
+
if option == "Drag a File" :
|
| 94 |
st.subheader("Photo Processing")
|
| 95 |
|
| 96 |
# Process image or captured frame
|
|
|
|
| 99 |
if uploaded_file is not None:
|
| 100 |
file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
|
| 101 |
image = cv2.imdecode(file_bytes, 1)
|
| 102 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 103 |
if 'image' in locals():
|
| 104 |
processed_frame, emotions = process_frame(image)
|
| 105 |
# Display processed frame and emotions
|
|
|
|
| 108 |
if not emotions:
|
| 109 |
st.warning("No faces detected in the image.")
|
| 110 |
elif option == "Process Video":
|
| 111 |
+
webrtc_streamer(key="camera", video_processor_factory=VideoProcessor)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 113 |
|
| 114 |
+
|
|
|
|
|
|
|
|
|