Rename realtime.py to app.py
realtime.py → app.py  +104 -214
RENAMED
@@ -1,214 +1,104 @@
 import os
 import cv2
 import numpy as np
 import tensorflow as tf
 from tensorflow.keras.models import model_from_json
 import streamlit as st
 from PIL import Image
 
 # Load model
 with open("jsn_model.json", "r") as json_file:
     loaded_model_json = json_file.read()
 model = model_from_json(loaded_model_json)
 model.load_weights('weights_model1.h5')
 
 # Loading the classifier from the file.
 face_haar_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
 
 UPLOAD_FOLDER = 'static'
 ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
 
 def allowed_file(filename):
     """Checks the file format when file is uploaded"""
     return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
 
 def Emotion_Analysis(image):
     """It does prediction of Emotions found in the Image provided, saves as Images and returns them"""
     gray_frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     faces = face_haar_cascade.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)
 
     if len(faces) == 0:
         return None
 
     for (x, y, w, h) in faces:
         roi = gray_frame[y:y + h, x:x + w]
         roi = cv2.resize(roi, (48, 48))
         roi = roi.astype("float") / 255.0
         roi = tf.expand_dims(roi, axis=-1)  # Adding channel dimension
         roi = np.expand_dims(roi, axis=0)  # Adding batch dimension
 
         prediction = model.predict(roi)
         EMOTIONS_LIST = ["Angry", "Disgust", "Fear", "Happy", "Neutral", "Sad", "Surprise"]
         rec_col = {"Happy": (0, 255, 0), "Sad": (255, 0, 0), "Surprise": (255, 204, 55),
                    "Angry": (0, 0, 255), "Disgust": (230, 159, 0), "Neutral": (0, 255, 255), "Fear": (128, 0, 128)}
 
         pred_emotion = EMOTIONS_LIST[np.argmax(prediction)]
         Text = str(pred_emotion)
 
         cv2.rectangle(image, (x, y), (x + w, y + h), rec_col[str(pred_emotion)], 2)
         cv2.rectangle(image, (x, y - 40), (x + w, y), rec_col[str(pred_emotion)], -1)
         cv2.putText(image, Text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
 
     return image, pred_emotion
 
 def video_frame_callback(frame):
     """Callback function to process each frame of video"""
     image = np.array(frame)
     result = Emotion_Analysis(image)
     if result is not None:
         processed_image, _ = result
         return processed_image
     return frame
 
 st.title('Emotion Detection App')
 
 st.sidebar.title("Options")
 
 # Options for manual upload or webcam capture
 upload_option = st.sidebar.selectbox("Choose Upload Option", ["Image Upload", "Webcam"])
 
 if upload_option == "Image Upload":
     uploaded_file = st.sidebar.file_uploader("Choose an image...", type=["png", "jpg", "jpeg", "gif"])
 
     if uploaded_file is not None and allowed_file(uploaded_file.name):
         image = Image.open(uploaded_file)
         image = np.array(image.convert('RGB'))  # Ensure image is in RGB format
         result = Emotion_Analysis(image)
 
         if result is None:
             st.image(image, caption="Uploaded Image", use_column_width=True)
             st.error("No face detected")
         else:
             processed_image, pred_emotion = result
             st.image(processed_image, caption=f"Predicted Emotion: {pred_emotion}", use_column_width=True)
 
 elif upload_option == "Webcam":
     st.sidebar.write("Webcam Capture")
     run = st.checkbox('Run Webcam')
     FRAME_WINDOW = st.image([])
 
     camera = cv2.VideoCapture(0)
 
     while run:
         success, frame = camera.read()
         if not success:
             st.error("Unable to read from webcam. Please check your camera settings.")
             break
         frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         processed_frame = video_frame_callback(frame)
         FRAME_WINDOW.image(processed_frame)
 
     camera.release()
 else:
     st.write("Please select an option to start.")
 
-
-# import os
-# import cv2
-# import numpy as np
-# import tensorflow as tf
-# from tensorflow.keras.models import model_from_json
-# import streamlit as st
-# from PIL import Image
-
-# # Load model
-# with open("jsn_model.json", "r") as json_file:
-#     loaded_model_json = json_file.read()
-# model = model_from_json(loaded_model_json)
-# model.load_weights('weights_model1.h5')
-
-# # Loading the classifier from the file.
-# face_haar_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
-
-# UPLOAD_FOLDER = 'static'
-# ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
-
-# def allowed_file(filename):
-#     """Checks the file format when file is uploaded"""
-#     return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
-
-# def Emotion_Analysis(image):
-#     """It does prediction of Emotions found in the Image provided, saves as Images and returns them"""
-#     gray_frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-#     faces = face_haar_cascade.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)
-
-#     if len(faces) == 0:
-#         return None
-
-#     for (x, y, w, h) in faces:
-#         roi = gray_frame[y:y + h, x:x + w]
-#         roi = cv2.resize(roi, (48, 48))
-#         roi = roi.astype("float") / 255.0
-#         roi = tf.expand_dims(roi, axis=-1)  # Adding channel dimension
-#         roi = np.expand_dims(roi, axis=0)  # Adding batch dimension
-
-#         prediction = model.predict(roi)
-#         EMOTIONS_LIST = ["Angry", "Disgust", "Fear", "Happy", "Neutral", "Sad", "Surprise"]
-#         rec_col = {"Happy": (0, 255, 0), "Sad": (255, 0, 0), "Surprise": (255, 204, 55),
-#                    "Angry": (0, 0, 255), "Disgust": (230, 159, 0), "Neutral": (0, 255, 255), "Fear": (128, 0, 128)}
-
-#         pred_emotion = EMOTIONS_LIST[np.argmax(prediction)]
-#         Text = str(pred_emotion)
-
-#         cv2.rectangle(image, (x, y), (x + w, y + h), rec_col[str(pred_emotion)], 2)
-#         cv2.rectangle(image, (x, y - 40), (x + w, y), rec_col[str(pred_emotion)], -1)
-#         cv2.putText(image, Text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
-
-#     return image, pred_emotion
-
-# def video_frame_callback(frame):
-#     """Callback function to process each frame of video"""
-#     image = np.array(frame)
-#     result = Emotion_Analysis(image)
-#     if result is not None:
-#         processed_image, _ = result
-#         return processed_image
-#     return frame
-
-# st.title('Emotion Detection App')
-
-# st.sidebar.title("Options")
-
-# # Options for manual upload or webcam capture
-# upload_option = st.sidebar.selectbox("Choose Upload Option", ["Image Upload", "Webcam"])
-
-# if upload_option == "Image Upload":
-#     uploaded_file = st.sidebar.file_uploader("Choose an image...", type=["png", "jpg", "jpeg", "gif"])
-
-#     if uploaded_file is not None and allowed_file(uploaded_file.name):
-#         image = Image.open(uploaded_file)
-#         image = np.array(image.convert('RGB'))  # Ensure image is in RGB format
-#         result = Emotion_Analysis(image)
-
-#         if result is None:
-#             st.image(image, caption="Uploaded Image", use_column_width=True)
-#             st.error("No face detected")
-#         else:
-#             processed_image, pred_emotion = result
-#             st.image(processed_image, caption=f"Predicted Emotion: {pred_emotion}", use_column_width=True)
-
-# elif upload_option == "Webcam":
-#     st.sidebar.write("Webcam Capture")
-#     run_webcam = st.sidebar.button('Run Webcam')
-#     stop_webcam = st.sidebar.button('Stop Webcam')
-#     FRAME_WINDOW = st.image([])
-
-#     if run_webcam:
-#         camera = cv2.VideoCapture(0)
-#         st.session_state['run'] = True
-
-#     if 'run' in st.session_state and st.session_state['run']:
-#         while True:
-#             success, frame = camera.read()
-#             if not success:
-#                 st.error("Unable to read from webcam. Please check your camera settings.")
-#                 break
-#             frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-#             processed_frame = video_frame_callback(frame)
-#             FRAME_WINDOW.image(processed_frame)
-#             if stop_webcam:
-#                 st.session_state['run'] = False
-#                 camera.release()
-#                 break
-# else:
-#     st.write("Please select an option to start.")
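Streamlit Spaces launch app.py by default (unless app_file in the README metadata points elsewhere), which is presumably the motivation for this rename; the module body is otherwise unchanged, with only the commented-out earlier webcam implementation at the end of realtime.py dropped. Below is a minimal local smoke test of the renamed module's Emotion_Analysis helper, offered as a sketch: it assumes jsn_model.json, weights_model1.h5, and a placeholder test image sample_face.jpg sit next to app.py, and note that importing app also executes its module-level Streamlit calls, which outside `streamlit run` typically only produces ScriptRunContext warnings.

import cv2

from app import Emotion_Analysis  # module name after the rename

# Hypothetical test image; cv2.imread returns it in BGR order, which at most
# swaps the drawn box colours relative to the app's RGB upload path.
img = cv2.imread("sample_face.jpg")

result = Emotion_Analysis(img)
if result is None:
    print("No face detected")
else:
    annotated, emotion = result
    print("Predicted emotion:", emotion)
    cv2.imwrite("annotated.jpg", annotated)  # image with box and label drawn on it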