Upload 4 files

- app.py (+178 -0)
- emotion_model.h5 (+3 -0)
- emotion_model.json (+1 -0)
- haarcascade_frontalface_default.xml (diff too large to render)
app.py
ADDED
@@ -0,0 +1,178 @@
import streamlit as st
import cv2
import numpy as np
import pandas as pd
import tempfile
import os
import time
from keras.models import model_from_json
from keras_preprocessing.image import img_to_array
import plotly.express as px


# Load Emotion Detection Model
@st.cache_resource
def load_model():
    model_path = "emotion_model.json"
    weights_path = "emotion_model.h5"
    if not os.path.exists(model_path) or not os.path.exists(weights_path):
        st.error("Model files not found.")
        return None
    with open(model_path, "r") as json_file:
        loaded_model_json = json_file.read()
    emotion_model = model_from_json(loaded_model_json)
    emotion_model.load_weights(weights_path)
    return emotion_model


# Load Haar Cascade for Face Detection
@st.cache_resource
def load_face_detector():
    cascade_path = "haarcascade_frontalface_default.xml"
    if not os.path.exists(cascade_path):
        st.error("Haar Cascade file not found.")
        return None
    return cv2.CascadeClassifier(cascade_path)


# Convert to H.264 for Better Browser Playback
def convert_to_h264(input_path, output_path):
    cap = cv2.VideoCapture(input_path)
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))

    fourcc = cv2.VideoWriter_fourcc(*'avc1')  # H.264 codec

    out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        out.write(frame)

    cap.release()
    out.release()
    time.sleep(2)  # Delay to prevent file locking
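One caveat with convert_to_h264: the 'avc1' FourCC only works if the local OpenCV build links an H.264 encoder, which the stock opencv-python wheels often do not; in that case VideoWriter fails silently and the output file ends up empty (the condition the app later checks with os.path.getsize). A common fallback, sketched below assuming an ffmpeg binary is on the PATH (the function name convert_to_h264_ffmpeg is illustrative, not part of this diff), is to shell out to ffmpeg for the re-encode:

import subprocess

def convert_to_h264_ffmpeg(input_path, output_path):
    # Re-encode to H.264 with a browser-friendly pixel format;
    # check=True raises CalledProcessError if ffmpeg fails.
    subprocess.run(
        ["ffmpeg", "-y", "-i", input_path,
         "-c:v", "libx264", "-pix_fmt", "yuv420p", output_path],
        check=True,
    )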


# Process Video and Collect Emotion Data
def process_video(video_path, output_path, skip_frames=5):
    cap = cv2.VideoCapture(video_path)
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))

    fourcc = cv2.VideoWriter_fourcc(*'avc1')  # Use H.264 for better support
    temp_output_path = os.path.join(tempfile.gettempdir(), "temp_video.mp4")
    out = cv2.VideoWriter(temp_output_path, fourcc, fps, (frame_width, frame_height))

    emotion_model = load_model()
    face_detector = load_face_detector()

    emotion_labels = {0: "Angry", 1: "Disgust", 2: "Fear",
                      3: "Happy", 4: "Sad", 5: "Surprise", 6: "Neutral"}

    frame_number = 0
    emotions_over_time = []

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Analyze only every skip_frames-th frame; skipped frames are not
        # written to the output, so the processed video is correspondingly shorter
        if frame_number % skip_frames != 0:
            frame_number += 1
            continue

        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_detector.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)

        for (x, y, w, h) in faces:
            # Crop each face and match the model's 48x48 grayscale input
            roi_gray = gray_frame[y:y + h, x:x + w]
            roi_gray = cv2.resize(roi_gray, (48, 48))
            roi = roi_gray.astype("float") / 255.0
            roi = img_to_array(roi)
            roi = np.expand_dims(roi, axis=0)

            preds = emotion_model.predict(roi, verbose=0)[0]
            label = emotion_labels[np.argmax(preds)]

            emotions_over_time.append(label)

            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.putText(frame, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.9, (36, 255, 12), 2)

        out.write(frame)
        frame_number += 1

    cap.release()
    out.release()
    cv2.destroyAllWindows()

    convert_to_h264(temp_output_path, output_path)

    try:
        os.remove(temp_output_path)
    except PermissionError:
        st.warning(f"Could not delete temporary file: {temp_output_path}. Please remove it manually.")

    return pd.DataFrame({"Emotion": emotions_over_time})
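Note that process_video assumes both cached loaders succeeded: if a model file were missing, load_model() and load_face_detector() return None and the later predict/detectMultiScale calls would raise AttributeError. A defensive variant, a minimal sketch using a hypothetical helper that is not part of this diff, could stop the script cleanly instead:

def require_resources():
    # Hypothetical guard: halt the Streamlit run if either asset failed to load
    model = load_model()
    detector = load_face_detector()
    if model is None or detector is None:
        st.stop()  # the loaders have already shown an st.error message
    return model, detector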

# Generate Scatter Plot for Timeline
def generate_emotion_scatterplot(data):
    # One row is appended per detected face, so the index is a detection
    # index rather than a true source-frame number
    data["Frame"] = data.index
    fig = px.scatter(data, x="Frame", y="Emotion", title="Emotion Timeline Across Frames",
                     labels={"Frame": "Frame Number", "Emotion": "Detected Emotion"},
                     color="Emotion", opacity=0.7)
    return fig

# Generate Pie Chart
def generate_emotion_distribution(data):
    emotion_counts = data["Emotion"].value_counts().reset_index()
    emotion_counts.columns = ["Emotion", "Count"]
    fig = px.pie(emotion_counts, names="Emotion", values="Count", title="Emotion Distribution")
    return fig
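Both chart helpers work on any DataFrame with an "Emotion" column, so they can be exercised outside the app with toy data (illustrative values only):

import pandas as pd

sample = pd.DataFrame({"Emotion": ["Happy", "Happy", "Neutral", "Sad", "Happy"]})
generate_emotion_distribution(sample).show()  # pie: Happy 3, Neutral 1, Sad 1
generate_emotion_scatterplot(sample).show()   # emotion vs. detection index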


# Streamlit App
st.title("🎭 EmoNet: We Empower Machines With Emotion")

uploaded_file = st.file_uploader("📤 Upload a Video for Emotion Detection", type=["mp4", "avi", "mov"])

if uploaded_file is not None:
    # Persist the upload to disk so OpenCV can open it by path
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tfile:
        tfile.write(uploaded_file.read())
        video_path = tfile.name

    output_video_path = os.path.join(tempfile.gettempdir(), "analyzed_video.mp4")

    loading_gif_path = "loading.gif"  # Replace with your GIF file path
    if os.path.exists(loading_gif_path):
        st.image(loading_gif_path, caption="⏳ Processing your video... Please wait.", use_column_width=True)

    emotion_data = process_video(video_path, output_video_path, skip_frames=2)

    if os.path.exists(output_video_path) and os.path.getsize(output_video_path) > 0:
        st.success("✅ Video processing complete! View your results below:")

        tab1, tab2, tab3 = st.tabs(["🎬 Processed Video", "📊 Emotion Distribution", "📈 Emotion Timeline"])

        with tab1:
            st.subheader("Analyzed Video")
            st.video(output_video_path)

        with tab2:
            st.subheader("Emotion Distribution")
            st.plotly_chart(generate_emotion_distribution(emotion_data), use_container_width=True)

        with tab3:
            st.subheader("Emotion Timeline")
            st.plotly_chart(generate_emotion_scatterplot(emotion_data), use_container_width=True)
    else:
        st.error("⚠️ Error: Processed video file is missing or empty.")

    os.remove(video_path)
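Assuming the standard toolchain, the app is started with `streamlit run app.py` and expects emotion_model.json, emotion_model.h5, and haarcascade_frontalface_default.xml alongside the script (plus, optionally, a loading.gif). The imports imply dependencies on streamlit, opencv-python, numpy, pandas, keras with keras_preprocessing (TensorFlow backend), and plotly; no versions are pinned in this upload.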
emotion_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3d42bef256364d206ddb84d4e7b09844b7a1d5b6a2a75d521da658c810c7570c
size 9413600
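These three lines are a Git LFS pointer, not the weights themselves: the actual ~9.4 MB HDF5 file lives in LFS storage and is fetched on clone/checkout when Git LFS is set up (`git lfs install` once, then `git lfs pull` inside the repo). If the repository is hosted on the Hugging Face Hub, the file can also be fetched directly; a sketch, where the repo id "user/emonet" is a placeholder:

from huggingface_hub import hf_hub_download

# "user/emonet" is a placeholder repo id; substitute the real one
weights_path = hf_hub_download(repo_id="user/emonet", filename="emotion_model.h5")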
emotion_model.json
ADDED
@@ -0,0 +1 @@
{"class_name": "Sequential", "config": {"name": "sequential", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 48, 48, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "conv2d_input"}}, {"class_name": "Conv2D", "config": {"name": "conv2d", "trainable": true, "dtype": "float32", "batch_input_shape": [null, 48, 48, 1], "filters": 32, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Conv2D", "config": {"name": "conv2d_1", "trainable": true, "dtype": "float32", "filters": 64, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Dropout", "config": {"name": "dropout", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}}, {"class_name": "Conv2D", "config": {"name": "conv2d_2", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_1", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Conv2D", "config": {"name": "conv2d_3", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_2", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Dropout", "config": {"name": "dropout_1", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}}, {"class_name": "Flatten", "config": {"name": "flatten", "trainable": true, "dtype": "float32", "data_format": "channels_last"}}, {"class_name": 
"Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 1024, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dropout", "config": {"name": "dropout_2", "trainable": true, "dtype": "float32", "rate": 0.5, "noise_shape": null, "seed": null}}, {"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 7, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.12.0", "backend": "tensorflow"}
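Readable straight from the JSON: the network is a Sequential CNN over 48x48x1 grayscale input, with Conv2D stacks of 32, 64, 128, and 128 filters (3x3 kernels, ReLU), interleaved max-pooling and dropout, a 1024-unit dense layer, and a final 7-unit softmax matching the app's seven emotion labels; it was serialized with Keras 2.12.0 on the TensorFlow backend. A quick standalone sanity check (illustrative, not part of the upload):

from keras.models import model_from_json

with open("emotion_model.json") as f:
    model = model_from_json(f.read())
model.load_weights("emotion_model.h5")
model.summary()  # expect input (None, 48, 48, 1) and a final 7-unit softmax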
haarcascade_frontalface_default.xml
ADDED
The diff for this file is too large to render. See raw diff.
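This is OpenCV's stock frontal-face Haar cascade. Vendoring the XML keeps the app self-contained, but the same model also ships inside the opencv-python wheel and can be loaded from there instead (a minimal sketch, assuming opencv-python is installed):

import os
import cv2

# Load the bundled copy of the same cascade from OpenCV's data directory
cascade_path = os.path.join(cv2.data.haarcascades,
                            "haarcascade_frontalface_default.xml")
face_detector = cv2.CascadeClassifier(cascade_path)
assert not face_detector.empty()  # empty() is True if the XML failed to load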