Spaces:
Build error
Build error
HansGan committed on
Commit Β·
2da2de4
1
Parent(s): 943b58d
added models
Browse files- app.py +4 -23
- pages/Motion-Based_Model.py +141 -0
- pages/motion_scaler.pkl +3 -0
- pages/ocsvm_motion.pkl +3 -0
- pages/svm_prototype.py +0 -6
app.py
CHANGED
|
@@ -1,31 +1,12 @@
|
|
| 1 |
import streamlit as st
|
| 2 |
|
| 3 |
st.set_page_config(
|
| 4 |
-
page_title="
|
| 5 |
-
page_icon="π",
|
| 6 |
)
|
| 7 |
|
| 8 |
-
st.title("
|
| 9 |
-
st.write("A
|
| 10 |
-
st.write("Thesis
|
| 11 |
-
st.image("pages/Cervical-Cancer-Cells.jpg", caption='', width=700)
|
| 12 |
|
| 13 |
|
| 14 |
-
st.header("How does this app work? β")
|
| 15 |
-
st.write("""
|
| 16 |
-
Once you upload an image of cervical cancer cells using any of our models,
|
| 17 |
-
they analyze the cell structure and classify the type of cancer present.
|
| 18 |
-
The system will then predict the type of cancer cells based on the analysis.
|
| 19 |
-
""")
|
| 20 |
-
|
| 21 |
-
st.subheader("How to use this app? π€")
|
| 22 |
-
st.markdown("""
|
| 23 |
-
1. Select U-Net-Model to segment your Cervical Cells image into predicted Cytoplasm and Nuclei Mask.
|
| 24 |
-
2. Download predicted Cytoplasm and Nuclei Mask image from U-Net-Model.
|
| 25 |
-
3. Head to either CNN or SVM Model to classify Cervical Cells image.
|
| 26 |
-
4. Upload Cervical Cells image, Predicted Cytoplasm image, and Predicted Nuclei image in the model choosen.
|
| 27 |
-
5. Wait for the model to analyze and classify the type of cancer present.
|
| 28 |
-
6. The model will output predicted Cervical Cancer cell type based on the analysis with images of the uploaded images and plotted image of concatenated image of predicted Cytoplasm and Nuclei Mask.
|
| 29 |
-
""")
|
| 30 |
-
|
| 31 |
st.sidebar.info("Please select a model from above π")
|
|
|
|
| 1 |
import streamlit as st

# Landing page of the Streamlit multi-page app.
# Configures the browser tab title; the actual models are separate pages
# under pages/ and appear automatically in the sidebar navigation.
st.set_page_config(
    page_title="One-Class SVM Prototype",
)

# Thesis title and short project description shown on the home page.
st.title("Implementing Unsupervised SVM for Crowd Panic Detection in Video-Surveillance Scenes using Fused Pose- and Motion-Based Features")
st.write("A One-Class SVM Prototype for Crowd Panic Detection")
st.write("Thesis Proof of Concept by Group DGO of Mapua University")

# Points the user at the sidebar page selector.
# NOTE(review): "π" looks like a mis-encoded emoji from the original source — confirm intended character.
st.sidebar.info("Please select a model from above π")
|
pages/Motion-Based_Model.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import tempfile

import cv2
import joblib
import numpy as np
import streamlit as st
from sklearn.preprocessing import StandardScaler
from sklearn.svm import OneClassSVM
| 7 |
+
|
| 8 |
+
# Streamlit app header
st.title("Motion-Based One-Class SVM Model")

#==========================IMPORTING MODELS==========================

# Pre-fitted StandardScaler and One-Class SVM, trained offline and shipped
# with this repository as Git-LFS pickle files.
# NOTE(review): joblib.load unpickles arbitrary objects — only ever load
# model files bundled with the repo, never user-supplied ones.
motion_scaler = joblib.load('pages/motion_scaler.pkl')
ocsvm_motion = joblib.load('pages/ocsvm_motion.pkl')

#====================================================================

#==========================FUNCTIONS==========================
+
# Function to extract and calculate motion features from a BytesIO object
def calculate_optical_flow_and_motion_energy(video_bytes):
    """Extract per-frame-transition motion features from an in-memory video.

    Parameters
    ----------
    video_bytes : file-like (BytesIO / Streamlit UploadedFile)
        Raw contents of an uploaded video file.

    Returns
    -------
    numpy.ndarray, shape (num_frames - 1, 3)
        One row per consecutive-frame transition:
        [velocity, average flow magnitude, flow coherence].

    Raises
    ------
    ValueError
        If the video cannot be opened or the first frame cannot be read.
    """
    motion_features = []

    # BUG FIX: the original called cv2.VideoCapture(cv2.imdecode(...)).
    # cv2.imdecode decodes a *still image* and returns an ndarray, which
    # VideoCapture cannot consume — opening the upload always failed.
    # VideoCapture only reads from a path/URL, so spool the bytes to a
    # temporary file and open that instead.
    video_bytes.seek(0)
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
        tmp.write(video_bytes.read())
        tmp_path = tmp.name

    cap = cv2.VideoCapture(tmp_path)
    try:
        if not cap.isOpened():
            raise ValueError("Error opening video file from uploaded bytes.")

        # Read and grayscale the first frame to seed the optical-flow pair.
        ret, prev_frame = cap.read()
        if not ret:
            raise ValueError("Cannot read the first frame from the video.")
        prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)

        while True:
            ret, frame = cap.read()
            if not ret:
                break  # no more frames

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # Dense Farneback optical flow between consecutive frames.
            flow = cv2.calcOpticalFlowFarneback(prev_gray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
            mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])

            avg_mag = np.mean(mag)  # average flow magnitude for this transition

            # NOTE(review): motion_features[-1][0] is the *previous velocity*
            # (feature index 0), not the previous average magnitude, so this
            # computes avg_mag - prev_velocity rather than a magnitude delta.
            # Preserved as-is because the pickled scaler/SVM were trained on
            # features produced by exactly this formula — confirm with the
            # training code before changing.
            if motion_features:
                velocity = avg_mag - motion_features[-1][0]
            else:
                velocity = 0

            # Directional coherence of the flow field (mean cosine of angles).
            flow_coherence = np.mean(np.cos(ang))

            # Dead code removed: avg_ang, motion_energy and acceleration were
            # computed in the original but never included in the feature vector.
            motion_features.append(np.array([velocity, avg_mag, flow_coherence]))

            prev_gray = gray
    finally:
        # Always release the capture and remove the spooled temp file.
        cap.release()
        os.unlink(tmp_path)

    return np.array(motion_features)
|
| 88 |
+
|
| 89 |
+
# Function to predict panic from uploaded video and display labeled frames
def predict_uploaded_video(video_uploaded):
    """Run the One-Class SVM over an uploaded video and stream labeled frames.

    Parameters
    ----------
    video_uploaded : file-like (BytesIO / Streamlit UploadedFile)
        Raw contents of the uploaded .mp4 file.

    Side effects: renders each frame into a Streamlit placeholder, overlaying
    a red "Panic" label on frames whose transition the SVM flags.
    """
    # Calculate motion features from the uploaded bytes.
    motion_features = calculate_optical_flow_and_motion_energy(video_uploaded)
    if len(motion_features) == 0:
        # Robustness: a video with fewer than two frames yields no features.
        st.warning("No motion features could be extracted from the video.")
        return

    # Scale with the same scaler the model was trained with.
    motion_features = motion_scaler.transform(motion_features)

    # Vectorized prediction — one call instead of a per-row predict([feature])
    # loop. Returns an array of +1 / -1 (inlier / outlier).
    # NOTE(review): labeling +1 as "Panic" assumes the one-class model was
    # fitted on panic footage (inlier == panic) — confirm against training.
    predictions = ocsvm_motion.predict(motion_features)

    # Re-open the video for display. BUG FIX: as in feature extraction, the
    # original passed cv2.imdecode output (an image ndarray) to VideoCapture,
    # which cannot work; spool to a temp file instead.
    video_uploaded.seek(0)
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
        tmp.write(video_uploaded.read())
        tmp_path = tmp.name
    cap = cv2.VideoCapture(tmp_path)

    stframe = st.empty()
    frame_index = 0
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break  # no more frames

            # BUG FIX: predictions has one entry per frame *transition*
            # (len == frames - 1), but the original indexed it with the
            # display-frame index, crashing with IndexError on the final
            # frame. Row i describes the transition into frame i + 1, so
            # frame k maps to predictions[k - 1]; frame 0 has no prediction.
            pred_idx = frame_index - 1
            if 0 <= pred_idx < len(predictions) and predictions[pred_idx] == 1:
                cv2.putText(frame, "Panic", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)

            # OpenCV frames are BGR; Streamlit expects RGB.
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            stframe.image(frame_rgb)

            frame_index += 1
    finally:
        # Always release the capture and remove the spooled temp file.
        cap.release()
        os.unlink(tmp_path)
|
| 131 |
+
|
| 132 |
+
#====================================================================
|
| 133 |
+
|
| 134 |
+
#==========================PREDICTING UPLOADED VIDEO==========================

# Accept a single .mp4 upload and run the panic-detection pipeline on it.
uploaded_video = st.file_uploader("Upload Video File", type=['mp4'])

if uploaded_video is not None:
    predict_uploaded_video(uploaded_video)

#==============================================================================
|
pages/motion_scaler.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3c55b77bbd5def51e02dbe0fcd860160007fa784ad0750df6fc085b93e1d3db9
|
| 3 |
+
size 671
|
pages/ocsvm_motion.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9e09ffc19450daff44e93c70dd934e8e30b42e67d72f7836b524f94c4e334338
|
| 3 |
+
size 3583
|
pages/svm_prototype.py
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
import streamlit as st
|
| 2 |
-
|
| 3 |
-
# Streamlit app header
|
| 4 |
-
st.title("Cervical Cancer Cell Classification - SVM Model")
|
| 5 |
-
|
| 6 |
-
st.sidebar.info("Feel free to select other models from the pages above π")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|