Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,42 +1,89 @@
|
|
| 1 |
import streamlit as st
|
| 2 |
import os
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
|
| 4 |
# Streamlit interface
|
| 5 |
st.title("Fall Detection App")
|
| 6 |
-
st.write("
|
| 7 |
|
| 8 |
# Create two columns
|
| 9 |
left_column, right_column = st.columns(2)
|
| 10 |
|
| 11 |
# Right column for video selection
|
| 12 |
with right_column:
|
| 13 |
-
# Define the paths for default videos and their corresponding pre-processed outputs
|
| 14 |
default_videos = {
|
| 15 |
-
"Video 1":
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
},
|
| 19 |
-
"Video 2": {
|
| 20 |
-
"input": os.path.join(os.getcwd(), "fall_test_02.mp4"),
|
| 21 |
-
"output": os.path.join(os.getcwd(), "video_02_results.mp4"),
|
| 22 |
-
},
|
| 23 |
-
"Video 3": {
|
| 24 |
-
"input": os.path.join(os.getcwd(), "fall_test_03.mp4"),
|
| 25 |
-
"output": os.path.join(os.getcwd(), "processed_fall_test_03.mp4"),
|
| 26 |
-
},
|
| 27 |
}
|
| 28 |
|
| 29 |
-
#
|
| 30 |
-
|
| 31 |
|
| 32 |
-
#
|
| 33 |
-
|
| 34 |
|
| 35 |
-
#
|
| 36 |
-
|
| 37 |
-
|
|
|
|
| 38 |
left_column.write("Download the processed video:")
|
| 39 |
-
with open(
|
| 40 |
-
left_column.download_button("Download", video_file,
|
| 41 |
else:
|
| 42 |
-
st.error("
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
import os
|
| 3 |
+
import cv2
|
| 4 |
+
import numpy as np
|
| 5 |
+
from ultralytics import YOLO
|
| 6 |
+
|
| 7 |
# Load YOLO model (the weights file must be present in the working directory)
model = YOLO('yolov8n.pt')  # Ensure you have the correct model file


def process_video(video_path):
    """Run YOLO person detection on a video and annotate suspected falls.

    Each detection of class 0 ('person') whose bounding box is wider than
    it is tall is flagged as a fall (a crude aspect-ratio heuristic); the
    annotated frames are written to ``output.mp4`` in the current working
    directory.

    Args:
        video_path: Path to the input video file.

    Returns:
        Path of the annotated output video. The file may not exist if
        processing failed; callers should check with ``os.path.exists``.
    """
    output_path = os.path.join(os.getcwd(), "output.mp4")

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        # Opening can fail silently (missing file, unsupported codec);
        # report it instead of writing an empty output.
        st.error("Failed to save the processed video.")
        return output_path

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report 0 FPS; fall back to a sane default so the
    # writer does not produce an unplayable file.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    # Create a video writer to save the output
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            results = model(frame)
            for result in results:
                for bbox in result.boxes:
                    x1, y1, x2, y2 = map(int, bbox.xyxy[0])
                    cls = int(bbox.cls)

                    if cls == 0:  # Assuming class 0 is 'person'
                        w = x2 - x1
                        h = y2 - y1

                        # Heuristic: a person lying down tends to have a
                        # box wider than tall. No temporal smoothing, so
                        # single-frame misdetections will flicker.
                        if h < w:
                            color = (0, 0, 255)  # Red color for fall detected
                            label = "Fall Detected"
                        else:
                            color = (0, 255, 0)  # Green color for normal detection
                            label = "Person"

                        cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                        cv2.putText(frame, label, (x1, y1 - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

            out.write(frame)
    finally:
        # Release the capture and writer even if inference raises mid-loop.
        cap.release()
        out.release()

    # Double-check that the file was saved
    if os.path.exists(output_path):
        st.success(f"Video successfully processed and saved to {output_path}")
    else:
        st.error("Failed to save the processed video.")

    return output_path
| 59 |
|
# Streamlit interface
st.title("Fall Detection App")
st.write("The default video is automatically processed to detect falls.")

# Create two columns
left_column, right_column = st.columns(2)

# Right column for video selection
with right_column:
    # Demo videos are expected to live in the working directory.
    default_videos = {
        "Video 1": os.path.join(os.getcwd(), "fall_test_01.mp4"),
        "Video 2": os.path.join(os.getcwd(), "fall_test_02.mp4"),
        # Consistency fix: build the path like the other entries instead of
        # a bare relative name. NOTE(review): the filename does not follow
        # the "fall_test_NN.mp4" pattern — confirm the asset exists.
        "Video 3": os.path.join(os.getcwd(), "video3.mp4"),
    }

    # Select the first video as the default input
    default_video_path = default_videos["Video 1"]

    # Display the selection to the user
    st.write("Default video selected: Video 1")

    # Process the default video automatically
    output_video = process_video(default_video_path)
    if output_video and os.path.exists(output_video):
        left_column.video(output_video)  # Display video in the left column
        left_column.write("Download the processed video:")
        # Open in binary mode; download_button accepts a file-like object.
        with open(output_video, "rb") as video_file:
            left_column.download_button("Download", video_file, "output.mp4")
    else:
        st.error("There was an issue processing the video. Please try again.")