NajmiHassan1 committed on
Commit
478ac26
·
verified ·
1 Parent(s): 3ba99e3

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +139 -0
app.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import cv2
3
+ import numpy as np
4
+ import tempfile
5
+ import os
6
+ from pathlib import Path
7
+
8
# ---------------------------
# Helper: Load YOLO Model
# ---------------------------
@st.cache_resource
def load_yolo():
    """Load the YOLOv4 network and COCO class labels from the ./yolo directory.

    Returns:
        tuple: (net, output_layers, classes) where
            net is the cv2.dnn network loaded from yolov4.weights/yolov4.cfg,
            output_layers is the list of unconnected output layer names, and
            classes is the list of label strings from coco.names.
    """
    # Load class labels. splitlines() is robust to Windows "\r\n" endings,
    # where split("\n") would leave a stray "\r" on every label.
    labels_path = os.path.join("yolo", "coco.names")
    with open(labels_path, "r", encoding="utf-8") as f:
        classes = f.read().strip().splitlines()

    # Load YOLO model configuration and weights.
    net = cv2.dnn.readNet(os.path.join("yolo", "yolov4.weights"),
                          os.path.join("yolo", "yolov4.cfg"))

    # Uncomment these lines if you have GPU support
    # net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    # net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

    # Get output layer names (getUnconnectedOutLayers() returns 1-based indices).
    layer_names = net.getLayerNames()
    output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers().flatten()]

    return net, output_layers, classes
30
+
31
# Load the detector once at startup; the st.cache_resource decorator on
# load_yolo keeps a single cached copy across Streamlit reruns.
net, output_layers, classes = load_yolo()
32
+
33
# ---------------------------
# Helper: Process Video
# ---------------------------
def process_video(video_path, max_frames=100, conf_threshold=0.5, nms_threshold=0.4):
    """Run YOLOv4 detection on up to `max_frames` frames of a video.

    Draws labelled bounding boxes and per-class counts onto each frame and
    returns the annotated frames (BGR numpy arrays, original resolution).

    Args:
        video_path: Path to a video file readable by cv2.VideoCapture.
        max_frames: Maximum number of frames to process.
        conf_threshold: Minimum class confidence for a detection to be kept
            (default 0.5, matching the previous hard-coded value).
        nms_threshold: IoU threshold for non-max suppression
            (default 0.4, matching the previous hard-coded value).

    Returns:
        list: annotated BGR frames.
    """
    cap = cv2.VideoCapture(video_path)
    processed_frames = []
    frame_count = 0

    try:
        while cap.isOpened() and frame_count < max_frames:
            ret, frame = cap.read()
            if not ret:
                break

            height, width = frame.shape[:2]

            # Create blob from image (YOLOv4 expects 416x416 RGB scaled to [0, 1]).
            blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
            net.setInput(blob)
            outputs = net.forward(output_layers)

            boxes = []
            confidences = []
            class_ids = []

            # Each detection row is [cx, cy, w, h, objectness, class scores...];
            # all coordinates are normalized to the input size.
            for output in outputs:
                for detection in output:
                    scores = detection[5:]
                    class_id = np.argmax(scores)
                    confidence = scores[class_id]
                    if confidence > conf_threshold:
                        # Convert normalized center/size to top-left pixel coords.
                        center_x = int(detection[0] * width)
                        center_y = int(detection[1] * height)
                        w = int(detection[2] * width)
                        h = int(detection[3] * height)
                        x = int(center_x - w / 2)
                        y = int(center_y - h / 2)
                        boxes.append([x, y, w, h])
                        confidences.append(float(confidence))
                        class_ids.append(class_id)

            # Non-max suppression to remove duplicate/overlapping boxes.
            indexes = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
            object_count = {}

            if len(indexes) > 0:
                for i in indexes.flatten():
                    x, y, w, h = boxes[i]
                    label = str(classes[class_ids[i]])
                    confidence = confidences[i]
                    color = (0, 255, 0)
                    cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                    cv2.putText(frame, f'{label} {int(confidence * 100)}%', (x, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
                    object_count[label] = object_count.get(label, 0) + 1

            # Display object counts in the top-left corner of the frame.
            y_offset = 30
            for label, count in object_count.items():
                cv2.putText(frame, f'{label}: {count}', (10, y_offset),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)
                y_offset += 30

            processed_frames.append(frame)
            frame_count += 1
    finally:
        # Always release the capture handle, even if a frame raises mid-loop.
        cap.release()

    return processed_frames
100
+
101
# ---------------------------
# Streamlit UI
# ---------------------------
st.title("Real-Time Object Detection and Counting")
st.write("Upload a video file to run object detection using YOLOv4.")

# Video file uploader
uploaded_file = st.file_uploader("Choose a video file", type=["mp4", "mov", "avi"])

if uploaded_file is not None:
    # Save the upload to a named temporary file so OpenCV can open it by path.
    # Preserve the original suffix (container format hint) and close the
    # handle before handing the path to cv2.VideoCapture — leaving it open
    # leaks the descriptor and, on Windows, prevents reopening the file.
    suffix = Path(uploaded_file.name).suffix
    tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
    try:
        tfile.write(uploaded_file.read())
    finally:
        tfile.close()

    st.video(uploaded_file)  # Show original video

    if st.button("Run Object Detection"):
        st.write("Processing video...")
        processed_frames = process_video(tfile.name, max_frames=100)

        # Create a directory for output frames (optional)
        output_dir = Path("output_frames")
        output_dir.mkdir(exist_ok=True)
        frame_paths = []

        # Save processed frames as images
        for i, frame in enumerate(processed_frames):
            frame_path = output_dir / f"frame_{i:03d}.jpg"
            cv2.imwrite(str(frame_path), frame)
            frame_paths.append(str(frame_path))

        st.success("Processing complete!")

        # Display processed frames as a gallery (or create a video if needed)
        st.write("Processed Frames:")
        for frame_path in frame_paths:
            st.image(frame_path, channels="BGR")

        # Optionally, you could create a video file from frames and offer a download link.