sunbal7 committed on
Commit
e8fa336
Β·
verified Β·
1 Parent(s): 8aa25f8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +329 -232
app.py CHANGED
@@ -7,144 +7,144 @@ import plotly.graph_objects as go
7
  from collections import defaultdict
8
  import time
9
  import requests
10
- from PIL import Image
11
- import io
12
  import pandas as pd
 
 
 
13
 
14
  # Page configuration
15
  st.set_page_config(
16
- page_title="People & Vehicle Counter",
17
- page_icon="πŸš—",
18
  layout="wide"
19
  )
20
 
21
  # Title and description
22
- st.title("πŸš— People & Vehicle Counter")
23
  st.markdown("""
24
- Upload a video or provide a video URL to count people and vehicles in real-time using YOLOv8.
25
- This app is useful for traffic monitoring, retail analytics, and crowd management.
26
  """)
27
 
28
- # Sidebar for settings
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  with st.sidebar:
30
- st.header("βš™οΈ Settings")
 
 
 
 
31
 
32
  # Confidence threshold
33
  confidence = st.slider(
34
- "Detection Confidence",
35
- min_value=0.1,
36
- max_value=1.0,
37
- value=0.25,
38
- step=0.05,
39
- help="Higher values reduce false positives but might miss some objects"
40
  )
41
 
42
  # Object classes to detect
43
- st.subheader("Objects to Count")
44
- count_person = st.checkbox("Person", value=True)
45
- count_car = st.checkbox("Car", value=True)
46
- count_bus = st.checkbox("Bus", value=False)
47
- count_truck = st.checkbox("Truck", value=False)
48
- count_motorcycle = st.checkbox("Motorcycle", value=False)
49
- count_bicycle = st.checkbox("Bicycle", value=False)
50
 
51
  # Line intersection for counting
52
- st.subheader("Counting Line")
53
- show_line = st.checkbox("Show counting line", value=True)
54
  line_position = st.slider(
55
- "Line position (%)",
56
- min_value=0,
57
- max_value=100,
58
- value=50,
59
- help="Vertical line position for counting object crossings"
60
  )
61
 
62
  # Processing options
63
- st.subheader("Processing Options")
64
  process_every_nth = st.slider(
65
- "Process every Nth frame",
66
- min_value=1,
67
- max_value=10,
68
- value=3,
69
- help="Higher values speed up processing but reduce accuracy"
70
  )
71
 
72
  max_frames = st.number_input(
73
- "Maximum frames to process",
74
- min_value=10,
75
- max_value=1000,
76
- value=200,
77
- help="Limit processing for long videos"
78
  )
79
 
80
- # Initialize session state
81
- if 'total_counts' not in st.session_state:
82
- st.session_state.total_counts = defaultdict(int)
83
- if 'frame_counts' not in st.session_state:
84
- st.session_state.frame_counts = []
85
- if 'processing_complete' not in st.session_state:
86
- st.session_state.processing_complete = False
87
- if 'processed_video' not in st.session_state:
88
- st.session_state.processed_video = None
89
 
90
- # COCO class names for YOLO (common objects)
91
- CLASS_NAMES = {
92
- 0: "person", 1: "bicycle", 2: "car", 3: "motorcycle",
93
- 5: "bus", 7: "truck", 64: "chair" # Note: YOLOv8 uses different indices
94
- }
95
 
96
- # Map our checkboxes to class IDs
97
- def get_selected_classes():
98
- selected_classes = []
99
- class_mapping = {
100
- "person": 0,
101
- "bicycle": 2,
102
- "car": 2,
103
- "motorcycle": 3,
104
- "bus": 5,
105
- "truck": 7
106
- }
107
-
108
- if count_person:
109
- selected_classes.append(0)
110
- if count_bicycle:
111
- selected_classes.append(1)
112
- if count_car:
113
- selected_classes.append(2)
114
- if count_motorcycle:
115
- selected_classes.append(3)
116
- if count_bus:
117
- selected_classes.append(5)
118
- if count_truck:
119
- selected_classes.append(7)
120
-
121
- return selected_classes
122
 
123
- # Load YOLO model with caching
124
- @st.cache_resource
125
- def load_model():
126
- return YOLO('yolov8n.pt') # Using nano model for speed
127
 
128
- # Function to process video
129
- def process_video(video_path, selected_classes):
130
- model = load_model()
131
-
 
132
  cap = cv2.VideoCapture(video_path)
 
 
133
  fps = int(cap.get(cv2.CAP_PROP_FPS))
134
  width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
135
  height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
136
 
137
- # Initialize video writer
 
 
 
138
  temp_output = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
139
  output_path = temp_output.name
140
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
141
- out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
 
 
 
 
 
 
 
142
 
143
- # Reset counts
144
- st.session_state.total_counts = defaultdict(int)
145
- st.session_state.frame_counts = []
146
 
147
- # Progress bar
148
  progress_bar = st.progress(0)
149
  status_text = st.empty()
150
 
@@ -153,50 +153,111 @@ def process_video(video_path, selected_classes):
153
 
154
  while cap.isOpened():
155
  ret, frame = cap.read()
 
 
156
  if not ret or processed_frames >= max_frames:
157
  break
158
 
159
  frame_count += 1
160
 
161
- # Process only every nth frame
162
  if frame_count % process_every_nth != 0:
163
- out.write(frame)
 
164
  continue
165
 
166
- # Run YOLO inference
167
- results = model(frame, conf=confidence, classes=selected_classes, verbose=False)
 
 
 
 
 
 
 
 
 
 
 
168
 
169
- # Draw results
170
- annotated_frame = results[0].plot()
171
 
172
- # Count objects
173
- frame_counts = defaultdict(int)
174
- for box in results[0].boxes:
175
- cls_id = int(box.cls[0])
176
- class_name = CLASS_NAMES.get(cls_id, f"class_{cls_id}")
177
- frame_counts[class_name] += 1
178
- st.session_state.total_counts[class_name] += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
 
180
- # Store frame counts for chart
181
- st.session_state.frame_counts.append({
182
- 'frame': processed_frames,
183
- **frame_counts
184
- })
185
 
186
- # Draw counting line if enabled
187
  if show_line:
188
- line_x = int(width * line_position / 100)
189
- cv2.line(annotated_frame, (line_x, 0), (line_x, height), (0, 255, 255), 2)
 
 
 
 
190
 
191
- # Add counter text
192
  y_offset = 30
193
- for obj_type, count in frame_counts.items():
194
  cv2.putText(annotated_frame,
195
- f"{obj_type}: {count}",
196
- (10, y_offset),
197
- cv2.FONT_HERSHEY_SIMPLEX,
198
- 0.7, (0, 255, 0), 2)
199
- y_offset += 25
 
 
 
 
 
200
 
201
  # Write frame to output video
202
  out.write(annotated_frame)
@@ -205,44 +266,69 @@ def process_video(video_path, selected_classes):
205
  # Update progress
206
  progress = min(processed_frames / max_frames, 1.0)
207
  progress_bar.progress(progress)
208
- status_text.text(f"Processing frame {processed_frames}/{max_frames}")
209
 
 
210
  cap.release()
211
  out.release()
212
 
213
- st.session_state.processing_complete = True
214
- st.session_state.processed_video = output_path
 
 
215
 
216
  return output_path
217
 
218
  # Function to download video from URL
 
219
  def download_video_from_url(url):
 
220
  try:
221
- response = requests.get(url, stream=True)
222
- if response.status_code == 200:
223
- temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
224
- for chunk in response.iter_content(chunk_size=8192):
225
- temp_file.write(chunk)
226
- temp_file.close()
227
- return temp_file.name
228
- else:
229
- st.error(f"Failed to download video. Status code: {response.status_code}")
230
- return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
231
  except Exception as e:
232
- st.error(f"Error downloading video: {str(e)}")
233
  return None
234
 
235
- # Main app layout
236
- tab1, tab2, tab3 = st.tabs(["πŸ“Ή Video Input", "πŸ“Š Results", "ℹ️ About"])
237
 
238
  with tab1:
239
  col1, col2 = st.columns(2)
 
240
 
241
  with col1:
242
- st.subheader("Upload Video")
243
  uploaded_file = st.file_uploader(
244
  "Choose a video file",
245
- type=['mp4', 'avi', 'mov', 'mkv']
 
246
  )
247
 
248
  if uploaded_file is not None:
@@ -250,85 +336,92 @@ with tab1:
250
  tfile = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
251
  tfile.write(uploaded_file.read())
252
  video_path = tfile.name
253
-
254
- # Display video info
255
  st.video(uploaded_file)
256
- st.info(f"Uploaded: {uploaded_file.name}")
257
 
258
  with col2:
259
- st.subheader("Video URL")
260
  video_url = st.text_input(
261
- "Enter video URL",
262
- placeholder="https://example.com/video.mp4"
263
  )
264
 
265
- if st.button("Load from URL") and video_url:
266
- with st.spinner("Downloading video..."):
267
- video_path = download_video_from_url(video_url)
268
- if video_path:
269
- st.success("Video downloaded successfully!")
270
- # Display first frame
271
  cap = cv2.VideoCapture(video_path)
272
  ret, frame = cap.read()
273
  if ret:
274
- st.image(frame, caption="First frame of video", use_column_width=True)
275
  cap.release()
 
 
 
 
276
 
277
- # Process button
278
- if ('video_path' in locals() and video_path) or ('uploaded_file' in locals() and uploaded_file):
279
- if st.button("πŸš€ Start Counting", type="primary"):
280
- selected_classes = get_selected_classes()
281
 
282
- if not selected_classes:
283
- st.warning("Please select at least one object type to count.")
284
  else:
285
- with st.spinner("Processing video with YOLOv8..."):
286
- output_path = process_video(video_path, selected_classes)
287
- st.success("Processing complete!")
 
 
 
 
 
 
 
 
288
 
289
  with tab2:
290
- if st.session_state.processing_complete:
 
 
 
291
  col1, col2 = st.columns([2, 1])
292
 
293
  with col1:
294
- st.subheader("Processed Video")
295
  # Display processed video
296
- video_file = open(st.session_state.processed_video, 'rb')
297
- video_bytes = video_file.read()
298
  st.video(video_bytes)
299
 
300
  # Download button
301
  st.download_button(
302
- label="πŸ“₯ Download Processed Video",
303
  data=video_bytes,
304
- file_name="processed_video.mp4",
305
- mime="video/mp4"
 
306
  )
307
-
308
- with col2:
309
- st.subheader("πŸ“ˆ Total Counts")
310
 
 
 
311
  # Display total counts
312
- if st.session_state.total_counts:
313
- for obj_type, count in st.session_state.total_counts.items():
314
- st.metric(label=obj_type.capitalize(), value=count)
315
  else:
316
- st.info("No objects detected")
317
-
318
- # Summary statistics
319
- st.subheader("πŸ“Š Summary")
320
- if st.session_state.frame_counts:
321
- df = pd.DataFrame(st.session_state.frame_counts)
322
- st.dataframe(df.tail(10), use_container_width=True)
323
-
324
- # Time series chart
325
- st.subheader("πŸ“ˆ Objects Over Time")
326
- if st.session_state.frame_counts:
327
- df = pd.DataFrame(st.session_state.frame_counts)
328
 
 
329
  fig = go.Figure()
330
 
331
- # Add a trace for each object type
332
  for column in df.columns:
333
  if column != 'frame':
334
  fig.add_trace(go.Scatter(
@@ -339,63 +432,67 @@ with tab2:
339
  ))
340
 
341
  fig.update_layout(
342
- title="Object Counts per Frame",
343
  xaxis_title="Frame Number",
344
- yaxis_title="Count",
345
  hovermode='x unified',
346
  height=400
347
  )
348
 
349
  st.plotly_chart(fig, use_container_width=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
350
  else:
351
- st.info("Process a video first to see results here.")
352
 
353
  with tab3:
 
354
  st.markdown("""
355
- ## About This App
356
 
357
- ### πŸ”§ Technology Stack
358
- - **YOLOv8**: State-of-the-art object detection model
359
- - **Streamlit**: Interactive web app framework
360
- - **OpenCV**: Computer vision library for video processing
361
- - **Plotly**: Interactive visualizations
362
 
363
- ### πŸ“‹ Features
364
- 1. **Multiple Input Sources**: Upload videos or use URLs
365
- 2. **Customizable Detection**: Select specific object classes
366
- 3. **Real-time Counting**: Track objects frame by frame
367
- 4. **Visual Analytics**: Interactive charts and statistics
368
- 5. **Export Results**: Download processed videos and data
369
 
370
- ### 🎯 Use Cases
371
- - **Traffic Monitoring**: Count vehicles on roads
372
- - **Retail Analytics**: Track customer movements
373
- - **Crowd Management**: Monitor people density
374
- - **Security**: Detect and count objects of interest
375
 
376
- ### ⚠️ Limitations
377
- - Processing speed depends on video length and resolution
378
- - Maximum 200 frames processed in this demo
379
- - Accuracy depends on model confidence settings
380
 
381
- ### πŸ“š How to Use
382
- 1. Upload a video or provide a URL
383
- 2. Configure detection settings in the sidebar
384
- 3. Click "Start Counting"
385
- 4. View results in the Results tab
386
- 5. Download processed video and data
387
 
388
- ---
 
 
389
 
390
- **Note**: This app runs on Hugging Face Spaces with limited resources.
391
- For heavy processing, consider running locally with GPU support.
392
- """)
393
-
394
- # Footer
395
- st.markdown("---")
396
- st.markdown(
397
- "<div style='text-align: center'>"
398
- "Built with ❀️ using YOLOv8, Streamlit, and Hugging Face Spaces"
399
- "</div>",
400
- unsafe_allow_html=True
401
- )
 
 
 
 
 
 
7
  from collections import defaultdict
8
  import time
9
  import requests
 
 
10
  import pandas as pd
11
+ from scipy.spatial import distance
12
+
13
+ # --- Configuration & Initialization ---
14
 
15
  # Page configuration
16
  st.set_page_config(
17
+ page_title="YOLOv8 Object Tracking & Counter",
18
+ page_icon="πŸ€–",
19
  layout="wide"
20
  )
21
 
22
  # Title and description
23
+ st.title("🚦 Smart Object Traffic Analyzer (YOLOv8)")
24
  st.markdown("""
25
+ A professional application for real-time **tracking and counting** of people and vehicles in video streams.
26
+ It uses YOLOv8 for detection and a simple tracking algorithm to count unique objects crossing a user-defined line.
27
  """)
28
 
29
+ # COCO Class Names (Corrected for standard YOLOv8)
30
+ COCO_CLASS_NAMES = {
31
+ 0: "person", 1: "bicycle", 2: "car", 3: "motorcycle", 4: "airplane",
32
+ 5: "bus", 6: "train", 7: "truck", 8: "boat", 9: "traffic light",
33
+ # ... other classes
34
+ }
35
+
36
+ # Mapping of checkboxed objects to their standard COCO Class IDs
37
+ CLASS_MAPPING = {
38
+ "Person": 0,
39
+ "Bicycle": 1,
40
+ "Car": 2,
41
+ "Motorcycle": 3,
42
+ "Bus": 5,
43
+ "Truck": 7,
44
+ }
45
+
46
+ # Initialize session state for tracking
47
+ if 'processed_data' not in st.session_state:
48
+ st.session_state.processed_data = {
49
+ 'total_counts': defaultdict(int),
50
+ 'frame_counts': [],
51
+ 'processed_video': None,
52
+ 'processing_complete': False,
53
+ 'tracked_objects': {}, # Unique ID: {'class': str, 'last_centroid': (x, y), 'counted': bool}
54
+ }
55
+
56
+ # --- Sidebar for Settings ---
57
  with st.sidebar:
58
+ st.header("βš™οΈ Configuration Settings")
59
+
60
+ # Model Selection
61
+ st.subheader("Model & Detection")
62
+ model_name = st.selectbox("Select YOLO Model", options=['yolov8n.pt', 'yolov8s.pt'], help="Nano (n) is fast, Small (s) is more accurate.")
63
 
64
  # Confidence threshold
65
  confidence = st.slider(
66
+ "Detection Confidence Threshold",
67
+ min_value=0.1, max_value=1.0, value=0.40, step=0.05,
68
+ help="Minimum confidence to consider a detection valid."
 
 
 
69
  )
70
 
71
  # Object classes to detect
72
+ st.subheader("Objects for Counting")
73
+ selected_classes_ui = {}
74
+ for name, id in CLASS_MAPPING.items():
75
+ # Default True for Person and Car, False otherwise
76
+ default_val = name in ["Person", "Car"]
77
+ selected_classes_ui[name] = st.checkbox(name, value=default_val)
 
78
 
79
  # Line intersection for counting
80
+ st.subheader("Counting Line Settings")
81
+ show_line = st.checkbox("Show crossing line", value=True)
82
  line_position = st.slider(
83
+ "Line Position (Vertical % from left)",
84
+ min_value=10, max_value=90, value=50,
85
+ help="Line position for counting objects that cross it."
 
 
86
  )
87
 
88
  # Processing options
89
+ st.subheader("Performance Options")
90
  process_every_nth = st.slider(
91
+ "Frame Skip (Process every Nth frame)",
92
+ min_value=1, max_value=10, value=2,
93
+ help="Higher values significantly speed up processing but reduce tracking smoothness."
 
 
94
  )
95
 
96
  max_frames = st.number_input(
97
+ "Maximum Frames to Analyze",
98
+ min_value=10, max_value=5000, value=500,
99
+ help="Limits the processing duration for long videos. Raise toward the maximum (5000) to cover more of a long video."
 
 
100
  )
101
 
102
+ # --- Helper Functions ---
 
 
 
 
 
 
 
 
103
 
104
+ @st.cache_resource
105
+ def load_model(model_path):
106
+ """Caches the YOLO model loading."""
107
+ return YOLO(model_path)
 
108
 
109
+ def get_selected_class_ids():
110
+ """Returns a list of COCO class IDs selected by the user."""
111
+ return [CLASS_MAPPING[name] for name, is_selected in selected_classes_ui.items() if is_selected]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
112
 
113
+ # --- Core Processing Function (with simple tracking and crossing logic) ---
 
 
 
114
 
115
+ def process_video(video_path, selected_class_ids, model_path):
116
+ """
117
+ Processes the video, performs object detection/tracking, and counts line crossings.
118
+ """
119
+ model = load_model(model_path)
120
  cap = cv2.VideoCapture(video_path)
121
+
122
+ # Video properties
123
  fps = int(cap.get(cv2.CAP_PROP_FPS))
124
  width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
125
  height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
126
+ total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
127
 
128
+ if total_frames > max_frames:
129
+ st.warning(f"Video is being processed for the first {max_frames} frames only (configurable in sidebar).")
130
+
131
+ # Setup video writer (Using a smaller size for web-friendliness if possible)
132
  temp_output = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
133
  output_path = temp_output.name
134
+ # NOTE(review): 'mp4v' writes reliably via OpenCV but may not play back in all browsers;
+ # H.264 ('avc1') is safer for in-browser display if the codec is available — confirm in deployment.
135
+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')
136
+ out = cv2.VideoWriter(output_path, fourcc, fps / process_every_nth, (width, height))
137
+
138
+ # Initialize state variables for the loop
139
+ current_state = st.session_state.processed_data
140
+ current_state['total_counts'] = defaultdict(int)
141
+ current_state['frame_counts'] = []
142
+ current_state['tracked_objects'] = {} # ID: {'class': str, 'last_centroid': (x, y), 'counted': bool}
143
 
144
+ # Define counting line
145
+ line_x = int(width * line_position / 100)
 
146
 
147
+ # UI Elements for progress
148
  progress_bar = st.progress(0)
149
  status_text = st.empty()
150
 
 
153
 
154
  while cap.isOpened():
155
  ret, frame = cap.read()
156
+
157
+ # Stop condition
158
  if not ret or processed_frames >= max_frames:
159
  break
160
 
161
  frame_count += 1
162
 
163
+ # Skip frames for performance (note: skipped frames are NOT written to the output video)
164
  if frame_count % process_every_nth != 0:
165
+ # We don't write the skipped frame because we want the output video
166
+ # to reflect the lower frame rate for smaller size and faster processing.
167
  continue
168
 
169
+ # --- YOLO Detection ---
170
+ # NOTE: Using tracker="bytetrack.yaml" for better tracking. Requires ultralytics>=8.0.198
171
+ # The tracker-assigned IDs are combined with centroid positions for the line-crossing logic below.
172
+ results = model.track(
173
+ frame,
174
+ conf=confidence,
175
+ classes=selected_class_ids,
176
+ persist=True,
177
+ tracker="bytetrack.yaml", # Use YOLO's built-in tracking!
178
+ verbose=False
179
+ )
180
+
181
+ annotated_frame = frame.copy()
182
 
183
+ # Current frame counts
184
+ current_frame_counts = defaultdict(int)
185
 
186
+ # --- Tracking and Counting Logic ---
187
+ if results and results[0].boxes.id is not None:
188
+ boxes = results[0].boxes.xyxy.cpu().numpy().astype(int)
189
+ track_ids = results[0].boxes.id.cpu().numpy().astype(int)
190
+ class_ids = results[0].boxes.cls.cpu().numpy().astype(int)
191
+
192
+ for box, track_id, class_id in zip(boxes, track_ids, class_ids):
193
+ x1, y1, x2, y2 = box
194
+
195
+ # Calculate centroid
196
+ centroid_x = (x1 + x2) // 2
197
+ centroid_y = (y1 + y2) // 2
198
+ centroid = (centroid_x, centroid_y)
199
+
200
+ class_name = COCO_CLASS_NAMES.get(class_id, "Unknown")
201
+ current_frame_counts[class_name] += 1
202
+
203
+ # Update/Initialize tracked object
204
+ if track_id not in current_state['tracked_objects']:
205
+ # New object detected
206
+ current_state['tracked_objects'][track_id] = {
207
+ 'class': class_name,
208
+ 'last_centroid': centroid,
209
+ 'counted': False
210
+ }
211
+ else:
212
+ # Existing object - Check for line crossing
213
+ obj_data = current_state['tracked_objects'][track_id]
214
+ prev_x = obj_data['last_centroid'][0]
215
+
216
+ if not obj_data['counted']:
217
+ # Crossing logic: object crossed the line from one side to the other
218
+ if (prev_x < line_x and centroid_x >= line_x) or \
219
+ (prev_x > line_x and centroid_x <= line_x):
220
+
221
+ # Object crossed the line!
222
+ current_state['total_counts'][class_name] += 1
223
+ obj_data['counted'] = True # Count only once
224
+
225
+ # Update the object's last known position
226
+ obj_data['last_centroid'] = centroid
227
+
228
+ # Draw bounding box, track ID, and centroid
229
+ cv2.rectangle(annotated_frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
230
+ cv2.circle(annotated_frame, centroid, 5, (0, 0, 255), -1)
231
+
232
+ label = f"ID:{track_id} {class_name}"
233
+ cv2.putText(annotated_frame, label, (x1, y1 - 10),
234
+ cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
235
 
236
+ # --- Visualization & Logging ---
 
 
 
 
237
 
238
+ # Draw counting line
239
  if show_line:
240
+ line_color = (0, 255, 255) # Cyan
241
+ cv2.line(annotated_frame, (line_x, 0), (line_x, height), line_color, 2)
242
+
243
+ # Label for the line
244
+ cv2.putText(annotated_frame, "COUNTING LINE", (line_x + 5, 20),
245
+ cv2.FONT_HERSHEY_SIMPLEX, 0.6, line_color, 2)
246
 
247
+ # Add total counter text
248
  y_offset = 30
249
+ for obj_type, count in current_state['total_counts'].items():
250
  cv2.putText(annotated_frame,
251
+ f"TOTAL {obj_type.upper()}: {count}",
252
+ (width - 300, y_offset),
253
+ cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
254
+ y_offset += 35
255
+
256
+ # Store frame counts for chart (count of *objects in frame*, not crossings)
257
+ frame_data = {'frame': processed_frames * process_every_nth}
258
+ for name in CLASS_MAPPING.keys():
259
+ frame_data[name.lower()] = current_frame_counts.get(name.lower(), 0)
260
+ current_state['frame_counts'].append(frame_data)
261
 
262
  # Write frame to output video
263
  out.write(annotated_frame)
 
266
  # Update progress
267
  progress = min(processed_frames / max_frames, 1.0)
268
  progress_bar.progress(progress)
269
+ status_text.text(f"Analyzing Frame {frame_count}/{total_frames} (Processed {processed_frames})")
270
 
271
+ # --- Cleanup ---
272
  cap.release()
273
  out.release()
274
 
275
+ # Update global state
276
+ current_state['processing_complete'] = True
277
+ current_state['processed_video'] = output_path
278
+ st.session_state.processed_data = current_state
279
 
280
  return output_path
281
 
282
  # Function to download video from URL
283
+ @st.cache_data
284
  def download_video_from_url(url):
285
+ """Downloads video from URL to a temporary file."""
286
  try:
287
+ st.info("Attempting to download video. This might take a moment...")
288
+ response = requests.get(url, stream=True, timeout=30)
289
+ response.raise_for_status() # Raise exception for bad status codes
290
+
291
+ # Determine file extension (optional, but good practice)
292
+ content_type = response.headers.get('Content-Type', '')
293
+ suffix = '.mp4' if 'mp4' in content_type else '.mov'
294
+
295
+ temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
296
+ total_size = int(response.headers.get('Content-Length', 0))
297
+
298
+ downloaded_size = 0
299
+ progress_placeholder = st.empty()
300
+
301
+ for chunk in response.iter_content(chunk_size=8192):
302
+ temp_file.write(chunk)
303
+ downloaded_size += len(chunk)
304
+ if total_size > 0:
305
+ progress = downloaded_size / total_size
306
+ progress_placeholder.progress(progress, text=f"Downloading: {downloaded_size/(1024*1024):.2f}MB / {total_size/(1024*1024):.2f}MB")
307
+
308
+ temp_file.close()
309
+ progress_placeholder.empty()
310
+ return temp_file.name
311
+
312
+ except requests.exceptions.RequestException as e:
313
+ st.error(f"Failed to download video: {str(e)}. Check URL and file access.")
314
+ return None
315
  except Exception as e:
316
+ st.error(f"An unexpected error occurred during download: {str(e)}")
317
  return None
318
 
319
+ # --- Main App Layout ---
320
+ tab1, tab2, tab3 = st.tabs(["πŸ“Ή Video Input", "πŸ“Š Analysis & Results", "ℹ️ Documentation"])
321
 
322
  with tab1:
323
  col1, col2 = st.columns(2)
324
+ video_path = None
325
 
326
  with col1:
327
+ st.subheader("πŸ“ Upload Video File")
328
  uploaded_file = st.file_uploader(
329
  "Choose a video file",
330
+ type=['mp4', 'avi', 'mov', 'mkv'],
331
+ help="Supported video formats. Maximum recommended file size: 50MB."
332
  )
333
 
334
  if uploaded_file is not None:
 
336
  tfile = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
337
  tfile.write(uploaded_file.read())
338
  video_path = tfile.name
339
+ st.info(f"Video ready: {uploaded_file.name}")
 
340
  st.video(uploaded_file)
 
341
 
342
  with col2:
343
+ st.subheader("🌐 Load from Video URL")
344
  video_url = st.text_input(
345
+ "Enter public video URL (e.g., direct link to .mp4)",
346
+ placeholder="https://example.com/traffic.mp4"
347
  )
348
 
349
+ if st.button("πŸ”— Load from URL", use_container_width=True) and video_url:
350
+ video_path = download_video_from_url(video_url)
351
+ if video_path:
352
+ st.success("Video downloaded and ready for processing.")
353
+ # Try to display a frame if possible
354
+ try:
355
  cap = cv2.VideoCapture(video_path)
356
  ret, frame = cap.read()
357
  if ret:
358
+ st.image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), caption="Video Preview", use_column_width=True)
359
  cap.release()
360
+ except Exception:
361
+ st.warning("Could not display video preview.")
362
+
363
+ st.markdown("---")
364
 
365
+ # Process button logic
366
+ if video_path:
367
+ if st.button("πŸš€ START TRACKING AND COUNTING", type="primary", use_container_width=True):
368
+ selected_class_ids = get_selected_class_ids()
369
 
370
+ if not selected_class_ids:
371
+ st.error("Please select at least one object type to count in the sidebar.")
372
  else:
373
+ try:
374
+ with st.spinner(f"Analyzing video with {model_name}..."):
375
+ process_video(video_path, selected_class_ids, model_name)
376
+ st.success("Analysis complete! See results in the 'Analysis & Results' tab.")
377
+ # Automatically switch to results tab on completion? (Streamlit doesn't natively support this well)
378
+ except Exception as e:
379
+ st.error(f"An error occurred during video processing: {e}")
380
+ # Optionally print traceback
381
+ # import traceback; st.code(traceback.format_exc())
382
+ else:
383
+ st.info("Upload a video or provide a URL to begin.")
384
 
385
  with tab2:
386
+ data = st.session_state.processed_data
387
+ if data['processing_complete']:
388
+ st.header("Results Summary")
389
+
390
  col1, col2 = st.columns([2, 1])
391
 
392
  with col1:
393
+ st.subheader("πŸŽ₯ Analyzed Video Output")
394
  # Display processed video
395
+ with open(data['processed_video'], 'rb') as video_file:
396
+ video_bytes = video_file.read()
397
  st.video(video_bytes)
398
 
399
  # Download button
400
  st.download_button(
401
+ label="πŸ“₯ Download Analyzed Video (MP4)",
402
  data=video_bytes,
403
+ file_name="analyzed_tracking_video.mp4",
404
+ mime="video/mp4",
405
+ use_container_width=True
406
  )
 
 
 
407
 
408
+ with col2:
409
+ st.subheader("βœ… Object Crossing Totals")
410
  # Display total counts
411
+ if data['total_counts']:
412
+ for obj_type, count in data['total_counts'].items():
413
+ st.metric(label=f"Total {obj_type.capitalize()} Crossed", value=count)
414
  else:
415
+ st.info("No objects crossed the counting line in the analyzed section.")
416
+
417
+ st.subheader("πŸ“Š Object Presence Over Frames")
418
+ if data['frame_counts']:
419
+ df = pd.DataFrame(data['frame_counts']).fillna(0)
 
 
 
 
 
 
 
420
 
421
+ # Time series chart (Plotly)
422
  fig = go.Figure()
423
 
424
+ # Add a trace for each object type (columns except 'frame')
425
  for column in df.columns:
426
  if column != 'frame':
427
  fig.add_trace(go.Scatter(
 
432
  ))
433
 
434
  fig.update_layout(
435
+ title="Count of Objects Present in Frame",
436
  xaxis_title="Frame Number",
437
+ yaxis_title="Count of Objects (Instance Count)",
438
  hovermode='x unified',
439
  height=400
440
  )
441
 
442
  st.plotly_chart(fig, use_container_width=True)
443
+
444
+ st.subheader("Data Export")
445
+ st.dataframe(df.tail(10), use_container_width=True, height=200)
446
+
447
+ csv = df.to_csv(index=False).encode('utf-8')
448
+ st.download_button(
449
+ label="⬇️ Download Frame-by-Frame Data (CSV)",
450
+ data=csv,
451
+ file_name="object_count_data.csv",
452
+ mime="text/csv",
453
+ )
454
+
455
+ else:
456
+ st.warning("No tracking data available. Process a video first.")
457
+
458
  else:
459
+ st.info("Process a video in the 'Video Input' tab to view analysis results.")
460
 
461
  with tab3:
462
+ st.header("Documentation: Smart Object Traffic Analyzer")
463
  st.markdown("""
464
+ This application utilizes cutting-edge computer vision techniques for object tracking and crossing counting.
465
 
466
+ ### πŸ”‘ Core Technology
 
 
 
 
467
 
468
+ * **YOLOv8**: The primary model for high-accuracy, real-time object detection. We recommend the `yolov8n.pt` (Nano) for speed in browser-based demos.
469
+ * **ByteTrack**: Used via the `ultralytics` package for robust object tracking, assigning a unique ID to each detected instance across frames.
470
+ * **Streamlit**: Provides the interactive, professional front-end interface.
 
 
 
471
 
472
+ ---
 
 
 
 
473
 
474
+ ### βš™οΈ How Crossing Counting Works
 
 
 
475
 
476
+ Unlike simple detection counters which add to a total for every frame an object is visible, this app counts **unique object crossings** of a vertical line:
 
 
 
 
 
477
 
478
+ 1. **Tracking**: YOLOv8's integrated tracker assigns a persistent **Track ID** to each object (`person`, `car`, etc.).
479
+ 2. **Centroid Calculation**: The center-point (centroid) of the object's bounding box is calculated for every frame.
480
+ 3. **Crossing Logic**: The system monitors the object's horizontal position relative to the **Counting Line**. An object is counted **once** when its centroid moves from one side of the line (e.g., left) to the other (e.g., right).
481
 
482
+ This ensures an accurate count of unique events, not redundant detections.
483
+
484
+ ### πŸš€ Deployment on Hugging Face Spaces
485
+
486
+ This script is optimized for deployment:
487
+
488
+ * **Caching (`@st.cache_resource`)**: The YOLO model is loaded only once, saving significant time.
489
+ * **Dependency List**: You will need a `requirements.txt` file in your Space with the following key libraries:
490
+ ```text
491
+ streamlit
492
+ ultralytics
493
+ opencv-python-headless
494
+ numpy
495
+ plotly
496
+ pandas
497
+ requests
498
+ scipy # currently unused with ByteTrack tracking; removable along with the 'from scipy.spatial import distance' import