nagasurendra committed on
Commit
73a391b
·
verified ·
1 Parent(s): e55f23c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +87 -102
app.py CHANGED
@@ -36,7 +36,7 @@ last_metrics: Dict[str, Any] = {}
36
  frame_count: int = 0
37
  SAVE_IMAGE_INTERVAL = 1
38
  DETECTION_CLASSES = ["Longitudinal", "Pothole", "Transverse"]
39
- MAX_IMAGES = 500 # Limit saved images to reduce ZIP time
40
 
41
  device = "cuda" if torch.cuda.is_available() else "cpu"
42
  model = YOLO('./data/best.pt').to(device)
@@ -46,7 +46,7 @@ if device == "cuda":
46
  def zip_all_outputs(report_path: str, video_path: str, chart_path: str, map_path: str) -> str:
47
  zip_path = os.path.join(OUTPUT_DIR, f"drone_analysis_outputs_{datetime.now().strftime('%Y%m%d_%H%M%S')}.zip")
48
  try:
49
- with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_STORED) as zipf: # Use ZIP_STORED for faster compression
50
  if os.path.exists(report_path):
51
  zipf.write(report_path, os.path.basename(report_path))
52
  if os.path.exists(video_path):
@@ -114,7 +114,7 @@ def write_flight_log(frame_count: int, gps_coord: List[float], timestamp: str) -
114
  def check_image_quality(frame: np.ndarray, input_resolution: int) -> bool:
115
  height, width, _ = frame.shape
116
  frame_resolution = width * height
117
- if frame_resolution < 2_073_600: # 1920x1080 minimum
118
  log_entries.append(f"Frame {frame_count}: Resolution {width}x{height} below 2MP")
119
  return False
120
  if frame_resolution < input_resolution:
@@ -196,7 +196,7 @@ def generate_report(
196
  "- Terrain Follow Mode: Enabled",
197
  "",
198
  "## 3. Quality Check Results",
199
- f"- Resolution: 1920x1080",
200
  "- Overlap: 85%",
201
  "- Camera Angle: 90° nadir",
202
  "- Drone Speed: ≤ 5 m/s",
@@ -205,9 +205,9 @@ def generate_report(
205
  "",
206
  "## 4. AI/ML Analytics",
207
  f"- Total Frames Processed: {frame_count}",
208
- f"- Detection Frames: {detection_frame_count} ({detection_frame_count//frame_count*100:.1f}%)",
209
- f"- Total Detections:: {metrics['total_detections']}",
210
- " - Breakdown:",
211
  ]
212
 
213
  for item in metrics.get("items", []):
@@ -219,51 +219,46 @@ def generate_report(
219
  f"- Average Resize Time: {sum(resize_times)/len(resize_times):.1f} ms" if resize_times else "- Average Resize Time: N/A",
220
  f"- Average Inference Time: {sum(inference_times)/len(inference_times):.1f} ms" if inference_times else "- Average Inference Time: N/A",
221
  f"- Average I/O Time: {sum(io_times)/len(io_times):.1f} ms" if io_times else "- Average I/O Time: N/A",
222
- f"- Timestamp: {metrics.get('timestamp', 'N/A')}": 'N/A'",
223
  "- Summary: Potholes and cracks detected in high-traffic areas.",
224
  "",
225
  "## 5. Output File Structure",
226
  "- ZIP file contains:",
227
- " - `drone_analysis_report_<timestamp>.md`:): This report",
228
- " - `outputs/processed_output.mp4`:): Processed video with annotations",
229
- " - `outputs/chart_<timestamp>.jpg`:): Detection trend chart",
230
- " - `outputs/map_<timestamp>.jpg`:): Issue locations map",
231
- " - `captured_frames/detected_<frame>.jpg`:): Geotagged images for detected issues",
232
- " - `flight_logs/flight_log_<frame>.csv`:): Flight logs matching image frames",
233
- "- Note: Images and logs share frame numbers (e.g., `detected_000001.jpg` corresponds to `flight_log_000001.csv`).),
234
- ])
235
-
236
- report_content.append([
237
- "",
238
  "",
239
  "## 6. Geotagged Images",
240
  f"- Total Images: {len(detected_issues)}",
241
- f"- Storage: Data Lake `/project_xyz/images/projects/{datetime.now().strftime('%Y%m%d')}`",
242
  "",
243
- "| Frame | Issue Type | GPS (Lat, Lon) | Timestamp | | Confidence | Image Path | |",
244
- "|_______|------------|----------------|-----------|------------|-----------------------------|",
245
  ])
246
 
247
  for detection in all_detections[:100]:
248
  report_content.append(
249
- f"""| {detection['frame']:06d} | {detection['label']} | ({detection['gps'][0]:.6f}, {detection['gps'][1]:.6f}) | {detection['timestamp']} | {detection['conf']:.1f} | captured_frames/{os.path.basename(detection['path']})} |"
250
  )
251
 
252
  report_content.extend([
253
- "",
254
- "## 7. Flight Logs",
255
- f"- Total Logs: {len(detected_issues)}",
256
- f"- Storage: Data Lake `/project_xyz/flight_logs/{datetime.now().strftime('%Y%m%d')}')}",
257
- "",
258
- "| Frame | Timestamp | Latitude | Longitude | Latitude Longitude | Speed (m/s) | Latency Satellites | ms) | Altitude |(m) | Log Path |",
259
- "|-------|-----------|-----------------------------|-----------|-------------|------------|--------------|-----------------------------|",
260
  ])
261
 
262
  for detection in all_detections[:100]:
263
- log_path = f"flight_logs/flight_log_{detection['frame']}:f{06d}.csv":'
264
  report_content.append(
265
- f"| {detection['frame']:06d} | {detection['timestamp']} | {detection['gps'][0]:.6f} | {detection['gps'][1]:.6f} | 5.0 |,
266
- | 12 | 60 | {log_path} |"
267
  )
268
 
269
  report_content.extend([
@@ -275,12 +270,12 @@ def generate_report(
275
  f"- Duration: {output_duration:.1f} seconds",
276
  "",
277
  "## 9. Visualizations",
278
- f"- Detection Trend Chart: outputs/chart_{timestamp}.jpg",
279
- f"- Issue Locations Map: outputs/map_{timestamp}.jpg",
280
  "",
281
  "## 10. Processing Timestamps",
282
  f"- Total Processing Time: {total_time:.1f} seconds",
283
- "- Log Entries (Last 10):",
284
  ])
285
 
286
  for entry in log_entries[-10:]:
@@ -293,84 +288,80 @@ def generate_report(
293
  "- PD/RO Comments: [Pending]",
294
  "",
295
  "## 12. Recommendations",
296
- "- Repair potholes potholes in high-traffic areas",
297
  "- Seal cracks to prevent further degradation.",
298
  "- Schedule a follow-up survey.",
299
  "",
300
  "## 13. Data Lake References",
301
- f""- Images: `/project_xyz/images/{datetime.now().strftime('%Y%m%d')'}`)",
302
- f""- f"- Flight Logs: `/project_xyz/flight_logs/{timestamp}`),
303
- f"- Video: `/project_xyz/videos/processed_output_{timestamp}.mp4`,
304
- f"- DAMS videos/processed_videos/{timestamp}/processed_video`,
305
- f"- DAMS Dashboard: `/project_xyz/dams/{datetime.now().strftime('%Y%m%d')'}`)",
306
  "",
307
  "## 14. Captured Images",
308
  "Below are the images from the captured frames directory showing detected issues:",
309
- "",
310
  ])
311
 
312
- # Add image references for all all captured images in in captured_frames detected_issues
313
  for image_path in detected_issues:
314
  if os.path.exists(image_path):
315
  image_name = os.path.basename(image_path)
316
  report_content.append(f"![Captured image at {image_name}](captured_frames/{image_name})")
317
 
318
  try:
319
- with open(report_path, mode'w), as) as file:
320
- file.write("\n".join(report_content))
321
  log_entries.append(f"Report saved at: {report_path}")
322
  return report_path
323
  except Exception as e:
324
- log_entries.append(f"Error: Failed to to save report): {str(e)}")
325
  return ""
326
- )
327
 
328
- def process_video(input_video, resize_width=1920, resize_height=1080, frame_skip=10):
329
- global frame_count, last_metrics_counter, detected_counts, detected_dissues, gps_coordinates,, output_log_entries
330
  frame_count = 0
331
  detected_counts.clear()
332
  detected_issues.clear()
333
  gps_coordinates.clear()
334
- output_log_entries.clear()
335
- last_metrics = {})
336
 
337
  if video is None:
338
  log_entries.append("Error: No video uploaded")
339
- return None, None, None, [], None, None, None)
340
 
341
  log_entries.append("Starting video processing...")
342
  start_time = time.time()
343
  cap = cv2.VideoCapture(video)
344
  if not cap.isOpened():
345
- log_entries.append("Error: Could not open videos file")
346
- return None, json.dumps({"),error": "Could not open video file"}, indent=2), None, [], None, None, None)
347
 
348
  frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
349
- width = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
350
- input_resolution = frame_width * height
351
- f = FPS
352
  fps = cap.get(cv2.CAP_PROP_FPS)
353
- total_frames = int(cap.get(cv2).CAP_PRO_PROF_COUNT)
354
- log_entries.append(f"Input video: {frame_width}x{height} at {fps} FPS,, {total_frames} frames")
355
 
356
- out_width, out_height = resize_width, resize_height
357
- output_path = os.path.join(OUTPUT_DIR, str"processed_video.mp4"))
358
- out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'W'XVID'), fps=20, (out_width, out_height)) # Switch to XVID
359
  if not out.isOpened():
360
  log_entries.append("Error: Failed to initialize video writer")
361
  cap.release()
362
- return None, json.dumps({"error": "Video writer failed"}), None, [], None, None, None)
363
 
364
- processed_frames = []0
365
  all_detections = []
366
  frame_times = []
367
  inference_times = []
368
  resize_times = []
369
  io_times = []
370
  detection_frame_count = 0
371
- output_frame_count = = 0
372
- last_frame_detected_frame = None
373
- disk_space_threshold = 1024 * 1024 * 1024 # 1GB minimum disk space
374
 
375
  while True:
376
  ret, frame = cap.read()
@@ -382,8 +373,7 @@ def process_video(input_video, resize_width=1920, resize_height=1080, frame_skip
382
  processed_frames += 1
383
  frame_start = time.time()
384
 
385
- # Check disk space
386
- if os.statvfs(os.path.dirname(output_path)).f_frsize() * os.statvfs().f_bavail < disk_space_threshold:
387
  log_entries.append("Error: Insufficient disk space")
388
  break
389
 
@@ -393,18 +383,18 @@ def process_video(input_video, resize_width=1920, resize_height=1080, frame_skip
393
  if not check_image_quality(frame, input_resolution):
394
  continue
395
 
396
- inference_time = time.time()
397
- results = model(frame, verbose=False, conf=0.5, threshold=0.7)
398
  annotated_frame = results[0].plot()
399
- inference_times.append((time.time() - inference_time) * 1000)
400
 
401
  frame_timestamp = frame_count / fps if fps > 0 else 0
402
- timestamp_str = f"{int(frame_timestamp / 60):02d}:{int(frame_timestamp % 60):02d}"
403
 
404
  gps_coord = [17.385044 + (frame_count * 0.0001), 78.486671 + (frame_count * 0.0001)]
405
  gps_coordinates.append(gps_coord)
406
 
407
- io_start_time = time.time()
408
  frame_detections = []
409
  for detection in results[0].boxes:
410
  cls = int(detection.cls)
@@ -419,7 +409,7 @@ def process_video(input_video, resize_width=1920, resize_height=1080, frame_skip
419
  "gps": gps_coord,
420
  "timestamp": timestamp_str,
421
  "frame": frame_count,
422
- "path": os.path.join(CAPTURED_FRAMES_DIR, f"detected_frame_{frame_count:06d}.jpg")
423
  }
424
  frame_detections.append(detection_data)
425
  log_entries.append(f"Frame {frame_count} at {timestamp_str}: Detected {label} with confidence {conf:.2f}")
@@ -427,23 +417,23 @@ def process_video(input_video, resize_width=1920, resize_height=1080, frame_skip
427
  if frame_detections:
428
  detection_frame_count += 1
429
  if detection_frame_count % SAVE_IMAGE_INTERVAL == 0:
430
- captured_frame_path = os.path.join(CAPTURED_FRAMES_DIR, f"detected_frame_{frame_count:06d}.jpg")
431
  if cv2.imwrite(captured_frame_path, annotated_frame):
432
  if write_geotag(captured_frame_path, gps_coord):
433
  detected_issues.append(captured_frame_path)
434
  if len(detected_issues) > MAX_IMAGES:
435
- os.remove(detected_issues.pop(0)) # Remove oldest image
436
  else:
437
  log_entries.append(f"Frame {frame_count}: Geotagging failed")
438
  else:
439
  log_entries.append(f"Error: Failed to save frame at {captured_frame_path}")
440
  write_flight_log(frame_count, gps_coord, timestamp_str)
441
 
442
- io_times.append((time.time() - io_start_time) * 1000)
443
 
444
  out.write(annotated_frame)
445
  output_frame_count += 1
446
- last_detected_frame = annotated_frame
447
  if frame_skip > 1:
448
  for _ in range(frame_skip - 1):
449
  out.write(annotated_frame)
@@ -460,8 +450,8 @@ def process_video(input_video, resize_width=1920, resize_height=1080, frame_skip
460
  log_entries.append("Error: Processing timeout after 600 seconds")
461
  break
462
 
463
- while output_frame_count < total_frames and last_detected_frame is not None:
464
- out.write(last_detected_frame)
465
  output_frame_count += 1
466
 
467
  last_metrics = update_metrics(all_detections)
@@ -469,7 +459,6 @@ def process_video(input_video, resize_width=1920, resize_height=1080, frame_skip
469
  out.release()
470
  cap.release()
471
 
472
- # Verify output video
473
  cap = cv2.VideoCapture(output_path)
474
  if not cap.isOpened():
475
  log_entries.append("Error: Failed to open output video for verification")
@@ -523,38 +512,34 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange")) as iface:
523
  video_input = gr.Video(label="Upload Video")
524
  width_slider = gr.Slider(320, 1920, value=1920, label="Output Width", step=1)
525
  height_slider = gr.Slider(240, 1080, value=1080, label="Output Height", step=1)
526
- skip_slider = gr.Slider(1, 20, value=10, label="Frame Skip", step=2)
527
- process_btn = gr.Button(value"Process Video", variant="primary")
528
  with gr.Column(scale=1):
529
- metrics_output = gr.Textbox(label="", lines=5, interactive=False)
530
- )
531
  with gr.Row():
532
- output_video = gr.Video(label="Processed Video")
533
- issue_frame_gallery = gr.Gallery(label="Detected Issues", columns=4, height="auto", object_fit="fit"contain")
534
  with gr.Row():
535
- chart_frame = gr.Image(label="Detection Trend")
536
- map_frame = gr.Image(label="Issue Locations")
537
- )
538
  with gr.Row():
539
- logs_frame = gr.Textbox(label="Logs", lines=5, interactive=False)
540
  with gr.Row():
541
- )
542
  gr.Markdown("## Download Results")
543
- )
544
  with gr.Row():
545
- zip_output_downloaded = gr.File(label="Download All Outputs")
546
 
547
  process_btn.click(
548
  fn=process_video,
549
  inputs=[video_input, width_slider, height_slider, skip_slider],
550
  outputs=[
551
- output_video,
552
  metrics_output,
553
- logs_frame_output,
554
- issue_frame_dgallery,
555
- chart_frame_doutput,
556
- maps_frame_doutput,
557
- zip_output_downloads
558
  ]
559
  )
560
 
 
36
  frame_count: int = 0
37
  SAVE_IMAGE_INTERVAL = 1
38
  DETECTION_CLASSES = ["Longitudinal", "Pothole", "Transverse"]
39
+ MAX_IMAGES = 500
40
 
41
  device = "cuda" if torch.cuda.is_available() else "cpu"
42
  model = YOLO('./data/best.pt').to(device)
 
46
  def zip_all_outputs(report_path: str, video_path: str, chart_path: str, map_path: str) -> str:
47
  zip_path = os.path.join(OUTPUT_DIR, f"drone_analysis_outputs_{datetime.now().strftime('%Y%m%d_%H%M%S')}.zip")
48
  try:
49
+ with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_STORED) as zipf:
50
  if os.path.exists(report_path):
51
  zipf.write(report_path, os.path.basename(report_path))
52
  if os.path.exists(video_path):
 
114
  def check_image_quality(frame: np.ndarray, input_resolution: int) -> bool:
115
  height, width, _ = frame.shape
116
  frame_resolution = width * height
117
+ if frame_resolution < 2_073_600:
118
  log_entries.append(f"Frame {frame_count}: Resolution {width}x{height} below 2MP")
119
  return False
120
  if frame_resolution < input_resolution:
 
196
  "- Terrain Follow Mode: Enabled",
197
  "",
198
  "## 3. Quality Check Results",
199
+ "- Resolution: 1920x1080",
200
  "- Overlap: 85%",
201
  "- Camera Angle: 90° nadir",
202
  "- Drone Speed: ≤ 5 m/s",
 
205
  "",
206
  "## 4. AI/ML Analytics",
207
  f"- Total Frames Processed: {frame_count}",
208
+ f"- Detection Frames: {detection_frame_count} ({detection_frame_count/frame_count*100:.1f}%)",
209
+ f"- Total Detections: {metrics['total_detections']}",
210
+ " - Breakdown:"
211
  ]
212
 
213
  for item in metrics.get("items", []):
 
219
  f"- Average Resize Time: {sum(resize_times)/len(resize_times):.1f} ms" if resize_times else "- Average Resize Time: N/A",
220
  f"- Average Inference Time: {sum(inference_times)/len(inference_times):.1f} ms" if inference_times else "- Average Inference Time: N/A",
221
  f"- Average I/O Time: {sum(io_times)/len(io_times):.1f} ms" if io_times else "- Average I/O Time: N/A",
222
+ f"- Timestamp: {metrics.get('timestamp', 'N/A')}",
223
  "- Summary: Potholes and cracks detected in high-traffic areas.",
224
  "",
225
  "## 5. Output File Structure",
226
  "- ZIP file contains:",
227
+ " - `drone_analysis_report_<timestamp>.md`: This report",
228
+ " - `outputs/processed_output.mp4`: Processed video with annotations",
229
+ " - `outputs/chart_<timestamp>.png`: Detection trend chart",
230
+ " - `outputs/map_<timestamp>.png`: Issue locations map",
231
+ " - `captured_frames/detected_<frame>.jpg`: Geotagged images for detected issues",
232
+ " - `flight_logs/flight_log_<frame>.csv`: Flight logs matching image frames",
233
+ "- Note: Images and logs share frame numbers (e.g., `detected_000001.jpg` corresponds to `flight_log_000001.csv`).",
 
 
 
 
234
  "",
235
  "## 6. Geotagged Images",
236
  f"- Total Images: {len(detected_issues)}",
237
+ f"- Storage: Data Lake `/project_xyz/images/{datetime.now().strftime('%Y%m%d')}`",
238
  "",
239
+ "| Frame | Issue Type | GPS (Lat, Lon) | Timestamp | Confidence | Image Path |",
240
+ "|-------|------------|----------------|-----------|------------|------------|"
241
  ])
242
 
243
  for detection in all_detections[:100]:
244
  report_content.append(
245
+ f"| {detection['frame']:06d} | {detection['label']} | ({detection['gps'][0]:.6f}, {detection['gps'][1]:.6f}) | {detection['timestamp']} | {detection['conf']:.1f} | captured_frames/{os.path.basename(detection['path'])} |"
246
  )
247
 
248
  report_content.extend([
249
+ "",
250
+ "## 7. Flight Logs",
251
+ f"- Total Logs: {len(detected_issues)}",
252
+ f"- Storage: Data Lake `/project_xyz/flight_logs/{datetime.now().strftime('%Y%m%d')}`",
253
+ "",
254
+ "| Frame | Timestamp | Latitude | Longitude | Speed (m/s) | Satellites | Altitude (m) | Log Path |",
255
+ "|-------|-----------|----------|-----------|-------------|------------|--------------|----------|"
256
  ])
257
 
258
  for detection in all_detections[:100]:
259
+ log_path = f"flight_logs/flight_log_{detection['frame']:06d}.csv"
260
  report_content.append(
261
+ f"| {detection['frame']:06d} | {detection['timestamp']} | {detection['gps'][0]:.6f} | {detection['gps'][1]:.6f} | 5.0 | 12 | 60 | {log_path} |"
 
262
  )
263
 
264
  report_content.extend([
 
270
  f"- Duration: {output_duration:.1f} seconds",
271
  "",
272
  "## 9. Visualizations",
273
+ f"- Detection Trend Chart: outputs/chart_{timestamp}.png",
274
+ f"- Issue Locations Map: outputs/map_{timestamp}.png",
275
  "",
276
  "## 10. Processing Timestamps",
277
  f"- Total Processing Time: {total_time:.1f} seconds",
278
+ "- Log Entries (Last 10):"
279
  ])
280
 
281
  for entry in log_entries[-10:]:
 
288
  "- PD/RO Comments: [Pending]",
289
  "",
290
  "## 12. Recommendations",
291
+ "- Repair potholes in high-traffic areas.",
292
  "- Seal cracks to prevent further degradation.",
293
  "- Schedule a follow-up survey.",
294
  "",
295
  "## 13. Data Lake References",
296
+ f"- Images: `/project_xyz/images/{datetime.now().strftime('%Y%m%d')}`",
297
+ f"- Flight Logs: `/project_xyz/flight_logs/{datetime.now().strftime('%Y%m%d')}`",
298
+ f"- Video: `/project_xyz/videos/processed_output_{timestamp}.mp4`",
299
+ f"- DAMS Dashboard: `/project_xyz/dams/{datetime.now().strftime('%Y%m%d')}`",
 
300
  "",
301
  "## 14. Captured Images",
302
  "Below are the images from the captured frames directory showing detected issues:",
303
+ ""
304
  ])
305
 
 
306
  for image_path in detected_issues:
307
  if os.path.exists(image_path):
308
  image_name = os.path.basename(image_path)
309
  report_content.append(f"![Captured image at {image_name}](captured_frames/{image_name})")
310
 
311
  try:
312
+ with open(report_path, 'w') as f:
313
+ f.write("\n".join(report_content))
314
  log_entries.append(f"Report saved at: {report_path}")
315
  return report_path
316
  except Exception as e:
317
+ log_entries.append(f"Error: Failed to save report: {str(e)}")
318
  return ""
 
319
 
320
+ def process_video(video, resize_width=1920, resize_height=1080, frame_skip=10):
321
+ global frame_count, last_metrics, detected_counts, detected_issues, gps_coordinates, log_entries
322
  frame_count = 0
323
  detected_counts.clear()
324
  detected_issues.clear()
325
  gps_coordinates.clear()
326
+ log_entries.clear()
327
+ last_metrics = {}
328
 
329
  if video is None:
330
  log_entries.append("Error: No video uploaded")
331
+ return None, json.dumps({"error": "No video uploaded"}, indent=2), "\n".join(log_entries), [], None, None, None
332
 
333
  log_entries.append("Starting video processing...")
334
  start_time = time.time()
335
  cap = cv2.VideoCapture(video)
336
  if not cap.isOpened():
337
+ log_entries.append("Error: Could not open video file")
338
+ return None, json.dumps({"error": "Could not open video file"}, indent=2), "\n".join(log_entries), [], None, None, None
339
 
340
  frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
341
+ frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
342
+ input_resolution = frame_width * frame_height
 
343
  fps = cap.get(cv2.CAP_PROP_FPS)
344
+ total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
345
+ log_entries.append(f"Input video: {frame_width}x{frame_height} at {fps} FPS, {total_frames} frames")
346
 
347
+ out_width, out_height = resize_width, resize_height
348
+ output_path = os.path.join(OUTPUT_DIR, "processed_output.mp4")
349
+ out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'XVID'), fps, (out_width, out_height))
350
  if not out.isOpened():
351
  log_entries.append("Error: Failed to initialize video writer")
352
  cap.release()
353
+ return None, json.dumps({"error": "Video writer failed"}, indent=2), "\n".join(log_entries), [], None, None, None
354
 
355
+ processed_frames = 0
356
  all_detections = []
357
  frame_times = []
358
  inference_times = []
359
  resize_times = []
360
  io_times = []
361
  detection_frame_count = 0
362
+ output_frame_count = 0
363
+ last_annotated_frame = None
364
+ disk_space_threshold = 1024 * 1024 * 1024
365
 
366
  while True:
367
  ret, frame = cap.read()
 
373
  processed_frames += 1
374
  frame_start = time.time()
375
 
376
+ if os.statvfs(os.path.dirname(output_path)).f_frsize * os.statvfs(os.path.dirname(output_path)).f_bavail < disk_space_threshold:
 
377
  log_entries.append("Error: Insufficient disk space")
378
  break
379
 
 
383
  if not check_image_quality(frame, input_resolution):
384
  continue
385
 
386
+ inference_start = time.time()
387
+ results = model(frame, verbose=False, conf=0.5, iou=0.7)
388
  annotated_frame = results[0].plot()
389
+ inference_times.append((time.time() - inference_start) * 1000)
390
 
391
  frame_timestamp = frame_count / fps if fps > 0 else 0
392
+ timestamp_str = f"{int(frame_timestamp // 60):02d}:{int(frame_timestamp % 60):02d}"
393
 
394
  gps_coord = [17.385044 + (frame_count * 0.0001), 78.486671 + (frame_count * 0.0001)]
395
  gps_coordinates.append(gps_coord)
396
 
397
+ io_start = time.time()
398
  frame_detections = []
399
  for detection in results[0].boxes:
400
  cls = int(detection.cls)
 
409
  "gps": gps_coord,
410
  "timestamp": timestamp_str,
411
  "frame": frame_count,
412
+ "path": os.path.join(CAPTURED_FRAMES_DIR, f"detected_{frame_count:06d}.jpg")
413
  }
414
  frame_detections.append(detection_data)
415
  log_entries.append(f"Frame {frame_count} at {timestamp_str}: Detected {label} with confidence {conf:.2f}")
 
417
  if frame_detections:
418
  detection_frame_count += 1
419
  if detection_frame_count % SAVE_IMAGE_INTERVAL == 0:
420
+ captured_frame_path = os.path.join(CAPTURED_FRAMES_DIR, f"detected_{frame_count:06d}.jpg")
421
  if cv2.imwrite(captured_frame_path, annotated_frame):
422
  if write_geotag(captured_frame_path, gps_coord):
423
  detected_issues.append(captured_frame_path)
424
  if len(detected_issues) > MAX_IMAGES:
425
+ os.remove(detected_issues.pop(0))
426
  else:
427
  log_entries.append(f"Frame {frame_count}: Geotagging failed")
428
  else:
429
  log_entries.append(f"Error: Failed to save frame at {captured_frame_path}")
430
  write_flight_log(frame_count, gps_coord, timestamp_str)
431
 
432
+ io_times.append((time.time() - io_start) * 1000)
433
 
434
  out.write(annotated_frame)
435
  output_frame_count += 1
436
+ last_annotated_frame = annotated_frame
437
  if frame_skip > 1:
438
  for _ in range(frame_skip - 1):
439
  out.write(annotated_frame)
 
450
  log_entries.append("Error: Processing timeout after 600 seconds")
451
  break
452
 
453
+ while output_frame_count < total_frames and last_annotated_frame is not None:
454
+ out.write(last_annotated_frame)
455
  output_frame_count += 1
456
 
457
  last_metrics = update_metrics(all_detections)
 
459
  out.release()
460
  cap.release()
461
 
 
462
  cap = cv2.VideoCapture(output_path)
463
  if not cap.isOpened():
464
  log_entries.append("Error: Failed to open output video for verification")
 
512
  video_input = gr.Video(label="Upload Video")
513
  width_slider = gr.Slider(320, 1920, value=1920, label="Output Width", step=1)
514
  height_slider = gr.Slider(240, 1080, value=1080, label="Output Height", step=1)
515
+ skip_slider = gr.Slider(1, 20, value=10, label="Frame Skip", step=1)
516
+ process_btn = gr.Button("Process Video", variant="primary")
517
  with gr.Column(scale=1):
518
+ metrics_output = gr.Textbox(label="Detection Metrics", lines=5, interactive=False)
 
519
  with gr.Row():
520
+ video_output = gr.Video(label="Processed Video")
521
+ issue_gallery = gr.Gallery(label="Detected Issues", columns=4, height="auto", object_fit="contain")
522
  with gr.Row():
523
+ chart_output = gr.Image(label="Detection Trend")
524
+ map_output = gr.Image(label="Issue Locations Map")
 
525
  with gr.Row():
526
+ logs_output = gr.Textbox(label="Logs", lines=5, interactive=False)
527
  with gr.Row():
 
528
  gr.Markdown("## Download Results")
 
529
  with gr.Row():
530
+ output_zip_download = gr.File(label="Download All Outputs (ZIP)")
531
 
532
  process_btn.click(
533
  fn=process_video,
534
  inputs=[video_input, width_slider, height_slider, skip_slider],
535
  outputs=[
536
+ video_output,
537
  metrics_output,
538
+ logs_output,
539
+ issue_gallery,
540
+ chart_output,
541
+ map_output,
542
+ output_zip_download
543
  ]
544
  )
545