rba28 committed on
Commit
77f06da
·
verified ·
1 Parent(s): 109a299

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -27
app.py CHANGED
@@ -6,18 +6,19 @@ import json
6
  import gradio as gr
7
 
8
  # -------------------
9
- # Lightweight config
10
  # -------------------
11
  REPO_ID = "mshamrai/yolov8s-visdrone"
12
  FILENAME = "weights/best.pt"
13
 
14
  SAMPLES_DIR = "samples"
15
- SAMPLE_IMAGE = os.path.join(SAMPLES_DIR, "drone_sample.jpg")
16
- SAMPLE_VIDEO = os.path.join(SAMPLES_DIR, "airspace_sample.mp4")
 
17
 
18
  SAMPLE_URLS = {
19
- SAMPLE_IMAGE: "https://huggingface.co/datasets/hf-internal-testing/example-documents/resolve/main/airplane.jpg",
20
- SAMPLE_VIDEO: "https://huggingface.co/datasets/hf-internal-testing/example-documents/resolve/main/short_harvard_bridge.mp4",
21
  }
22
 
23
  def ensure_samples():
@@ -30,7 +31,7 @@ def ensure_samples():
30
  if os.path.exists(local_path):
31
  continue
32
  try:
33
- r = requests.get(url, timeout=20)
34
  r.raise_for_status()
35
  with open(local_path, "wb") as f:
36
  f.write(r.content)
@@ -242,7 +243,7 @@ def export_pdf_vid(summary: str, counts: dict):
242
  return _save_pdf("Airspace Drone Detector — Video Report", summary or "No summary.", counts or {}, None)
243
 
244
  # -------------------
245
- # UI
246
  # -------------------
247
  EXAMPLE_NOTE = (
248
  "Tip: This model is trained on VisDrone-style aerial objects (small targets). "
@@ -253,24 +254,26 @@ with gr.Blocks(title="Airspace Drone Detector (YOLOv8 VisDrone)") as demo:
253
  gr.Markdown(
254
  """
255
  # Airspace Drone Detector (Pretrained YOLOv8 - VisDrone)
256
- Upload an **image or video** and detect small aerial objects (e.g., drones) using a **pre-trained VisDrone model**.
257
- No dataset or training required — just run it.
258
 
259
- **Controls:** Adjust Confidence / NMS to tune detections.
260
- **Exports:** Download detections as CSV or a polished PDF report.
261
- **Note:** On CPU Spaces, long videos are truncated via **Max frames** for responsiveness.
262
  """
263
  )
264
 
265
  with gr.Tabs():
266
- # IMAGE
267
  with gr.TabItem("Image"):
268
  with gr.Row():
269
- image_in = gr.Image(label="Upload Image", type="numpy")
 
 
 
 
270
  with gr.Column():
271
- conf_img = gr.Slider(0.05, 0.8, 0.25, step=0.05, label="Confidence")
272
  iou_img = gr.Slider(0.1, 0.9, 0.45, step=0.05, label="NMS IoU")
273
- run_img = gr.Button("Detect")
274
  gr.Markdown(EXAMPLE_NOTE)
275
 
276
  image_out = gr.Image(label="Annotated Image")
@@ -299,18 +302,18 @@ No dataset or training required — just run it.
299
  outputs=[pdf_img_path],
300
  )
301
 
302
- if os.path.exists(SAMPLE_IMAGE):
303
- gr.Examples(examples=[[SAMPLE_IMAGE]], inputs=[image_in], label="Try with a sample image (preloaded)")
304
-
305
- # VIDEO
306
  with gr.TabItem("Video"):
307
  with gr.Row():
308
- video_in = gr.Video(label="Upload Video (mp4/mov)")
 
 
 
309
  with gr.Column():
310
- conf_vid = gr.Slider(0.05, 0.8, 0.25, step=0.05, label="Confidence")
311
  iou_vid = gr.Slider(0.1, 0.9, 0.45, step=0.05, label="NMS IoU")
312
  max_frames = gr.Slider(60, 2000, 300, step=10, label="Max frames to process")
313
- run_vid = gr.Button("Detect")
314
  gr.Markdown(EXAMPLE_NOTE)
315
 
316
  video_out = gr.Video(label="Annotated Video")
@@ -349,10 +352,7 @@ No dataset or training required — just run it.
349
  gr.Markdown(
350
  f"""
351
  **Weights:** `{REPO_ID}/{FILENAME}` (downloaded lazily)
352
- **Diagnostics**
353
- - FFmpeg available: {'Yes' if _ffmpeg_ok() else 'No'}
354
- - Python: 3.10
355
- - Torch & Ultralytics load on first run.
356
  """
357
  )
358
 
 
6
  import gradio as gr
7
 
8
  # -------------------
9
+ # Config
10
  # -------------------
11
  REPO_ID = "mshamrai/yolov8s-visdrone"
12
  FILENAME = "weights/best.pt"
13
 
14
  SAMPLES_DIR = "samples"
15
+ # Embedded samples (auto-downloaded on start)
16
+ TEST_IMAGE = os.path.join(SAMPLES_DIR, "test_image.jpg")
17
+ TEST_VIDEO = os.path.join(SAMPLES_DIR, "test_video.mp4")
18
 
19
  SAMPLE_URLS = {
20
+ TEST_IMAGE: "https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg", # small image
21
+ TEST_VIDEO: "https://github.com/ultralytics/assets/releases/download/v0.0.0/drone.mp4", # short drone clip
22
  }
23
 
24
  def ensure_samples():
 
31
  if os.path.exists(local_path):
32
  continue
33
  try:
34
+ r = requests.get(url, timeout=30)
35
  r.raise_for_status()
36
  with open(local_path, "wb") as f:
37
  f.write(r.content)
 
243
  return _save_pdf("Airspace Drone Detector — Video Report", summary or "No summary.", counts or {}, None)
244
 
245
  # -------------------
246
+ # UI (preloaded samples)
247
  # -------------------
248
  EXAMPLE_NOTE = (
249
  "Tip: This model is trained on VisDrone-style aerial objects (small targets). "
 
254
  gr.Markdown(
255
  """
256
  # Airspace Drone Detector (Pretrained YOLOv8 - VisDrone)
257
+ The sample **image** and **video** are already loaded below — just click **Run**.
 
258
 
259
+ **Exports:** CSV + PDF reports.
260
+ **Note:** On CPU Spaces, long videos are truncated via **Max frames**.
 
261
  """
262
  )
263
 
264
  with gr.Tabs():
265
+ # IMAGE (preloaded)
266
  with gr.TabItem("Image"):
267
  with gr.Row():
268
+ image_in = gr.Image(
269
+ value=TEST_IMAGE if os.path.exists(TEST_IMAGE) else None,
270
+ type="numpy",
271
+ label="Input Image (preloaded)"
272
+ )
273
  with gr.Column():
274
+ conf_img = gr.Slider(0.05, 0.8, 0.35, step=0.05, label="Confidence")
275
  iou_img = gr.Slider(0.1, 0.9, 0.45, step=0.05, label="NMS IoU")
276
+ run_img = gr.Button("Run Detection")
277
  gr.Markdown(EXAMPLE_NOTE)
278
 
279
  image_out = gr.Image(label="Annotated Image")
 
302
  outputs=[pdf_img_path],
303
  )
304
 
305
+ # VIDEO (preloaded)
 
 
 
306
  with gr.TabItem("Video"):
307
  with gr.Row():
308
+ video_in = gr.Video(
309
+ value=TEST_VIDEO if os.path.exists(TEST_VIDEO) else None,
310
+ label="Input Video (preloaded)"
311
+ )
312
  with gr.Column():
313
+ conf_vid = gr.Slider(0.05, 0.8, 0.35, step=0.05, label="Confidence")
314
  iou_vid = gr.Slider(0.1, 0.9, 0.45, step=0.05, label="NMS IoU")
315
  max_frames = gr.Slider(60, 2000, 300, step=10, label="Max frames to process")
316
+ run_vid = gr.Button("Run Detection")
317
  gr.Markdown(EXAMPLE_NOTE)
318
 
319
  video_out = gr.Video(label="Annotated Video")
 
352
  gr.Markdown(
353
  f"""
354
  **Weights:** `{REPO_ID}/{FILENAME}` (downloaded lazily)
355
+ **Diagnostics** — FFmpeg: {'Yes' if _ffmpeg_ok() else 'No'} • Python: 3.10
 
 
 
356
  """
357
  )
358