rba28 committed on
Commit
ecfe892
·
verified ·
1 Parent(s): 0de0944

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +89 -88
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import os
2
  import time
3
  import tempfile
4
- from typing import List, Dict, Optional
5
  import json
6
  import gradio as gr
7
 
@@ -12,44 +12,41 @@ SAMPLES_DIR = "samples"
12
  EMBED_IMG = os.path.join(SAMPLES_DIR, "uav_image.jpg")
13
  EMBED_VID = os.path.join(SAMPLES_DIR, "uav_video.mp4")
14
 
15
- # Optional HF secrets (to force a specific checkpoint)
16
- HF_MODEL_REPO = os.getenv("HF_MODEL_REPO", "").strip()
17
- HF_MODEL_FILE = os.getenv("HF_MODEL_FILE", "").strip()
18
- HF_TOKEN = os.getenv("HF_TOKEN", "").strip()
19
 
20
- # Public pretrained models (no auth needed)
21
- MODEL_CANDIDATES = []
22
- if HF_MODEL_REPO and HF_MODEL_FILE:
23
- MODEL_CANDIDATES.append((HF_MODEL_REPO, HF_MODEL_FILE))
24
- MODEL_CANDIDATES += [
25
- ("Javvanny/yolov8m_flying_objects_detection", "yolov8m/weights/best.pt"),
26
- ("doguilmak/Drone-Detection-YOLOv8x", "weight/best.pt"),
27
- ]
28
 
29
  # =========================
30
  # LABELS & THREAT RULES
31
  # =========================
32
  LABEL_MAP = {
 
 
 
 
 
33
  "БПЛА": "UAV",
34
  "БПЛА коптер": "Drone",
35
  "квадрокоптер": "Drone",
36
  "квадроcамолет": "Drone",
37
  "самолет": "Airplane",
38
  "вертолет": "Helicopter",
39
- "птица": "Bird",
40
- "человек": "Person",
41
- "машина": "Car",
42
  "автомобиль": "Car",
 
 
 
43
  }
44
  THREAT_SET = {"drone", "uav", "airplane", "helicopter"}
45
 
46
  def map_label(name: str) -> str:
47
  if not isinstance(name, str): return name
48
- if name in LABEL_MAP: return LABEL_MAP[name]
49
- low = name.lower()
50
- for ru, en in LABEL_MAP.items():
51
- if low == ru.lower(): return en
52
- return name
53
 
54
  def translate_names_dict(names_dict: Dict[int, str]) -> Dict[int, str]:
55
  if not isinstance(names_dict, dict): return names_dict
@@ -59,12 +56,12 @@ def is_threat(label_en: str) -> bool:
59
  return label_en and label_en.lower() in THREAT_SET
60
 
61
  # =========================
62
- # FALSE-POSITIVE FILTERS (tunable via Space secrets)
63
  # =========================
64
  MIN_CONF = float(os.getenv("MIN_CONF", 0.60)) # drop low-confidence hits
65
- MIN_AREA_PCT = float(os.getenv("MIN_AREA_PCT", 0.004)) # drop tiny boxes (fraction of frame area)
66
  SKY_RATIO = float(os.getenv("SKY_RATIO", 0.65)) # keep boxes whose bottoms are above 65% height
67
- ALLOWED_CLASSES = {"Drone", "UAV", "Helicopter", "Airplane"}
68
 
69
  # =========================
70
  # LAZY GLOBAL STATE
@@ -74,6 +71,7 @@ _model_err = None
74
  _model_names = None
75
  _loaded_repo = None
76
  _loaded_file = None
 
77
  _ffmpeg_status = None
78
 
79
  def _lazy_cv2():
@@ -98,34 +96,45 @@ def _download_from_hf(repo_id: str, filename: str) -> str:
98
  except Exception: pass
99
  return hf_hub_download(repo_id=repo_id, filename=filename)
100
 
101
- def _get_model(conf: float, iou: float):
102
- """Load first available YOLO model from HF."""
103
  global _model, _model_err, _model_names, _loaded_repo, _loaded_file
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
  if _model is None and _model_err is None:
105
- from ultralytics import YOLO
106
  last_err = None
107
- for repo, file in MODEL_CANDIDATES:
 
 
 
 
 
108
  try:
109
- weights = _download_from_hf(repo, file)
110
- m = YOLO(weights)
111
- m.overrides["max_det"] = 300
112
- _model = m
113
- _loaded_repo, _loaded_file = repo, file
114
- try:
115
- _model_names = m.model.names if hasattr(m, "model") else None
116
- except Exception:
117
- _model_names = None
118
- break
119
- except Exception as e:
120
- last_err = e
121
  if _model is None:
122
- _model_err = f"Model load failed. Tried {len(MODEL_CANDIDATES)} candidate(s). Last error: {last_err}"
123
  if _model_err: raise RuntimeError(_model_err)
124
  _model.overrides["conf"] = float(conf)
125
  _model.overrides["iou"] = float(iou)
126
  return _model
127
 
128
- # -------- live footer helper --------
129
  def _model_info_text():
130
  repo = f"{_loaded_repo}/{_loaded_file}" if _loaded_repo else "not loaded"
131
  try:
@@ -160,8 +169,10 @@ def _results_to_rows(results) -> List[dict]:
160
  })
161
  return rows
162
 
163
- def _filter_rows_by_geometry(r, rows: List[dict]) -> List[dict]:
164
- """Drop low-conf, tiny, ground-region boxes; keep only classes we care about."""
 
 
165
  try:
166
  H, W = r.orig_img.shape[:2]
167
  except Exception:
@@ -171,7 +182,7 @@ def _filter_rows_by_geometry(r, rows: List[dict]) -> List[dict]:
171
  if row.get("confidence") is not None and row["confidence"] < MIN_CONF:
172
  continue
173
  cls = map_label(str(row.get("class","")))
174
- if ALLOWED_CLASSES and cls not in ALLOWED_CLASSES:
175
  continue
176
  if H and W:
177
  area = row["width"] * row["height"]
@@ -179,7 +190,7 @@ def _filter_rows_by_geometry(r, rows: List[dict]) -> List[dict]:
179
  continue
180
  y_bottom = row["y2"]
181
  horizon = H * SKY_RATIO
182
- if y_bottom > horizon:
183
  continue
184
  kept.append(row)
185
  return kept
@@ -271,21 +282,6 @@ def _save_pdf_detections(title: str, detections: List[dict], header_note: str =
271
  c.showPage(); c.save()
272
  return out_path
273
 
274
- def _normalize_rows(table_rows):
275
- """Accept DataFrame/list; return list[dict]."""
276
- try:
277
- import pandas as pd
278
- if isinstance(table_rows, pd.DataFrame):
279
- return table_rows.to_dict(orient="records")
280
- except Exception:
281
- pass
282
- if isinstance(table_rows, list) and (not table_rows or isinstance(table_rows[0], dict)):
283
- return table_rows or []
284
- if isinstance(table_rows, list) and table_rows and isinstance(table_rows[0], list):
285
- headers = ["class","confidence","x1","y1","x2","y2","width","height"]
286
- return [dict(zip(headers,row)) for row in table_rows]
287
- return []
288
-
289
  def _apply_english_overlay(r):
290
  try:
291
  if hasattr(r,"names") and isinstance(r.names, dict):
@@ -296,18 +292,18 @@ def _apply_english_overlay(r):
296
  # =========================
297
  # INFERENCE (with filtering + custom draw)
298
  # =========================
299
- def detect_image_safe(image, conf: float, iou: float):
300
  try:
301
  if image is None:
302
- return None, [], "⚠️ No image provided.", [], None
303
  cv2 = _lazy_cv2()
304
- model = _get_model(conf, iou)
305
  results = model.predict(image, imgsz=960, verbose=False)
306
  r = results[0]
307
  _apply_english_overlay(r)
308
 
309
  rows_raw = _results_to_rows(results)
310
- rows = _filter_rows_by_geometry(r, rows_raw)
311
 
312
  annotated_bgr = _draw_annotations_bgr(r.orig_img, rows)
313
  now_utc = time.strftime("%Y-%m-%d %H:%M:%S UTC", time.gmtime())
@@ -318,28 +314,30 @@ def detect_image_safe(image, conf: float, iou: float):
318
  "threat": "Threat" if is_threat(map_label(row["class"])) else "Non-threat",
319
  } for row in rows]
320
 
321
- counts = _count_by_class(rows)
322
- summary = "Detections: " + (", ".join(f"{k}: {v}" for k,v in counts.items()) if rows else "none")
 
 
323
 
324
  tmp_img = os.path.join(tempfile.gettempdir(), f"annotated_{int(time.time())}.jpg")
325
  try: cv2.imwrite(tmp_img, annotated_bgr)
326
  except Exception: tmp_img = None
327
 
328
- annotated_rgb = annotated_bgr[:, :, ::-1] # BGR→RGB for Gradio
329
- return annotated_rgb, rows, summary, det_records, tmp_img
330
  except Exception as e:
331
- return None, [], f"❌ Error during image detection: {e}", [], None
332
 
333
- def detect_video_safe(video_path: str, conf: float, iou: float, max_frames: int = 300):
334
  try:
335
  if not video_path:
336
- return None, "{}", "⚠️ No video provided.", []
337
  cv2 = _lazy_cv2()
338
- model = _get_model(conf, iou)
339
 
340
  cap = cv2.VideoCapture(video_path)
341
  if not cap.isOpened():
342
- return None, "{}", "❌ Failed to open video.", []
343
 
344
  fps = cap.get(cv2.CAP_PROP_FPS) or 24.0
345
  w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) or 1280)
@@ -348,7 +346,7 @@ def detect_video_safe(video_path: str, conf: float, iou: float, max_frames: int
348
  out_path = os.path.join(tempfile.gettempdir(), f"annotated_{int(time.time())}.mp4")
349
  writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
350
  if not writer or (hasattr(writer,"isOpened") and not writer.isOpened()):
351
- return None, "{}", "❌ Video writer could not open. Try another format/resolution.", []
352
 
353
  det_records: List[dict] = []
354
  frames = 0
@@ -364,7 +362,7 @@ def detect_video_safe(video_path: str, conf: float, iou: float, max_frames: int
364
  _apply_english_overlay(r)
365
 
366
  rows_raw = _results_to_rows(results)
367
- rows = _filter_rows_by_geometry(r, rows_raw)
368
 
369
  t_sec = frames / float(fps if fps > 0 else 24.0)
370
  for row in rows:
@@ -387,9 +385,9 @@ def detect_video_safe(video_path: str, conf: float, iou: float, max_frames: int
387
  counts[d["object"]] = counts.get(d["object"], 0) + 1
388
  summary = "Detections (tally): " + (", ".join(f"{k}: {v}" for k,v in sorted(counts.items())) if counts else "none")
389
  detections_json = json.dumps(det_records[:200], ensure_ascii=False, indent=2)
390
- return out_path, detections_json, summary, det_records
391
  except Exception as e:
392
- return None, "{}", f"❌ Error during video detection: {e}", []
393
 
394
  # ---------- PDF export ----------
395
  def export_pdf_img(det_records: List[dict], summary: str, annotated_tmp_jpg: Optional[str]):
@@ -414,13 +412,18 @@ def export_pdf_vid(det_records: List[dict], summary: str):
414
  # =========================
415
  NOTE = (
416
  "Detections include timestamp, object, confidence, and Threat/Non-threat. "
417
- "Anti-noise filters active: min conf, min box area, sky-only region."
418
  )
419
 
420
  with gr.Blocks(title="UAV / Drone Detector (YOLO)") as demo:
421
  gr.Markdown("# UAV / Drone Detection (Pretrained YOLO)")
422
  gr.Markdown("Embedded samples (optional): `samples/uav_image.jpg`, `samples/uav_video.mp4`.")
423
- model_info_md = gr.Markdown(value=_model_info_text())
 
 
 
 
 
424
 
425
  with gr.Tabs():
426
  # IMAGE
@@ -445,13 +448,12 @@ with gr.Blocks(title="UAV / Drone Detector (YOLO)") as demo:
445
  annotated_tmp_img_path = gr.State(value=None)
446
  image_det_state = gr.State(value=[])
447
 
448
- def _run_img(image, conf, iou):
449
- result = detect_image_safe(image, conf, iou)
450
- return (*result, _model_info_text())
451
 
452
  run_img.click(
453
  fn=_run_img,
454
- inputs=[image_in, conf_img, iou_img],
455
  outputs=[image_out, table_out, msg_img, image_det_state, annotated_tmp_img_path, model_info_md],
456
  )
457
 
@@ -482,13 +484,12 @@ with gr.Blocks(title="UAV / Drone Detector (YOLO)") as demo:
482
  pdf_vid_path = gr.File(label="PDF Report", interactive=False)
483
  video_det_state = gr.State(value=[])
484
 
485
- def _run_vid(vpath, conf, iou, maxf):
486
- result = detect_video_safe(vpath, conf, iou, int(maxf))
487
- return (*result, _model_info_text())
488
 
489
  run_vid.click(
490
  fn=_run_vid,
491
- inputs=[video_in, conf_vid, iou_vid, max_frames],
492
  outputs=[video_out, detections_json_text, msg_vid, video_det_state, model_info_md],
493
  )
494
 
 
1
  import os
2
  import time
3
  import tempfile
4
+ from typing import List, Dict, Optional, Tuple
5
  import json
6
  import gradio as gr
7
 
 
12
  EMBED_IMG = os.path.join(SAMPLES_DIR, "uav_image.jpg")
13
  EMBED_VID = os.path.join(SAMPLES_DIR, "uav_video.mp4")
14
 
15
HF_TOKEN = os.getenv("HF_TOKEN", "").strip()  # optional (for private/gated repos)

# Selectable models (public).
# Each dropdown label maps to a (repo_id, filename) pair on the Hugging Face Hub.
MODEL_CHOICES: Dict[str, Tuple[str, str]] = {
    "Multi-class (Drone/Helicopter/Airplane/Bird)": (
        "Javvanny/yolov8m_flying_objects_detection",
        "yolov8m/weights/best.pt",
    ),
    "Drone-only (cleaner, fewer false positives)": (
        "keremberke/yolov8m-drone-detection",
        "best.pt",
    ),
}
 
24
 
25
  # =========================
26
  # LABELS & THREAT RULES
27
  # =========================
28
LABEL_MAP = {
    # NOTE(review): keys deliberately mirror the raw class names emitted by the
    # checkpoints — including apparent typos and mixed-script spellings
    # (e.g. "квадроcамолет" with a Latin "c", "БПЛА самелет") — do not "fix" them.
    "Airplane": "Airplane",
    "Bird": "Bird",
    "Drone": "Drone",
    "Helicopter": "Helicopter",
    "UAV": "UAV",
    "БПЛА": "UAV",
    "БПЛА коптер": "Drone",
    "квадрокоптер": "Drone",
    "квадроcамолет": "Drone",
    "самолет": "Airplane",
    "вертолет": "Helicopter",
    "автомобиль": "Car",
    "машина": "Car",
    "БПЛА самелет": "UAV Airplane",
    "drone": "Drone",  # some checkpoints use lowercase
}
THREAT_SET = {"drone", "uav", "airplane", "helicopter"}

def map_label(name: str) -> str:
    """Translate a raw checkpoint class name to its canonical English label.

    Lookup is exact first, then case-insensitive: the previous
    ``LABEL_MAP.get(name.lower())`` fallback only matched all-lowercase keys,
    so inputs like "AIRPLANE" or "бпла" fell through unmapped. ``casefold``
    handles both ASCII and Cyrillic case variants. Non-strings pass through
    unchanged; unknown names are returned as-is.
    """
    if not isinstance(name, str):
        return name
    if name in LABEL_MAP:
        return LABEL_MAP[name]
    low = name.casefold()
    for key, label in LABEL_MAP.items():
        if key.casefold() == low:
            return label
    return name
 
 
 
 
50
 
51
  def translate_names_dict(names_dict: Dict[int, str]) -> Dict[int, str]:
52
  if not isinstance(names_dict, dict): return names_dict
 
56
  return label_en and label_en.lower() in THREAT_SET
57
 
58
# =========================
# FALSE-POSITIVE FILTERS (tunable via Space Secrets)
# =========================
# Thresholds can be overridden through environment variables so the Space can
# be tuned without code changes. Defaults are passed as strings: os.getenv is
# documented to take a string default, and float() parses either way.
MIN_CONF = float(os.getenv("MIN_CONF", "0.60"))          # drop low-confidence hits
MIN_AREA_PCT = float(os.getenv("MIN_AREA_PCT", "0.004"))  # drop tiny boxes (fraction of frame)
SKY_RATIO = float(os.getenv("SKY_RATIO", "0.65"))        # keep boxes whose bottoms are above 65% height
ALLOWED_CLASSES = {"Drone", "UAV", "Helicopter", "Airplane"}  # ignored for drone-only automatically
65
 
66
  # =========================
67
  # LAZY GLOBAL STATE
 
71
  _model_names = None
72
  _loaded_repo = None
73
  _loaded_file = None
74
+ _loaded_key = None # which dropdown choice loaded
75
  _ffmpeg_status = None
76
 
77
  def _lazy_cv2():
 
96
  except Exception: pass
97
  return hf_hub_download(repo_id=repo_id, filename=filename)
98
 
99
def _reset_model_cache():
    """Clear the cached model and its bookkeeping so the next load starts fresh."""
    global _model, _model_err, _model_names, _loaded_repo, _loaded_file
    _model = _model_err = _model_names = None
    _loaded_repo = _loaded_file = None
106
+
107
def _get_model(model_key: str, conf: float, iou: float):
    """Return the cached YOLO model for *model_key*, loading it on first use.

    Changing the dropdown selection invalidates the cache so the newly chosen
    checkpoint is downloaded and loaded. A failed load is cached in
    ``_model_err`` and re-raised as RuntimeError on subsequent calls for the
    same key. The conf/iou thresholds from the UI are (re)applied on every
    call — cheap and idempotent.
    """
    global _model, _model_err, _model_names, _loaded_repo, _loaded_file, _loaded_key
    if _loaded_key != model_key:
        _reset_model_cache()
        _loaded_key = model_key

    if _model is None and _model_err is None:
        # Heavy import deferred to the load branch: cache hits skip it entirely.
        from ultralytics import YOLO
        repo, file = MODEL_CHOICES[model_key]
        try:
            weights = _download_from_hf(repo, file)
            m = YOLO(weights)
            m.overrides["max_det"] = 300
            _model = m
            _loaded_repo, _loaded_file = repo, file
            try:
                _model_names = m.model.names if hasattr(m, "model") else None
            except Exception:
                _model_names = None
        except Exception as e:
            _model = None
            _model_err = f"Model load failed for {repo}/{file}. Error: {e}"
    if _model_err:
        raise RuntimeError(_model_err)
    _model.overrides["conf"] = float(conf)
    _model.overrides["iou"] = float(iou)
    return _model
137
 
 
138
  def _model_info_text():
139
  repo = f"{_loaded_repo}/{_loaded_file}" if _loaded_repo else "not loaded"
140
  try:
 
169
  })
170
  return rows
171
 
172
+ def _filter_rows_by_geometry(r, rows: List[dict], model_key: str) -> List[dict]:
173
+ """Drop low-conf, tiny, ground-region boxes; for drone-only, allow Drone/UAV only."""
174
+ # Restrict classes differently if drone-only
175
+ allowed = ALLOWED_CLASSES if "Multi-class" in model_key else {"Drone", "UAV"}
176
  try:
177
  H, W = r.orig_img.shape[:2]
178
  except Exception:
 
182
  if row.get("confidence") is not None and row["confidence"] < MIN_CONF:
183
  continue
184
  cls = map_label(str(row.get("class","")))
185
+ if allowed and cls not in allowed:
186
  continue
187
  if H and W:
188
  area = row["width"] * row["height"]
 
190
  continue
191
  y_bottom = row["y2"]
192
  horizon = H * SKY_RATIO
193
+ if y_bottom > horizon: # below sky line → likely ground/grass noise
194
  continue
195
  kept.append(row)
196
  return kept
 
282
  c.showPage(); c.save()
283
  return out_path
284
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
285
  def _apply_english_overlay(r):
286
  try:
287
  if hasattr(r,"names") and isinstance(r.names, dict):
 
292
  # =========================
293
  # INFERENCE (with filtering + custom draw)
294
  # =========================
295
+ def detect_image_safe(model_key: str, image, conf: float, iou: float):
296
  try:
297
  if image is None:
298
+ return None, [], "⚠️ No image provided.", [], None, _model_info_text()
299
  cv2 = _lazy_cv2()
300
+ model = _get_model(model_key, conf, iou)
301
  results = model.predict(image, imgsz=960, verbose=False)
302
  r = results[0]
303
  _apply_english_overlay(r)
304
 
305
  rows_raw = _results_to_rows(results)
306
+ rows = _filter_rows_by_geometry(r, rows_raw, model_key)
307
 
308
  annotated_bgr = _draw_annotations_bgr(r.orig_img, rows)
309
  now_utc = time.strftime("%Y-%m-%d %H:%M:%S UTC", time.gmtime())
 
314
  "threat": "Threat" if is_threat(map_label(row["class"])) else "Non-threat",
315
  } for row in rows]
316
 
317
+ counts = {}
318
+ for d in det_records:
319
+ counts[d["object"]] = counts.get(d["object"], 0) + 1
320
+ summary = "Detections: " + (", ".join(f"{k}: {v}" for k,v in counts.items()) if counts else "none")
321
 
322
  tmp_img = os.path.join(tempfile.gettempdir(), f"annotated_{int(time.time())}.jpg")
323
  try: cv2.imwrite(tmp_img, annotated_bgr)
324
  except Exception: tmp_img = None
325
 
326
+ annotated_rgb = annotated_bgr[:, :, ::-1]
327
+ return annotated_rgb, rows, summary, det_records, tmp_img, _model_info_text()
328
  except Exception as e:
329
+ return None, [], f"❌ Error during image detection: {e}", [], None, _model_info_text()
330
 
331
+ def detect_video_safe(model_key: str, video_path: str, conf: float, iou: float, max_frames: int = 300):
332
  try:
333
  if not video_path:
334
+ return None, "{}", "⚠️ No video provided.", [], _model_info_text()
335
  cv2 = _lazy_cv2()
336
+ model = _get_model(model_key, conf, iou)
337
 
338
  cap = cv2.VideoCapture(video_path)
339
  if not cap.isOpened():
340
+ return None, "{}", "❌ Failed to open video.", [], _model_info_text()
341
 
342
  fps = cap.get(cv2.CAP_PROP_FPS) or 24.0
343
  w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) or 1280)
 
346
  out_path = os.path.join(tempfile.gettempdir(), f"annotated_{int(time.time())}.mp4")
347
  writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
348
  if not writer or (hasattr(writer,"isOpened") and not writer.isOpened()):
349
+ return None, "{}", "❌ Video writer could not open. Try another format/resolution.", [], _model_info_text()
350
 
351
  det_records: List[dict] = []
352
  frames = 0
 
362
  _apply_english_overlay(r)
363
 
364
  rows_raw = _results_to_rows(results)
365
+ rows = _filter_rows_by_geometry(r, rows_raw, model_key)
366
 
367
  t_sec = frames / float(fps if fps > 0 else 24.0)
368
  for row in rows:
 
385
  counts[d["object"]] = counts.get(d["object"], 0) + 1
386
  summary = "Detections (tally): " + (", ".join(f"{k}: {v}" for k,v in sorted(counts.items())) if counts else "none")
387
  detections_json = json.dumps(det_records[:200], ensure_ascii=False, indent=2)
388
+ return out_path, detections_json, summary, det_records, _model_info_text()
389
  except Exception as e:
390
+ return None, "{}", f"❌ Error during video detection: {e}", [], _model_info_text()
391
 
392
  # ---------- PDF export ----------
393
  def export_pdf_img(det_records: List[dict], summary: str, annotated_tmp_jpg: Optional[str]):
 
412
  # =========================
413
  NOTE = (
414
  "Detections include timestamp, object, confidence, and Threat/Non-threat. "
415
+ "Anti-noise filters: min conf, min box area, sky-only region. Choose model below."
416
  )
417
 
418
  with gr.Blocks(title="UAV / Drone Detector (YOLO)") as demo:
419
  gr.Markdown("# UAV / Drone Detection (Pretrained YOLO)")
420
  gr.Markdown("Embedded samples (optional): `samples/uav_image.jpg`, `samples/uav_video.mp4`.")
421
+
422
+ with gr.Row():
423
+ model_key = gr.Dropdown(choices=list(MODEL_CHOICES.keys()),
424
+ value=list(MODEL_CHOICES.keys())[0],
425
+ label="Model")
426
+ model_info_md = gr.Markdown(value=_model_info_text())
427
 
428
  with gr.Tabs():
429
  # IMAGE
 
448
  annotated_tmp_img_path = gr.State(value=None)
449
  image_det_state = gr.State(value=[])
450
 
451
+ def _run_img(mkey, image, conf, iou):
452
+ return detect_image_safe(mkey, image, conf, iou)
 
453
 
454
  run_img.click(
455
  fn=_run_img,
456
+ inputs=[model_key, image_in, conf_img, iou_img],
457
  outputs=[image_out, table_out, msg_img, image_det_state, annotated_tmp_img_path, model_info_md],
458
  )
459
 
 
484
  pdf_vid_path = gr.File(label="PDF Report", interactive=False)
485
  video_det_state = gr.State(value=[])
486
 
487
+ def _run_vid(mkey, vpath, conf, iou, maxf):
488
+ return detect_video_safe(mkey, vpath, conf, iou, int(maxf))
 
489
 
490
  run_vid.click(
491
  fn=_run_vid,
492
+ inputs=[model_key, video_in, conf_vid, iou_vid, max_frames],
493
  outputs=[video_out, detections_json_text, msg_vid, video_det_state, model_info_md],
494
  )
495