| import argparse |
| from pathlib import Path |
| import subprocess |
| from dataclasses import dataclass |
|
|
| import cv2 |
| import numpy as np |
| import pandas as pd |
| from tqdm import tqdm |
| from ultralytics import YOLO |
| from filterpy.kalman import KalmanFilter |
|
|
|
|
def run_cmd(cmd: list[str]) -> None:
    """Run an external command, capturing its output.

    Raises RuntimeError (including the command line and its stderr) when the
    command exits with a non-zero status; returns None on success.
    """
    completed = subprocess.run(cmd, capture_output=True, text=True)
    if completed.returncode == 0:
        return
    raise RuntimeError(f"Command failed:\n{' '.join(cmd)}\n\nSTDERR:\n{completed.stderr}")
|
|
|
|
def extract_frames_ffmpeg(video_path: Path, out_dir: Path, fps: float) -> None:
    """Dump frames of *video_path* into *out_dir* at *fps* frames per second.

    Frames are written as frame_0001.jpg, frame_0002.jpg, ... via ffmpeg's
    fps filter. The output directory is created if missing.
    """
    out_dir.mkdir(parents=True, exist_ok=True)
    output_pattern = out_dir / "frame_%04d.jpg"
    run_cmd([
        "ffmpeg",
        "-y",
        "-i", str(video_path),
        "-vf", f"fps={fps}",
        str(output_pattern),
    ])
|
|
|
|
def list_frames(frame_dir: Path) -> list[Path]:
    """Return the extracted frame files in true frame order.

    ffmpeg's %04d output pattern widens beyond four digits once a video has
    more than 9999 frames (frame_10000.jpg), and a plain lexicographic sort
    would then place frame_10000.jpg *before* frame_9999.jpg. Sort on the
    parsed numeric frame index instead; non-numeric stragglers (if any) sort
    first.
    """
    def frame_index(p: Path) -> int:
        # "frame_0042" -> 42; anything unparseable sorts to the front.
        suffix = p.stem.rsplit("_", 1)[-1]
        return int(suffix) if suffix.isdigit() else -1

    return sorted(frame_dir.glob("frame_*.jpg"), key=frame_index)
|
|
|
|
def xyxy_to_center(xyxy: np.ndarray) -> tuple[float, float]:
    """Return the centre point (cx, cy) of an (x1, y1, x2, y2) box."""
    left, top, right, bottom = xyxy
    cx = (left + right) / 2.0
    cy = (top + bottom) / 2.0
    return float(cx), float(cy)
|
|
|
|
def clip_box(x1, y1, x2, y2, w, h):
    """Clamp integer box corners into [0, w-1] x [0, h-1].

    If clamping collapses the box, the far corner is nudged one pixel past
    the near corner (when image size allows) so drawing code gets a
    non-inverted box.
    """
    def clamp(value, upper):
        return max(0, min(int(value), upper))

    x1 = clamp(x1, w - 1)
    x2 = clamp(x2, w - 1)
    y1 = clamp(y1, h - 1)
    y2 = clamp(y2, h - 1)
    if x2 <= x1:
        x2 = min(w - 1, x1 + 1)
    if y2 <= y1:
        y2 = min(h - 1, y1 + 1)
    return x1, y1, x2, y2
|
|
|
|
@dataclass
class TrackConfig:
    """Tuning parameters for SingleKalmanTrack."""
    # Filter time step (in extracted-frame units; 1.0 = one frame).
    dt: float = 1.0
    # Process-noise variance used to build the Q matrix.
    process_var: float = 20.0
    # Measurement-noise variance used to build the R matrix.
    meas_var: float = 50.0
    # Consecutive missed detections tolerated before the track is considered dead.
    max_missed: int = 8
|
|
|
|
class SingleKalmanTrack:
    """Kalman-filter track for a single object.

    The filter models a constant-velocity state [x, y, vx, vy] and observes
    only the position [x, y] (the detection centre). Box width/height live
    outside the filter and are smoothed with an exponential moving average.
    """
    def __init__(self, initial_xy: tuple[float, float], initial_wh: tuple[float, float], cfg: TrackConfig):
        self.cfg = cfg
        kf = KalmanFilter(dim_x=4, dim_z=2)

        # Constant-velocity state transition.
        step = cfg.dt
        kf.F = np.array(
            [[1.0, 0.0, step, 0.0],
             [0.0, 1.0, 0.0, step],
             [0.0, 0.0, 1.0, 0.0],
             [0.0, 0.0, 0.0, 1.0]]
        )

        # Only the position components are measured.
        kf.H = np.array(
            [[1.0, 0.0, 0.0, 0.0],
             [0.0, 1.0, 0.0, 0.0]]
        )

        # NOTE: kf.x is kept 1-D on purpose; filterpy reshapes measurements
        # to match x.ndim, and the accessor code below indexes kf.x[0]/kf.x[1]
        # as scalars.
        cx, cy = initial_xy
        kf.x = np.array([cx, cy, 0.0, 0.0], dtype=float)

        kf.P *= 500.0                       # large initial uncertainty
        kf.R = cfg.meas_var * np.eye(2)     # measurement noise
        kf.Q = cfg.process_var * np.eye(4)  # process noise
        self.kf = kf

        # Smoothed box size, miss counter, and centre history for drawing.
        self.last_w, self.last_h = initial_wh
        self.missed = 0
        self.trajectory = [(cx, cy)]

    def predict(self):
        """Advance the filter one time step; return the predicted (x, y)."""
        self.kf.predict()
        state = self.kf.x
        return float(state[0]), float(state[1])

    def update(self, meas_xy: tuple[float, float] | None, meas_wh: tuple[float, float] | None):
        """Fold a measurement into the filter.

        A None measurement counts as a miss: the miss counter is incremented,
        the current (predicted) position is appended to the trajectory, and
        False is returned. Otherwise the filter is corrected, the box size is
        EMA-smoothed, and True is returned.
        """
        if meas_xy is None:
            self.missed += 1
            state = self.kf.x
            self.trajectory.append((float(state[0]), float(state[1])))
            return False

        measurement = np.array([[meas_xy[0]], [meas_xy[1]]], dtype=float)
        self.kf.update(measurement)
        self.missed = 0

        if meas_wh is not None:
            # Exponential moving average of the box size (alpha = 0.2).
            new_w, new_h = meas_wh
            self.last_w = self.last_w * 0.8 + new_w * 0.2
            self.last_h = self.last_h * 0.8 + new_h * 0.2

        state = self.kf.x
        self.trajectory.append((float(state[0]), float(state[1])))
        return True

    def alive(self) -> bool:
        """True while the miss streak has not exceeded cfg.max_missed."""
        return self.missed <= self.cfg.max_missed

    def current_box_xyxy(self):
        """Box (x1, y1, x2, y2) centred on the current state with the smoothed size."""
        cx, cy = float(self.kf.x[0]), float(self.kf.x[1])
        half_w = float(self.last_w) / 2
        half_h = float(self.last_h) / 2
        return (cx - half_w, cy - half_h, cx + half_w, cy + half_h)
|
|
|
|
def pick_best_detection(result, conf_thres: float):
    """Pick the highest-confidence detection above the threshold.

    Returns (xyxy, conf) for the best box, or None when the result has no
    boxes or none of them clear conf_thres.
    """
    boxes = result.boxes
    if boxes is None or len(boxes) == 0:
        return None
    coords = boxes.xyxy.cpu().numpy()
    scores = boxes.conf.cpu().numpy()
    mask = scores >= conf_thres
    if not np.any(mask):
        return None
    coords = coords[mask]
    scores = scores[mask]
    winner = int(np.argmax(scores))
    return coords[winner], float(scores[winner])
|
|
|
|
def draw_overlay(img, xyxy, traj_points):
    """Draw the detection box and trajectory polyline onto img.

    img is modified in place and also returned. The box is clipped to the
    image bounds; the trajectory is drawn only once it has at least two
    points.
    """
    height, width = img.shape[:2]
    bx1, by1, bx2, by2 = xyxy
    bx1, by1, bx2, by2 = clip_box(bx1, by1, bx2, by2, width, height)

    cv2.rectangle(img, (bx1, by1), (bx2, by2), (0, 255, 0), 2)

    if len(traj_points) >= 2:
        polyline = np.array([[int(px), int(py)] for (px, py) in traj_points], dtype=np.int32)
        cv2.polylines(img, [polyline], isClosed=False, color=(255, 0, 0), thickness=2)

    return img
|
|
|
|
def process_video(video_path: Path, model: YOLO, out_root: Path, fps: float, conf_thres: float, track_cfg: TrackConfig):
    """Detect and track a single object through one video.

    Pipeline: extract frames with ffmpeg at *fps*, run the YOLO model per
    frame, feed the best detection into a single Kalman track, and write:

      out_root/frames/<video>/                     raw extracted frames
      out_root/detections/<video>/                 frames that had a detection
      out_root/detections/<video>_detections.parquet  one row per detection
      out_root/tracks/<video>/                     frames annotated with box + trajectory
      out_root/outputs/<video>_tracked.mp4         annotated video (only if any frame was tracked)
    """
    video_name = video_path.stem


    frames_dir = out_root / "frames" / video_name
    det_frames_dir = out_root / "detections" / video_name
    tracks_dir = out_root / "tracks" / video_name


    det_frames_dir.mkdir(parents=True, exist_ok=True)
    tracks_dir.mkdir(parents=True, exist_ok=True)


    extract_frames_ffmpeg(video_path, frames_dir, fps)
    frames = list_frames(frames_dir)
    if not frames:
        print(f"[WARN] No frames extracted for {video_path}")
        return


    det_rows = []
    output_frames = []
    # Created lazily on the first accepted detection.
    tracker = None


    for frame_path in tqdm(frames, desc=f"Processing {video_name}"):
        img = cv2.imread(str(frame_path))
        if img is None:
            continue


        results = model.predict(source=img, verbose=False)
        r = results[0]


        # Best single detection for this frame (or None if nothing clears conf_thres).
        best = pick_best_detection(r, conf_thres=conf_thres)
        meas_xy = None
        meas_wh = None
        meas_xyxy = None
        det_conf = None


        if best is not None:
            meas_xyxy, det_conf = best
            cx, cy = xyxy_to_center(meas_xyxy)
            meas_xy = (cx, cy)
            w_box = float(meas_xyxy[2] - meas_xyxy[0])
            h_box = float(meas_xyxy[3] - meas_xyxy[1])
            meas_wh = (w_box, h_box)


            # Save the raw (unannotated) frame in which a detection occurred.
            out_det_frame = det_frames_dir / frame_path.name
            cv2.imwrite(str(out_det_frame), img)


            det_rows.append({
                "video": video_name,
                "frame_file": frame_path.name,
                "conf": det_conf,
                "x1": float(meas_xyxy[0]),
                "y1": float(meas_xyxy[1]),
                "x2": float(meas_xyxy[2]),
                "y2": float(meas_xyxy[3]),
                "cx": float(cx),
                "cy": float(cy),
            })


        if tracker is None:
            if meas_xy is not None and meas_wh is not None:
                # First detection seeds the track; predict + update here keeps
                # the filter's step sequence consistent with later frames.
                tracker = SingleKalmanTrack(meas_xy, meas_wh, track_cfg)
                tracker.predict()
                tracker.update(meas_xy, meas_wh)
            else:
                # Nothing to track yet; this frame produces no output frame.
                continue
        else:
            # On miss frames, meas_xy/meas_wh are None and update() just
            # records the predicted position and bumps the miss counter.
            tracker.predict()
            tracker.update(meas_xy, meas_wh)


        if tracker is not None and tracker.alive():
            # Prefer the raw detection box; fall back to the Kalman estimate
            # when this frame had no detection.
            if meas_xyxy is not None:
                draw_xyxy = meas_xyxy
            else:
                draw_xyxy = np.array(tracker.current_box_xyxy(), dtype=float)


            overlay = img.copy()
            overlay = draw_overlay(overlay, draw_xyxy, tracker.trajectory)


            out_annot = tracks_dir / frame_path.name
            cv2.imwrite(str(out_annot), overlay)
            output_frames.append(overlay)


    # Persist all detections for this video (empty DataFrame if none).
    det_df = pd.DataFrame(det_rows)
    parquet_path = out_root / "detections" / f"{video_name}_detections.parquet"
    det_df.to_parquet(parquet_path, index=False)


    if output_frames:
        # All annotated frames share the size of the first one.
        h, w = output_frames[0].shape[:2]
        out_video_path = out_root / "outputs" / f"{video_name}_tracked.mp4"
        out_video_path.parent.mkdir(parents=True, exist_ok=True)


        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        writer = cv2.VideoWriter(str(out_video_path), fourcc, fps, (w, h))
        for f in output_frames:
            writer.write(f)
        writer.release()


        print(f"[OK] Wrote {out_video_path}")
        print(f"[OK] Detections parquet: {parquet_path}")
        print(f"[OK] Detection frames folder: {det_frames_dir}")
    else:
        print(f"[WARN] No output frames for {video_name} (tracker never initialized?)")
|
|
|
|
def main():
    """CLI entry point: run detection + tracking over every .mp4 in --videos_dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--videos_dir", type=str, default="videos")
    parser.add_argument("--model", type=str, required=True)
    parser.add_argument("--out_dir", type=str, default="artifacts")
    parser.add_argument("--fps", type=float, default=5.0)
    parser.add_argument("--conf", type=float, default=0.25)
    parser.add_argument("--process_var", type=float, default=20.0)
    parser.add_argument("--meas_var", type=float, default=50.0)
    parser.add_argument("--max_missed", type=int, default=8)
    args = parser.parse_args()

    videos_dir = Path(args.videos_dir)
    out_root = Path(args.out_dir)
    out_root.mkdir(parents=True, exist_ok=True)

    detector = YOLO(args.model)
    track_cfg = TrackConfig(
        dt=1.0,
        process_var=args.process_var,
        meas_var=args.meas_var,
        max_missed=args.max_missed,
    )

    videos = sorted(videos_dir.glob("*.mp4"))
    if not videos:
        raise FileNotFoundError(f"No .mp4 files found in {videos_dir}")

    for video_path in videos:
        process_video(video_path, detector, out_root, args.fps, args.conf, track_cfg)
|
|
|
|
| if __name__ == "__main__": |
| main() |