TejaAlapati committed on
Commit
fae31c3
·
verified ·
1 Parent(s): bd1fad7

Upload 5 files

Browse files
drone_video_1_detections.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eca0b9f7c5d991e96c18366aa9365a2c002280a43de82356fa512af179117dc2
3
+ size 7986
drone_video_1_tracked.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b566f8191923737ec3fc4f65ae6983b643a51ea98e4f5eef98bd4d3131e1ee1c
3
+ size 1877959
drone_video_2_detections.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d2fd0f4d9360c532e6774fdad93849e6bd498b8250253564bcd1b31772e0a3f2
3
+ size 16534
drone_video_2_tracked.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d23651954cc4d80b18abd3e0a3000dc65402bd05a08021b443f25fdb5cbfeba3
3
+ size 5690498
main.py ADDED
@@ -0,0 +1,283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ from pathlib import Path
3
+ import subprocess
4
+ from dataclasses import dataclass
5
+
6
+ import cv2
7
+ import numpy as np
8
+ import pandas as pd
9
+ from tqdm import tqdm
10
+ from ultralytics import YOLO
11
+ from filterpy.kalman import KalmanFilter
12
+
13
+
14
def run_cmd(cmd: list[str]) -> None:
    """Run *cmd* as a subprocess and raise RuntimeError on a non-zero exit.

    stdout/stderr are captured as text; stderr is embedded in the error
    message so ffmpeg failures are diagnosable from the traceback alone.
    """
    proc = subprocess.run(cmd, capture_output=True, text=True)
    if proc.returncode != 0:
        raise RuntimeError(f"Command failed:\n{' '.join(cmd)}\n\nSTDERR:\n{proc.stderr}")
18
+
19
+
20
def extract_frames_ffmpeg(video_path: Path, out_dir: Path, fps: float) -> None:
    """Sample *video_path* at *fps* frames per second into JPEGs under *out_dir*.

    Frames are written as frame_0001.jpg, frame_0002.jpg, ... so that a
    lexicographic sort recovers temporal order.
    """
    out_dir.mkdir(parents=True, exist_ok=True)
    run_cmd([
        "ffmpeg", "-y",
        "-i", str(video_path),
        "-vf", f"fps={fps}",
        str(out_dir / "frame_%04d.jpg"),
    ])
29
+
30
+
31
def list_frames(frame_dir: Path) -> list[Path]:
    """Return the extracted frame images in *frame_dir*, sorted by filename."""
    matches = frame_dir.glob("frame_*.jpg")
    return sorted(matches)
33
+
34
+
35
def xyxy_to_center(xyxy: np.ndarray) -> tuple[float, float]:
    """Return the (cx, cy) midpoint of an (x1, y1, x2, y2) box."""
    x1, y1, x2, y2 = xyxy
    cx = (x1 + x2) / 2.0
    cy = (y1 + y2) / 2.0
    return float(cx), float(cy)
38
+
39
+
40
def clip_box(x1, y1, x2, y2, w, h):
    """Clamp box corners into [0, w-1] x [0, h-1] as ints.

    A box collapsed by clamping (x2 <= x1 or y2 <= y1) is nudged out to at
    least one pixel so downstream drawing calls stay valid.
    """
    def clamp(v, limit):
        # Truncate to int, then pin to the valid pixel range [0, limit-1].
        return max(0, min(int(v), limit - 1))

    x1, x2 = clamp(x1, w), clamp(x2, w)
    y1, y2 = clamp(y1, h), clamp(y2, h)
    if x2 <= x1:
        x2 = min(w - 1, x1 + 1)
    if y2 <= y1:
        y2 = min(h - 1, y1 + 1)
    return x1, y1, x2, y2
48
+
49
+
50
@dataclass
class TrackConfig:
    """Tunable parameters for the single-target Kalman tracker."""

    dt: float = 1.0            # time step between processed frames (filter units)
    process_var: float = 20.0  # process-noise variance (scales Q)
    meas_var: float = 50.0     # measurement-noise variance (scales R)
    max_missed: int = 8        # consecutive misses tolerated before the track dies
56
+
57
+
58
class SingleKalmanTrack:
    """
    Single-object Kalman track.
    State: [x, y, vx, vy]
    Measurement: [x, y]
    """
    def __init__(self, initial_xy: tuple[float, float], initial_wh: tuple[float, float], cfg: TrackConfig):
        """Start a track at center *initial_xy* with box size *initial_wh*."""
        self.cfg = cfg
        self.kf = KalmanFilter(dim_x=4, dim_z=2)

        # Constant-velocity motion model: position advances by velocity * dt.
        dt = cfg.dt
        self.kf.F = np.array([
            [1, 0, dt, 0],
            [0, 1, 0, dt],
            [0, 0, 1, 0 ],
            [0, 0, 0, 1 ],
        ], dtype=float)

        # Only the (x, y) position is observed; velocity is inferred.
        self.kf.H = np.array([
            [1, 0, 0, 0],
            [0, 1, 0, 0],
        ], dtype=float)

        x0, y0 = initial_xy
        self.kf.x = np.array([x0, y0, 0.0, 0.0], dtype=float)

        # Large initial covariance: velocity is unknown at track birth.
        self.kf.P *= 500.0
        self.kf.R = np.eye(2) * cfg.meas_var
        self.kf.Q = np.eye(4) * cfg.process_var

        # Box size is smoothed with an EMA (see update) instead of being filtered.
        self.last_w, self.last_h = initial_wh
        self.missed = 0               # consecutive frames without a detection
        self.trajectory = [(x0, y0)]  # history of (x, y) centers, one per step

    def predict(self):
        """Advance the filter one time step; return the predicted (x, y) center."""
        self.kf.predict()
        return float(self.kf.x[0]), float(self.kf.x[1])

    def update(self, meas_xy: tuple[float, float] | None, meas_wh: tuple[float, float] | None):
        """Fold one measurement into the filter.

        meas_xy=None means the detection was missed this frame: the miss
        counter is incremented, the predicted position is appended to the
        trajectory (the track "coasts"), and False is returned. Otherwise the
        filter is corrected with the measurement, the box-size EMA is
        refreshed when meas_wh is given, and True is returned.
        """
        if meas_xy is None:
            self.missed += 1
            self.trajectory.append((float(self.kf.x[0]), float(self.kf.x[1])))
            return False

        z = np.array([[meas_xy[0]], [meas_xy[1]]], dtype=float)
        self.kf.update(z)
        self.missed = 0

        if meas_wh is not None:
            # Exponential moving average (0.8/0.2) keeps the drawn box stable.
            w, h = meas_wh
            self.last_w = 0.8 * self.last_w + 0.2 * w
            self.last_h = 0.8 * self.last_h + 0.2 * h

        self.trajectory.append((float(self.kf.x[0]), float(self.kf.x[1])))
        return True

    def alive(self) -> bool:
        """True while the track has not exceeded cfg.max_missed consecutive misses."""
        return self.missed <= self.cfg.max_missed

    def current_box_xyxy(self):
        """Box (x1, y1, x2, y2) centered on the filter state with the smoothed size."""
        x, y = float(self.kf.x[0]), float(self.kf.x[1])
        w, h = float(self.last_w), float(self.last_h)
        return (x - w/2, y - h/2, x + w/2, y + h/2)
121
+
122
+
123
def pick_best_detection(result, conf_thres: float):
    """Return (xyxy, conf) for the highest-confidence detection in *result*
    whose confidence is at least *conf_thres*, or None when nothing qualifies.
    """
    boxes = result.boxes
    if boxes is None or len(boxes) == 0:
        return None

    xyxy = boxes.xyxy.cpu().numpy()
    conf = boxes.conf.cpu().numpy()

    mask = conf >= conf_thres
    if not mask.any():
        return None

    candidates = xyxy[mask]
    scores = conf[mask]
    winner = int(np.argmax(scores))
    return candidates[winner], float(scores[winner])
136
+
137
+
138
def draw_overlay(img, xyxy, traj_points):
    """Draw a green bounding box and the blue center trajectory onto *img*.

    Returns *img*, which is modified in place.
    """
    h, w = img.shape[:2]
    x1, y1, x2, y2 = clip_box(*xyxy, w, h)

    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

    # A polyline needs at least two vertices.
    if len(traj_points) >= 2:
        pts = np.array([[int(px), int(py)] for px, py in traj_points], dtype=np.int32)
        cv2.polylines(img, [pts], isClosed=False, color=(255, 0, 0), thickness=2)

    return img
150
+
151
+
152
def process_video(video_path: Path, model: YOLO, out_root: Path, fps: float, conf_thres: float, track_cfg: TrackConfig):
    """Detect and track a single object through *video_path*.

    Pipeline: sample frames with ffmpeg, run YOLO per frame, keep only the
    single best detection, feed it to a Kalman track, and write out:
      - raw frames that had a detection under out_root/detections/<video>/
      - annotated frames under out_root/tracks/<video>/
      - a per-frame detections parquet under out_root/detections/
      - a tracked .mp4 under out_root/outputs/
    """
    video_name = video_path.stem

    frames_dir = out_root / "frames" / video_name
    det_frames_dir = out_root / "detections" / video_name
    tracks_dir = out_root / "tracks" / video_name

    det_frames_dir.mkdir(parents=True, exist_ok=True)
    tracks_dir.mkdir(parents=True, exist_ok=True)

    extract_frames_ffmpeg(video_path, frames_dir, fps)
    frames = list_frames(frames_dir)
    if not frames:
        print(f"[WARN] No frames extracted for {video_path}")
        return

    det_rows = []       # one dict per detected frame -> parquet
    output_frames = []  # annotated frames -> output video
    tracker = None      # created lazily on the first accepted detection

    for frame_path in tqdm(frames, desc=f"Processing {video_name}"):
        img = cv2.imread(str(frame_path))
        if img is None:
            continue

        results = model.predict(source=img, verbose=False)
        r = results[0]

        best = pick_best_detection(r, conf_thres=conf_thres)
        meas_xy = None
        meas_wh = None
        meas_xyxy = None
        det_conf = None

        if best is not None:
            meas_xyxy, det_conf = best
            cx, cy = xyxy_to_center(meas_xyxy)
            meas_xy = (cx, cy)
            w_box = float(meas_xyxy[2] - meas_xyxy[0])
            h_box = float(meas_xyxy[3] - meas_xyxy[1])
            meas_wh = (w_box, h_box)

            # NOTE(review): the un-annotated frame is saved here — presumably a
            # record of which frames produced a detection; confirm intent.
            out_det_frame = det_frames_dir / frame_path.name
            cv2.imwrite(str(out_det_frame), img)

            det_rows.append({
                "video": video_name,
                "frame_file": frame_path.name,
                "conf": det_conf,
                "x1": float(meas_xyxy[0]),
                "y1": float(meas_xyxy[1]),
                "x2": float(meas_xyxy[2]),
                "y2": float(meas_xyxy[3]),
                "cx": float(cx),
                "cy": float(cy),
            })

        if tracker is None:
            if meas_xy is not None and meas_wh is not None:
                # First confident detection bootstraps the track.
                tracker = SingleKalmanTrack(meas_xy, meas_wh, track_cfg)
                tracker.predict()
                tracker.update(meas_xy, meas_wh)
            else:
                # No track yet and nothing to start one from: skip this frame.
                continue
        else:
            # Predict, then update; with meas_xy=None the track coasts on the
            # prediction and counts a miss.
            tracker.predict()
            tracker.update(meas_xy, meas_wh)

        if tracker is not None and tracker.alive():
            # Prefer the raw detection box; fall back to the Kalman estimate.
            if meas_xyxy is not None:
                draw_xyxy = meas_xyxy
            else:
                draw_xyxy = np.array(tracker.current_box_xyxy(), dtype=float)

            overlay = img.copy()
            overlay = draw_overlay(overlay, draw_xyxy, tracker.trajectory)

            out_annot = tracks_dir / frame_path.name
            cv2.imwrite(str(out_annot), overlay)
            output_frames.append(overlay)

    # Persist all per-frame detections for offline analysis.
    det_df = pd.DataFrame(det_rows)
    parquet_path = out_root / "detections" / f"{video_name}_detections.parquet"
    det_df.to_parquet(parquet_path, index=False)

    if output_frames:
        # All annotated frames share the source resolution; use the first.
        h, w = output_frames[0].shape[:2]
        out_video_path = out_root / "outputs" / f"{video_name}_tracked.mp4"
        out_video_path.parent.mkdir(parents=True, exist_ok=True)

        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        writer = cv2.VideoWriter(str(out_video_path), fourcc, fps, (w, h))
        for f in output_frames:
            writer.write(f)
        writer.release()

        print(f"[OK] Wrote {out_video_path}")
        print(f"[OK] Detections parquet: {parquet_path}")
        print(f"[OK] Detection frames folder: {det_frames_dir}")
    else:
        print(f"[WARN] No output frames for {video_name} (tracker never initialized?)")
253
+
254
+
255
def main():
    """CLI entry point: run detection + tracking over every .mp4 in --videos_dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--videos_dir", type=str, default="videos")
    parser.add_argument("--model", type=str, required=True)
    parser.add_argument("--out_dir", type=str, default="artifacts")
    parser.add_argument("--fps", type=float, default=5.0)
    parser.add_argument("--conf", type=float, default=0.25)
    parser.add_argument("--process_var", type=float, default=20.0)
    parser.add_argument("--meas_var", type=float, default=50.0)
    parser.add_argument("--max_missed", type=int, default=8)
    args = parser.parse_args()

    videos_dir = Path(args.videos_dir)
    out_root = Path(args.out_dir)
    out_root.mkdir(parents=True, exist_ok=True)

    detector = YOLO(args.model)
    cfg = TrackConfig(
        dt=1.0,
        process_var=args.process_var,
        meas_var=args.meas_var,
        max_missed=args.max_missed,
    )

    videos = sorted(videos_dir.glob("*.mp4"))
    if not videos:
        raise FileNotFoundError(f"No .mp4 files found in {videos_dir}")

    for video_path in videos:
        process_video(video_path, detector, out_root, args.fps, args.conf, cfg)
280
+
281
+
282
+ if __name__ == "__main__":
283
+ main()