Datasets:
Tasks:
Object Detection
Formats:
text
Languages:
English
Size:
100K - 1M
ArXiv:
Tags:
Multi-object-tracking
License:
hamidreza
committed on
Commit
·
fa3c1f6
1
Parent(s):
f6d2583
code updated
Browse files- README.md +13 -1
- convert_to_coco.py +104 -0
- extract_frames.py +60 -0
README.md
CHANGED
|
@@ -34,7 +34,19 @@ _[Hamidreza Hashempoor](https://hamidreza-hashempoor.github.io/), Yu Dong Hwang
|
|
| 34 |
## Dataset Overview
|
| 35 |
|
| 36 |
GT format is like (each line):
|
| 37 |
-
`frame, id, bb_left, bb_top, bb_width, bb_height, conf, class, 1.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
|
| 39 |
Brief statistics and visualization of FastTracker benchmark and its comparison with other benchmarks.
|
| 40 |
|
|
|
|
| 34 |
## Dataset Overview
|
| 35 |
|
| 36 |
GT format is like (each line):
|
| 37 |
+
`frame, id, bb_left, bb_top, bb_width, bb_height, conf, class, 1.0`.
|
| 38 |
+
|
| 39 |
+
To prepare the dataset, first run `extract_frames.py` to decode frames from each video.
|
| 40 |
+
In **line 11** of the script, add the video filename and the number of frames you want to extract.
|
| 41 |
+
```bash
|
| 42 |
+
python extract_frames.py
|
| 43 |
+
```
|
| 44 |
+
|
| 45 |
+
Then, convert the ground truth into COCO format with:
|
| 46 |
+
```bash
|
| 47 |
+
python convert_to_coco.py
|
| 48 |
+
```
|
| 49 |
+
This will generate `annotations/train.json` ready for training your detector.
|
| 50 |
|
| 51 |
Brief statistics and visualization of FastTracker benchmark and its comparison with other benchmarks.
|
| 52 |
|
convert_to_coco.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Convert MOT-style ground truth into a single COCO-style train.json.

Expects frames under FRAMES_DIR/<seq>/*.jpg (one subdirectory per video)
and ground truth under GT_DIR/<seq>/gt/gt.txt with rows of the form:
    frame, id, bb_left, bb_top, bb_width, bb_height, conf, class, visibility
"""

import os
import json
import cv2
import numpy as np

# Paths
FRAMES_DIR = "test_frame"
GT_DIR = "GT"
OUT_PATH = "annotations"
os.makedirs(OUT_PATH, exist_ok=True)

# Output COCO-style JSON
out_file = os.path.join(OUT_PATH, "train.json")

out = {
    "images": [],
    "annotations": [],
    "videos": [],
    "categories": [
        {"id": 1, "name": "pedestrian"}  # You can expand with more classes if needed
    ]
}

image_cnt = 0   # global image-id offset: number of frames in all previous sequences
ann_cnt = 0     # global (1-based) annotation id
video_cnt = 0   # COCO video id, one per sequence
tid_curr = 0    # last globally-unique track id handed out

# Loop over sequences (one per video)
for seq in sorted(os.listdir(FRAMES_DIR)):
    seq_path = os.path.join(FRAMES_DIR, seq)
    if not os.path.isdir(seq_path):
        continue

    video_cnt += 1
    out["videos"].append({"id": video_cnt, "file_name": seq})

    # Frames (sorted so frame_id == position in the sequence)
    images = sorted(f for f in os.listdir(seq_path) if f.endswith(".jpg"))
    num_images = len(images)

    for i, img_name in enumerate(images):
        img_path = os.path.join(seq_path, img_name)
        img = cv2.imread(img_path)
        if img is None:
            # Unreadable frame: skip the image entry; ids stay index-based
            # so later frames keep their expected image ids.
            continue
        height, width = img.shape[:2]

        out["images"].append({
            "file_name": f"{seq}/{img_name}",
            "id": image_cnt + i + 1,
            "frame_id": i + 1,
            # -1 marks "no neighbour" at sequence boundaries
            "prev_image_id": image_cnt + i if i > 0 else -1,
            "next_image_id": image_cnt + i + 2 if i < num_images - 1 else -1,
            "video_id": video_cnt,
            "height": height,
            "width": width,
        })

    # Load GT file
    gt_path = os.path.join(GT_DIR, seq, "gt", "gt.txt")
    if not os.path.exists(gt_path):
        print(f" No GT found for {seq}, skipping annotations.")
        image_cnt += num_images
        continue

    # ndmin=2 keeps a (1, 9) shape when gt.txt has a single row; plain
    # loadtxt would return a 1-D array and break the per-row loop below.
    anns = np.loadtxt(gt_path, dtype=np.float32, delimiter=",", ndmin=2)

    # Map raw per-sequence GT track ids to globally-unique COCO track ids.
    # Using a dict (instead of comparing against the previous row's id)
    # keeps the mapping correct even when gt.txt is sorted by frame, where
    # the same raw id re-appears in non-contiguous rows.
    track_map = {}
    for row in anns:
        frame_id = int(row[0])
        raw_tid = int(row[1])
        x, y, w, h = (float(v) for v in row[2:6])
        conf = float(row[6])
        class_id = int(row[7])
        # row[8] (visibility) is currently unused

        ann_cnt += 1
        if raw_tid not in track_map:
            tid_curr += 1
            track_map[raw_tid] = tid_curr

        out["annotations"].append({
            "id": ann_cnt,
            "category_id": class_id,  # directly use the GT class
            "image_id": image_cnt + frame_id,  # frame_id is 1-based in gt.txt
            "track_id": track_map[raw_tid],
            "bbox": [x, y, w, h],
            "conf": conf,
            "iscrowd": 0,
            "area": w * h,
        })

    image_cnt += num_images

print(f" Loaded {len(out['images'])} images and {len(out['annotations'])} annotations.")

# Save JSON
with open(out_file, "w") as f:
    json.dump(out, f)

print(f" Saved COCO-style annotations to {out_file}")
|
extract_frames.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Decode each .mp4 in videos_dir into per-frame JPEGs under frames_dir.

Each video gets its own subdirectory of frames named frame_00000.jpg,
frame_00001.jpg, ... The frame count is padded (by repeating the last
frame) or uniformly subsampled to match frames_to_extract so it lines up
with the last row of the corresponding GT file.
"""

import os
import cv2
import numpy as np

# Paths
videos_dir = "test_vid"
frames_dir = "test_frame"

# Define how many frames to extract for each video (should be same in GT file last row)
frames_to_extract = {
    "task_day_left_turn-2024_07_31_02_12_05-mot 1.1.mp4": 1965,
    # add more here...
}

os.makedirs(frames_dir, exist_ok=True)

for video_file in os.listdir(videos_dir):
    if not video_file.lower().endswith(".mp4"):
        continue
    video_path = os.path.join(videos_dir, video_file)

    # Subdirectory name (remove .mp4)
    subdir_name = os.path.splitext(video_file)[0]
    subdir_path = os.path.join(frames_dir, subdir_name)
    os.makedirs(subdir_path, exist_ok=True)

    cap = cv2.VideoCapture(video_path)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Fall back to the container's own frame count when no target is given.
    desired_frames = frames_to_extract.get(video_file, total_frames)

    # NOTE(review): the whole clip is buffered in memory before writing;
    # fine for short clips, but very long/high-res videos may need a
    # streaming rewrite.
    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(frame)
    cap.release()

    if not frames:
        # Unreadable or empty video: the padding branch below would hit
        # frames[-1] and raise IndexError, so skip it explicitly.
        print(f" Could not read any frames from {video_file}, skipping.")
        continue

    # Too short: pad by repeating the last decoded frame.
    if desired_frames > len(frames):
        last_frame = frames[-1]
        frames.extend([last_frame] * (desired_frames - len(frames)))

    # Too long: uniformly subsample down to the desired count.
    if desired_frames < len(frames):
        indices = np.linspace(0, len(frames) - 1, desired_frames, dtype=int)
        frames = [frames[i] for i in indices]

    saved = 0
    for idx, frame in enumerate(frames):
        frame_filename = os.path.join(subdir_path, f"frame_{idx:05d}.jpg")
        cv2.imwrite(frame_filename, frame)
        saved += 1

    print(f"Extracted {saved} frames from {video_file} into {subdir_name}/")

print(" Done extracting frames for all videos.")
|