| |
| """Profile TACO dataset loading and save results to Markdown. |
| |
| Usage: |
| python profile_dataset.py --root /path/to/dataset --output profile.md |
| python profile_dataset.py --root /path/to/dataset --output profile.md --n-samples 50 |
| """ |
|
|
| import argparse |
| import io |
| import sys |
| import time |
| from pathlib import Path |
|
|
# Directory containing this script; inserted into sys.path later so the
# sibling project modules (taco_dataset_loader, view_sampler) can be imported.
SCRIPT_DIR = Path(__file__).resolve().parent
|
|
| import numpy as np |
| import torch |
| from torch.utils.data import DataLoader |
|
|
import decord
# Side effect: make decord return decoded frames as torch tensors rather than
# its native NDArray type (the resize code below relies on tensor .permute()).
decord.bridge.set_bridge("torch")
|
|
|
|
SEED = 42  # base RNG seed so repeated profiling runs sample the same indices
N_CONTEXT = 4  # context camera views per sample
N_TARGET = 3  # target camera views per sample
N_PAST = 4  # past frames per view
N_FUTURE = 8  # future frames per view (target views load past + future)
RESOLUTION = (376, 512)  # output (H, W); reported as WxH in the markdown
WORKER_COUNTS = [0, 2, 4]  # DataLoader num_workers values to benchmark
|
|
|
|
def fmt_time(seconds: float) -> str:
    """Render a duration in the most readable unit: us, ms, or s."""
    if seconds >= 1.0:
        return f"{seconds:.2f} s"
    if seconds >= 0.001:
        return f"{seconds * 1e3:.1f} ms"
    return f"{seconds * 1e6:.0f} us"
|
|
|
|
class MarkdownReport:
    """Accumulate Markdown text in an in-memory buffer; write it out on save().

    All writer methods append to the buffer immediately; nothing is flushed
    to disk until save() is called.
    """

    def __init__(self):
        # In-memory buffer holding the whole report.
        self.buf = io.StringIO()

    def h1(self, text):
        """Append a level-1 heading followed by a blank line."""
        self.buf.write(f"# {text}\n\n")

    def h2(self, text):
        """Append a level-2 heading followed by a blank line."""
        self.buf.write(f"## {text}\n\n")

    def h3(self, text):
        """Append a level-3 heading followed by a blank line."""
        self.buf.write(f"### {text}\n\n")

    def text(self, text):
        """Append a paragraph followed by a blank line."""
        self.buf.write(f"{text}\n\n")

    def kv(self, key, value):
        """Append one bold key/value bullet; call kv_end() after the last."""
        self.buf.write(f"- **{key}**: {value}\n")

    def kv_end(self):
        """Terminate a kv() bullet list with a blank line."""
        self.buf.write("\n")

    def table(self, headers, rows, align=None):
        """Append a Markdown table.

        Args:
            headers: column header strings.
            rows: iterable of rows; cells are converted with str().
            align: optional per-column alignment, one of "left", "right",
                "center" per column; defaults to left-aligned throughout.
        """
        if align is None:
            align = ["left"] * len(headers)
        sep = []
        for a in align:
            if a == "right":
                sep.append("---:")
            elif a == "center":
                sep.append(":---:")
            else:
                sep.append("---")
        self.buf.write("| " + " | ".join(headers) + " |\n")
        self.buf.write("| " + " | ".join(sep) + " |\n")
        for row in rows:
            self.buf.write("| " + " | ".join(str(c) for c in row) + " |\n")
        self.buf.write("\n")

    def save(self, path):
        """Write the accumulated report to `path`.

        Encoding is pinned to UTF-8: the report body can contain non-ASCII
        characters (em-dash placeholders), and the platform default encoding
        is not guaranteed to handle them.
        """
        with open(path, "w", encoding="utf-8") as f:
            f.write(self.buf.getvalue())

    def getvalue(self):
        """Return the report accumulated so far as a single string."""
        return self.buf.getvalue()
|
|
|
|
def make_dataset(csv_path, root_dir, **kwargs):
    """Construct a TACODataset with this module's profiling defaults.

    Any keyword argument (e.g. load_segmentation=True) overrides the
    corresponding default before construction.
    """
    # Make the sibling project modules importable. Guarded so repeated calls
    # (this function is invoked once per profiling pass) do not keep
    # prepending duplicate entries to sys.path.
    script_dir = str(SCRIPT_DIR)
    if script_dir not in sys.path:
        sys.path.insert(0, script_dir)
    from taco_dataset_loader import TACODataset
    from view_sampler import RandomViewSampler

    defaults = dict(
        csv_path=csv_path,
        root_dir=root_dir,
        view_sampler=RandomViewSampler(),
        n_context_views=N_CONTEXT,
        n_target_views=N_TARGET,
        n_past=N_PAST,
        n_future=N_FUTURE,
        resolution=RESOLUTION,
        seed=SEED,
    )
    defaults.update(kwargs)
    return TACODataset(**defaults)
|
|
|
|
def clear_cache(ds):
    """Drop the dataset's memoized calibration JSON so the next load is cold."""
    cached_loader = ds._load_calibration_json
    cached_loader.cache_clear()
|
|
|
|
def profile_dataset(csv_path, root_dir, n_samples, report):
    """Run four profiling passes over the dataset and append results to `report`.

    Sections written to the MarkdownReport:
      1. cold per-sample ``__getitem__`` latency (with/without segmentation),
      2. DataLoader throughput for each entry in ``WORKER_COUNTS``,
      3. per-component cost breakdown (decode / resize / calibration / masks)
         versus the full ``__getitem__``,
      4. on-disk sizes via ``du -sh``.

    Returns the same `report` object.
    """
    import json

    ds = make_dataset(csv_path, root_dir)

    # ---- Dataset summary ----------------------------------------------------
    report.h2("Dataset Info")
    report.kv("Root", str(root_dir))
    report.kv("Sequences", ds.num_sequences)
    report.kv("Total samples", len(ds))
    report.kv("Config", f"{N_CONTEXT} ctx + {N_TARGET} tgt views, {N_PAST} past + {N_FUTURE} future frames")
    report.kv("Context frames/sample", f"{N_CONTEXT} cams x {N_PAST} = {N_CONTEXT * N_PAST}")
    report.kv("Target frames/sample", f"{N_TARGET} cams x {N_PAST + N_FUTURE} = {N_TARGET * (N_PAST + N_FUTURE)}")
    report.kv("Total frame decodes/sample", N_CONTEXT * N_PAST + N_TARGET * (N_PAST + N_FUTURE))
    report.kv("Output resolution", f"{RESOLUTION[1]}x{RESOLUTION[0]} (WxH)")

    # Probe the first sequence's videos to report the native frame size(s).
    row0 = ds.meta.iloc[0]
    cam_ids0 = row0["camera_ids"].split(";")
    mr_dir0 = row0.get("marker_removed_dir", "")
    res_seen = {}
    for cid in cam_ids0:
        vp = root_dir / mr_dir0 / f"{cid}.mp4"
        if vp.exists():
            vr = decord.VideoReader(str(vp), ctx=decord.cpu(0))
            # Frame shape indexed as (H, W, ...) here; key is rendered WxH.
            s = vr[0].shape
            key = f"{s[1]}x{s[0]}"
            res_seen.setdefault(key, 0)
            res_seen[key] += 1
    native_str = ", ".join(f"{r} ({c} cams)" for r, c in res_seen.items())
    report.kv("Native resolution", native_str)
    report.kv_end()

    # ---- 1. Cold single-sample latency --------------------------------------
    print(" [1/4] Cold single-sample loading...")
    report.h2("1. Cold Single-Sample Loading")
    report.text(f"*{n_samples} diverse samples, no warmup, LRU cache cleared each iteration.*")

    for seg_label, load_seg in [("Without segmentation", False), ("With segmentation", True)]:
        # Fresh dataset per configuration; a distinct seed for the second pass
        # keeps it from re-reading the exact samples the first pass may have
        # left warm in the OS page cache.
        ds_run = make_dataset(csv_path, root_dir, load_segmentation=load_seg)
        rng = np.random.RandomState(SEED if not load_seg else SEED + 1)
        indices = rng.choice(len(ds_run), size=n_samples, replace=False)

        times = []
        for idx in indices:
            clear_cache(ds_run)  # drop memoized calibration so each load is cold
            t0 = time.perf_counter()
            _ = ds_run[int(idx)]
            t1 = time.perf_counter()
            times.append(t1 - t0)
        t = np.array(times)

        report.h3(seg_label)
        report.table(
            ["Metric", "Value"],
            [
                ["Mean", fmt_time(t.mean())],
                ["Median", fmt_time(np.median(t))],
                ["Std", fmt_time(t.std())],
                ["Min", fmt_time(t.min())],
                ["Max", fmt_time(t.max())],
                ["P10", fmt_time(np.percentile(t, 10))],
                ["P90", fmt_time(np.percentile(t, 90))],
            ],
            align=["left", "right"],
        )

    # ---- 2. DataLoader throughput --------------------------------------------
    print(" [2/4] Batch throughput...")
    report.h2("2. Batch Throughput")
    report.text(f"*No warmup, {n_samples} samples, shuffle=True.*")

    batch_rows = []
    for nw in WORKER_COUNTS:
        ds_run = make_dataset(csv_path, root_dir)
        loader = DataLoader(
            ds_run, batch_size=1, shuffle=True,
            num_workers=nw, pin_memory=False,
            persistent_workers=(nw > 0),
        )
        t0 = time.perf_counter()
        count = 0
        for batch in loader:
            count += 1
            if count >= n_samples:
                break
        t1 = time.perf_counter()
        elapsed = t1 - t0
        rate = count / elapsed
        batch_rows.append([str(nw), str(count), fmt_time(elapsed), f"{rate:.2f}"])
        del loader  # release worker processes before the next configuration

    report.table(
        ["Workers", "Samples", "Wall Time", "Samples/sec"],
        batch_rows,
        align=["right", "right", "right", "right"],
    )

    # ---- 3. Per-component breakdown ------------------------------------------
    # NOTE(review): each stage below re-implements what __getitem__ is assumed
    # to do internally (decode, resize, calibration parse, mask load) so it can
    # be timed in isolation; if the dataset implementation changes, these
    # stand-ins must be kept in sync -- verify against TACODataset.__getitem__.
    print(" [3/4] Component breakdown...")
    report.h2("3. Per-Component Breakdown")
    report.text(f"*Cold measurement over {n_samples} diverse samples with cache clearing.*")

    ds_run = make_dataset(csv_path, root_dir, load_segmentation=True)
    rng = np.random.RandomState(SEED + 2)
    indices = rng.choice(len(ds_run), size=n_samples, replace=False)
    frame_indices = list(range(N_PAST + N_FUTURE))

    decode_times, resize_times, calib_times, seg_times, getitem_times = [], [], [], [], []

    for idx in indices:
        # Map the flat sample index back to its sequence metadata row.
        seq_idx, snippet_start = ds_run._index[int(idx)]
        row = ds_run.meta.iloc[seq_idx]
        cam_ids = row["camera_ids"].split(";")
        mr_dir = row.get("marker_removed_dir", "")
        seg_dir_str = row.get("segmentation_dir", "")
        calib_path = row.get("alloc_camera_params_path", "")

        # Deterministic per-sample camera selection, seeded by sample index.
        cam_rng = np.random.default_rng(int(idx))
        chosen = cam_rng.choice(len(cam_ids), size=min(N_CONTEXT + N_TARGET, len(cam_ids)), replace=False)
        selected_cams = [cam_ids[i] for i in chosen]
        ctx_cams = selected_cams[:N_CONTEXT]
        all_cams = selected_cams
        actual_frames = [snippet_start + f for f in frame_indices]

        # Component: video decode (decord), one batched read per camera.
        frames = None
        t0 = time.perf_counter()
        for cid in all_cams:
            vp = root_dir / mr_dir / f"{cid}.mp4"
            if vp.exists():
                vr = decord.VideoReader(str(vp), ctx=decord.cpu(0))
                valid = [min(f, len(vr) - 1) for f in actual_frames]  # clamp past end of video
                frames = vr.get_batch(valid)
        t1 = time.perf_counter()
        decode_times.append(t1 - t0)

        # Component: bilinear resize. Timed on the last decoded batch, once
        # per camera, to approximate per-sample resize cost:
        # NHWC uint8 -> NCHW float -> interpolate -> NHWC uint8.
        if frames is not None:
            t0 = time.perf_counter()
            for _ in all_cams:
                h, w = RESOLUTION
                x = frames.permute(0, 3, 1, 2).float()
                x = torch.nn.functional.interpolate(x, size=(h, w), mode="bilinear", align_corners=False)
                _ = x.permute(0, 2, 3, 1).to(torch.uint8)
            t1 = time.perf_counter()
            resize_times.append(t1 - t0)

        # Component: calibration JSON parse + K/[R|T] tensor conversion
        # (missing cameras fall back to identity intrinsics/extrinsics).
        if calib_path:
            calib_full = root_dir / calib_path
            t0 = time.perf_counter()
            with open(calib_full) as f:
                calib = json.load(f)
            for cid in all_cams:
                cd = calib.get(cid, {})
                K = np.array(cd.get("K", np.eye(3).flatten().tolist()), dtype=np.float32).reshape(3, 3)
                R = np.array(cd.get("R", np.eye(3).flatten().tolist()), dtype=np.float32).reshape(3, 3)
                T = np.array(cd.get("T", [0, 0, 0]), dtype=np.float32).reshape(3, 1)
                _ = torch.from_numpy(K)
                _ = torch.from_numpy(np.concatenate([R, T], axis=1))
            t1 = time.perf_counter()
            calib_times.append(t1 - t0)

        # Component: segmentation masks (memory-mapped .npy, context cams only).
        if seg_dir_str:
            # Frame index divided by 5: masks appear to be stored at 1/5 the
            # video frame rate -- TODO confirm against the dataset layout.
            seg_idx = [round(f / 5) for f in actual_frames]
            t0 = time.perf_counter()
            for cid in ctx_cams:
                mp = root_dir / seg_dir_str / f"{cid}_masks.npy"
                if mp.exists():
                    masks = np.load(str(mp), mmap_mode="r")
                    vs = [min(si, len(masks) - 1) for si in seg_idx]
                    _ = torch.from_numpy(masks[vs].copy())
            t1 = time.perf_counter()
            seg_times.append(t1 - t0)

        # Reference: the full cold __getitem__ for the same sample.
        clear_cache(ds_run)
        t0 = time.perf_counter()
        _ = ds_run[int(idx)]
        t1 = time.perf_counter()
        getitem_times.append(t1 - t0)

    # Empty component lists (e.g. no calibration files found) report 0 rather
    # than propagating NaN into the table.
    avg = lambda a: np.mean(a) if a else 0.0
    med = lambda a: np.median(a) if a else 0.0
    avg_gi = avg(getitem_times)

    def pct(v):
        # Component mean as a percentage of the mean full __getitem__ time.
        return f"{v / avg_gi * 100:.1f}%" if avg_gi > 0 else "—"

    comp_rows = [
        ["Video decode (decord)", fmt_time(avg(decode_times)), fmt_time(med(decode_times)), pct(avg(decode_times))],
        ["Frame resize (bilinear)", fmt_time(avg(resize_times)), fmt_time(med(resize_times)), pct(avg(resize_times))],
        ["Camera params (JSON)", fmt_time(avg(calib_times)), fmt_time(med(calib_times)), pct(avg(calib_times))],
        ["Segmentation masks (npy)", fmt_time(avg(seg_times)), fmt_time(med(seg_times)), pct(avg(seg_times))],
        ["**Sum of components**", fmt_time(avg(decode_times) + avg(resize_times) + avg(calib_times) + avg(seg_times)), "—", pct(avg(decode_times) + avg(resize_times) + avg(calib_times) + avg(seg_times))],
        ["**Full __getitem__**", fmt_time(avg_gi), fmt_time(med(getitem_times)), "100.0%"],
    ]

    report.table(
        ["Component", "Mean", "Median", "% of getitem"],
        comp_rows,
        align=["left", "right", "right", "right"],
    )

    # ---- 4. On-disk footprint -------------------------------------------------
    print(" [4/4] Checking disk sizes...")
    report.h2("4. Dataset Size on Disk")

    import subprocess
    result = subprocess.run(
        ["du", "-sh", str(root_dir)],
        capture_output=True, text=True, timeout=120,
    )
    total_size = result.stdout.strip().split()[0] if result.returncode == 0 else "unknown"
    report.kv("Total dataset size", total_size)

    # Per-subdirectory sizes; missing directories are silently skipped.
    for subdir in [
        "Marker_Removed_Allocentric_RGB_Videos",
        "2D_Segmentation",
        "Egocentric_RGB_Videos",
        "Egocentric_Depth_Videos",
        "Hand_Poses",
        "Object_Poses",
    ]:
        p = root_dir / subdir
        if p.exists():
            r = subprocess.run(["du", "-sh", str(p)], capture_output=True, text=True, timeout=120)
            sz = r.stdout.strip().split()[0] if r.returncode == 0 else "—"
            report.kv(subdir, sz)
    report.kv_end()

    return report
|
|
|
|
def main():
    """CLI entry point: parse arguments, run the profiler, save the report."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--root", type=Path, required=True, help="Dataset root directory")
    parser.add_argument("--csv", type=Path, default=None, help="taco_info.csv (default: ROOT/taco_info.csv)")
    parser.add_argument("--output", type=Path, required=True, help="Output markdown file")
    parser.add_argument("--n-samples", type=int, default=30)
    parser.add_argument("--title", type=str, default=None)
    args = parser.parse_args()

    # Fall back to the conventional locations/names when not given explicitly.
    if args.csv:
        csv_path = args.csv
    else:
        csv_path = args.root / "taco_info.csv"
    if args.title:
        title = args.title
    else:
        title = f"TACO Dataset Profile: {args.root.name}"

    # Let the profiler's project-local imports resolve.
    sys.path.insert(0, str(SCRIPT_DIR))

    report = MarkdownReport()
    report.h1(title)
    report.text(f"*Generated: {time.strftime('%Y-%m-%d %H:%M:%S')}*")

    print(f"Profiling {args.root} ({args.n_samples} samples)...")
    profile_dataset(csv_path, args.root, args.n_samples, report)

    args.output.parent.mkdir(parents=True, exist_ok=True)
    report.save(args.output)
    print(f"\nSaved: {args.output}")
|
|
|
| if __name__ == "__main__": |
| main() |
|
|