# taco_dataset_resized/tools/taco_analysis/profile_loading.py
# (Hugging Face Hub listing metadata: uploaded by mzhobro via
# huggingface_hub, commit ce575dd, verified.)
#!/usr/bin/env python3
"""Profile TACO dataset loading performance.
Measures:
1. Single-sample loading time at different resolutions
2. Batch loading throughput with varying num_workers
3. Per-component timing breakdown for a single sample
"""
import sys
import time
from pathlib import Path
# ---------------------------------------------------------------------------
# Setup paths so we can import from the parent taco_dataset directory
# ---------------------------------------------------------------------------
SCRIPT_DIR = Path(__file__).resolve().parent
DATASET_DIR = SCRIPT_DIR.parent.parent # taco_dataset_resized/
sys.path.insert(0, str(DATASET_DIR))
import numpy as np
import torch
from torch.utils.data import DataLoader
import decord
decord.bridge.set_bridge("torch")
from taco_dataset_loader import TACODataset
from view_sampler import RandomViewSampler
# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
# Per-sequence metadata table for the dataset.
CSV_PATH = DATASET_DIR / "taco_info.csv"
# Dataset root; video/mask paths from the CSV are joined onto this.
ROOT_DIR = DATASET_DIR
# Shared RNG seed so every profiled dataset instance samples identically.
SEED = 42
# Views per sample fed to TACODataset (context + target cameras).
N_CONTEXT = 4
N_TARGET = 2
# Temporal window around the anchor frame (n_past + n_future frames total).
N_PAST = 3
N_FUTURE = 1
# Resolutions to test: (H, W) or None for native
RESOLUTIONS = {
    "native (no resize)": None,
    "376x512 (target)": (376, 512),
    "360x640 (common)": (360, 640),
}
# Fixed resolution and sample budget for the DataLoader throughput sweep.
BATCH_THROUGHPUT_RESOLUTION = (376, 512)
BATCH_SAMPLES = 20
# num_workers values swept in the throughput benchmark.
WORKER_COUNTS = [0, 1, 2, 4]
def fmt_time(seconds: float) -> str:
    """Render a duration with an appropriate unit (s / ms / us)."""
    if seconds >= 1.0:
        return f"{seconds:.2f} s"
    if seconds >= 0.001:
        return f"{seconds * 1e3:.1f} ms"
    return f"{seconds * 1e6:.0f} us"
def fmt_rate(samples: int, seconds: float) -> str:
    """Return samples-per-second as a string, guarding against zero elapsed time."""
    return "inf" if seconds == 0 else f"{samples / seconds:.2f}"
# ===================================================================
# 1. Single-sample loading time at different resolutions
# ===================================================================
def profile_single_sample():
    """Benchmark single-sample load latency at each resolution in RESOLUTIONS.

    For resized configurations this times a full ``ds[0]`` call.  For the
    native (no-resize) case it times raw decord decodes of one low-res and
    one high-res camera instead, because stacking mixed-resolution frames
    would fail inside the dataset.  Results are printed as a table.
    """
    print("=" * 72)
    print("1. SINGLE-SAMPLE LOADING TIME (different resolutions)")
    print("=" * 72)
    print()
    results = []  # rows of (label, config description, mean seconds, std seconds)
    for label, resolution in RESOLUTIONS.items():
        if resolution is None:
            # Native resolution: load a single camera to avoid torch.stack
            # error from mixed resolutions (2048x1500 vs 4096x3000).
            ds = TACODataset(
                csv_path=CSV_PATH,
                root_dir=ROOT_DIR,
                view_sampler=RandomViewSampler(),
                n_context_views=N_CONTEXT,
                n_target_views=N_TARGET,
                n_past=N_PAST,
                n_future=N_FUTURE,
                resolution=None,
                seed=SEED,
            )
            # Use load_sequence_views but only decode one camera manually
            row = ds.meta.iloc[0]
            cam_ids = row["camera_ids"].split(";")
            mr_dir = row.get("marker_removed_dir", "")
            frame_indices = [0, 1, 2, 3]  # n_past + n_future = 4
            # Profile loading a single camera at native resolution
            cam_id = cam_ids[0]
            video_path = ROOT_DIR / mr_dir / f"{cam_id}.mp4"
            # Warmup
            vr = decord.VideoReader(str(video_path), ctx=decord.cpu(0))
            _ = vr.get_batch(frame_indices)
            times = []
            for _ in range(3):
                t0 = time.perf_counter()
                # Reader construction is included in the timing on purpose:
                # it matches what a fresh __getitem__ would pay.
                vr = decord.VideoReader(str(video_path), ctx=decord.cpu(0))
                frames = vr.get_batch(frame_indices)
                t1 = time.perf_counter()
                times.append(t1 - t0)
            avg = np.mean(times)
            shape = frames.shape  # (T, H, W, 3) via the torch bridge
            results.append((label, f"1 cam, {shape[1]}x{shape[2]}", avg, np.std(times)))
            # Also profile the higher-res camera
            # NOTE(review): assumes each sequence has >= 2 cameras and that
            # index 1 is a high-res one — confirm against the CSV.
            cam_id_hi = cam_ids[1]
            video_path_hi = ROOT_DIR / mr_dir / f"{cam_id_hi}.mp4"
            vr_hi = decord.VideoReader(str(video_path_hi), ctx=decord.cpu(0))
            _ = vr_hi.get_batch(frame_indices)
            times_hi = []
            for _ in range(3):
                t0 = time.perf_counter()
                vr_hi = decord.VideoReader(str(video_path_hi), ctx=decord.cpu(0))
                frames_hi = vr_hi.get_batch(frame_indices)
                t1 = time.perf_counter()
                times_hi.append(t1 - t0)
            avg_hi = np.mean(times_hi)
            shape_hi = frames_hi.shape
            results.append(
                (f"native (high-res cam)", f"1 cam, {shape_hi[1]}x{shape_hi[2]}", avg_hi, np.std(times_hi))
            )
        else:
            # Resized path: time the dataset's own __getitem__ end to end.
            ds = TACODataset(
                csv_path=CSV_PATH,
                root_dir=ROOT_DIR,
                view_sampler=RandomViewSampler(),
                n_context_views=N_CONTEXT,
                n_target_views=N_TARGET,
                n_past=N_PAST,
                n_future=N_FUTURE,
                resolution=resolution,
                seed=SEED,
            )
            # Warmup
            _ = ds[0]
            times = []
            for _ in range(3):
                t0 = time.perf_counter()
                sample = ds[0]
                t1 = time.perf_counter()
                times.append(t1 - t0)
            avg = np.mean(times)
            # Presumably (views, C, H, W) or (views, T, H, W); indices 2/3
            # are read as H/W below — TODO confirm against TACODataset.
            ctx_shape = sample["context_rgb"].shape
            results.append(
                (label, f"{N_CONTEXT}+{N_TARGET} cams, {ctx_shape[2]}x{ctx_shape[3]}", avg, np.std(times))
            )
    # Print table
    print(f"{'Resolution':<25} {'Config':<30} {'Mean Time':>12} {'Std':>10}")
    print("-" * 77)
    for label, config, mean_t, std_t in results:
        print(f"{label:<25} {config:<30} {fmt_time(mean_t):>12} {fmt_time(std_t):>10}")
    print()
# ===================================================================
# 2. Batch loading throughput
# ===================================================================
def profile_batch_throughput():
    """Sweep DataLoader num_workers and report samples-per-second throughput."""
    print("=" * 72)
    print(f"2. BATCH LOADING THROUGHPUT (resolution={BATCH_THROUGHPUT_RESOLUTION}, "
          f"{BATCH_SAMPLES} samples)")
    print("=" * 72)
    print()
    measurements = []
    for workers in WORKER_COUNTS:
        dataset = TACODataset(
            csv_path=CSV_PATH,
            root_dir=ROOT_DIR,
            view_sampler=RandomViewSampler(),
            n_context_views=N_CONTEXT,
            n_target_views=N_TARGET,
            n_past=N_PAST,
            n_future=N_FUTURE,
            resolution=BATCH_THROUGHPUT_RESOLUTION,
            seed=SEED,
        )
        loader = DataLoader(
            dataset,
            batch_size=1,
            shuffle=False,
            num_workers=workers,
            pin_memory=False,
            # For num_workers>0, each worker needs its own decord reader
            persistent_workers=(workers > 0),
        )
        # Warmup (1 sample)
        warm = iter(loader)
        next(warm)
        # Time loading BATCH_SAMPLES samples
        n_loaded = 0
        start = time.perf_counter()
        for _batch in loader:
            n_loaded += 1
            if n_loaded >= BATCH_SAMPLES:
                break
        wall = time.perf_counter() - start
        measurements.append((workers, n_loaded, wall, n_loaded / wall))
        # Clean up workers
        del loader
    print(f"{'num_workers':>12} {'Samples':>10} {'Wall Time':>12} {'Samples/sec':>14}")
    print("-" * 52)
    for workers, n_loaded, wall, _rate in measurements:
        print(f"{workers:>12} {n_loaded:>10} {fmt_time(wall):>12} {fmt_rate(n_loaded, wall):>14}")
    print()
# ===================================================================
# 3. Per-component timing breakdown
# ===================================================================
def profile_component_breakdown():
    """Time each loading component in isolation and compare to __getitem__.

    Components measured independently: (A) decord video decode, (B) bilinear
    frame resize, (C) camera-calibration JSON parse, (D) segmentation-mask
    load, (E) a full dataset __getitem__ call.  Prints a table expressing
    each component as a percentage of the full call's time.
    """
    print("=" * 72)
    print("3. PER-COMPONENT TIMING BREAKDOWN (single sample)")
    print("=" * 72)
    print()
    row_ds = TACODataset(
        csv_path=CSV_PATH,
        root_dir=ROOT_DIR,
        view_sampler=RandomViewSampler(),
        n_context_views=N_CONTEXT,
        n_target_views=N_TARGET,
        n_past=N_PAST,
        n_future=N_FUTURE,
        resolution=(376, 512),
        load_segmentation=True,
        seed=SEED,
    )
    # Get the first sequence's data
    row = row_ds.meta.iloc[0]
    cam_ids = row["camera_ids"].split(";")
    mr_dir = row.get("marker_removed_dir", "")
    seg_dir = row.get("segmentation_dir", "")
    calib_path = row.get("alloc_camera_params_path", "")
    frame_indices = [0, 1, 2, 3]
    resolution = (376, 512)
    # Select cameras
    # NOTE(review): this re-implements the sampling with its own RNG, so the
    # chosen cameras may differ from what __getitem__ actually picks — the
    # comparison is approximate by design.
    rng = np.random.default_rng(SEED)
    chosen = rng.choice(len(cam_ids), size=N_CONTEXT + N_TARGET, replace=False)
    selected_cams = [cam_ids[i] for i in chosen]
    ctx_cams = selected_cams[:N_CONTEXT]
    tgt_cams = selected_cams[N_CONTEXT:]
    all_cams = ctx_cams + tgt_cams
    # --- A. Video decode (no resize) ---
    # Warmup
    for cam_id in all_cams[:1]:
        vp = ROOT_DIR / mr_dir / f"{cam_id}.mp4"
        vr = decord.VideoReader(str(vp), ctx=decord.cpu(0))
        _ = vr.get_batch(frame_indices)
    decode_times = []
    for trial in range(3):
        t0 = time.perf_counter()
        for cam_id in all_cams:
            vp = ROOT_DIR / mr_dir / f"{cam_id}.mp4"
            # Reader construction is timed too — it is paid per __getitem__.
            vr = decord.VideoReader(str(vp), ctx=decord.cpu(0))
            frames = vr.get_batch(frame_indices)
        t1 = time.perf_counter()
        decode_times.append(t1 - t0)
    avg_decode = np.mean(decode_times)
    # --- B. Resize frames ---
    # Get a representative frame batch for resizing
    vp = ROOT_DIR / mr_dir / f"{all_cams[0]}.mp4"
    vr = decord.VideoReader(str(vp), ctx=decord.cpu(0))
    sample_frames = vr.get_batch(frame_indices)  # (T, H, W, 3)
    resize_times = []
    for trial in range(5):
        t0 = time.perf_counter()
        # Simulate resizing for all cameras
        for _ in all_cams:
            h, w = resolution
            # uint8 HWC -> float CHW for interpolate, then back to uint8 HWC.
            x = sample_frames.permute(0, 3, 1, 2).float()
            x = torch.nn.functional.interpolate(x, size=(h, w), mode="bilinear", align_corners=False)
            _ = x.permute(0, 2, 3, 1).to(torch.uint8)
        t1 = time.perf_counter()
        resize_times.append(t1 - t0)
    avg_resize = np.mean(resize_times)
    # --- C. Camera parameters ---
    import json
    calib_full_path = ROOT_DIR / calib_path
    # Warmup
    with open(calib_full_path) as f:
        _ = json.load(f)
    calib_times = []
    for trial in range(10):
        t0 = time.perf_counter()
        with open(calib_full_path) as f:
            calib = json.load(f)
        for cam_id in all_cams:
            # Missing cameras fall back to identity K/R and zero translation.
            cam_data = calib.get(cam_id, {})
            K = np.array(cam_data.get("K", np.eye(3).flatten().tolist()), dtype=np.float32).reshape(3, 3)
            R = np.array(cam_data.get("R", np.eye(3).flatten().tolist()), dtype=np.float32).reshape(3, 3)
            T = np.array(cam_data.get("T", [0, 0, 0]), dtype=np.float32).reshape(3, 1)
            _ = torch.from_numpy(K)
            _ = torch.from_numpy(np.concatenate([R, T], axis=1))  # 3x4 extrinsics [R|T]
        t1 = time.perf_counter()
        calib_times.append(t1 - t0)
    avg_calib = np.mean(calib_times)
    # --- D. Segmentation masks ---
    seg_times = []
    # Masks appear to be stored at 1/5 the video frame rate — TODO confirm.
    seg_indices = [round(f / 5) for f in frame_indices]
    # Warmup
    for cam_id in ctx_cams[:1]:
        mask_path = ROOT_DIR / seg_dir / f"{cam_id}_masks.npy"
        if mask_path.exists():
            masks = np.load(str(mask_path), mmap_mode="r")
            _ = masks[seg_indices].copy()
    for trial in range(3):
        t0 = time.perf_counter()
        for cam_id in ctx_cams:
            mask_path = ROOT_DIR / seg_dir / f"{cam_id}_masks.npy"
            if mask_path.exists():
                masks = np.load(str(mask_path), mmap_mode="r")
                # Clamp indices so short mask arrays don't raise IndexError.
                valid_seg = [min(idx, len(masks) - 1) for idx in seg_indices]
                selected = masks[valid_seg]
                # .copy() forces the mmap read into memory before wrapping.
                _ = torch.from_numpy(selected.copy())
        t1 = time.perf_counter()
        seg_times.append(t1 - t0)
    # The guard is defensive: the trial loop always appends, so seg_times is
    # non-empty here even when every mask file is missing.
    avg_seg = np.mean(seg_times) if seg_times else 0.0
    # --- E. Full __getitem__ call (with resize) ---
    # Warmup
    _ = row_ds[0]
    getitem_times = []
    for trial in range(3):
        t0 = time.perf_counter()
        _ = row_ds[0]
        t1 = time.perf_counter()
        getitem_times.append(t1 - t0)
    avg_getitem = np.mean(getitem_times)
    # Print table
    total_breakdown = avg_decode + avg_resize + avg_calib + avg_seg
    components = [
        ("Video decode (decord)", avg_decode, f"{len(all_cams)} cameras x {len(frame_indices)} frames"),
        ("Frame resize (bilinear)", avg_resize, f"{len(all_cams)} cameras to {resolution[0]}x{resolution[1]}"),
        ("Camera params (JSON)", avg_calib, f"{len(all_cams)} cameras"),
        ("Segmentation masks (npy)", avg_seg, f"{len(ctx_cams)} ctx cameras"),
        ("---", 0, ""),  # sentinel row rendered as a separator line below
        ("Sum of components", total_breakdown, ""),
        ("Actual __getitem__", avg_getitem, "with resize + segmentation"),
    ]
    print(f"{'Component':<30} {'Time':>12} {'% of Total':>12} {'Details':<35}")
    print("-" * 92)
    for name, t, details in components:
        if name == "---":
            print("-" * 92)
            continue
        pct = (t / avg_getitem * 100) if avg_getitem > 0 else 0
        print(f"{name:<30} {fmt_time(t):>12} {pct:>10.1f}% {details:<35}")
    print()
    print(" NOTE: Component times are measured independently. The sum may")
    print(" exceed __getitem__ due to different memory/cache conditions in")
    print(" isolated benchmarks vs the integrated pipeline. The key takeaway")
    print(" is the *relative* cost: video decode dominates by far.")
    print()
# ===================================================================
# 4. Summary of dataset stats
# ===================================================================
def print_dataset_stats():
    """Print dataset size stats and the native resolutions of the first sequence's cameras."""
    print("=" * 72)
    print("4. DATASET OVERVIEW")
    print("=" * 72)
    print()
    ds = TACODataset(
        csv_path=CSV_PATH,
        root_dir=ROOT_DIR,
        view_sampler=RandomViewSampler(),
        n_context_views=N_CONTEXT,
        n_target_views=N_TARGET,
        n_past=N_PAST,
        n_future=N_FUTURE,
        resolution=(376, 512),
        seed=SEED,
    )
    print(f" Dataset repr: {ds}")
    print(f" Total samples (snippets): {len(ds)}")
    print(f" Number of sequences: {ds.num_sequences}")
    print()
    # Check native resolution of first sequence
    first_row = ds.meta.iloc[0]
    camera_ids = first_row["camera_ids"].split(";")
    video_dir = first_row.get("marker_removed_dir", "")
    print(" Native camera resolutions (1st sequence):")
    by_resolution = {}
    for cam in camera_ids:
        clip = ROOT_DIR / video_dir / f"{cam}.mp4"
        if not clip.exists():
            continue
        reader = decord.VideoReader(str(clip), ctx=decord.cpu(0))
        frame_shape = reader[0].shape  # (H, W, C); key rendered as WxH
        key = f"{frame_shape[1]}x{frame_shape[0]}"
        by_resolution.setdefault(key, []).append(cam)
    for key, cams in by_resolution.items():
        print(f" {key}: {len(cams)} cameras")
    print()
# ===================================================================
# Main
# ===================================================================
def main():
    """Print the environment banner, then run every profiling section in order."""
    star_rule = "*" * 72
    print()
    print(star_rule)
    print(" TACO Dataset Loading Profiler")
    print(f" Dataset: {ROOT_DIR}")
    print(f" CSV: {CSV_PATH}")
    print(f" Python: {sys.executable}")
    print(f" PyTorch: {torch.__version__}")
    print(star_rule)
    print()
    print_dataset_stats()
    profile_single_sample()
    profile_batch_throughput()
    profile_component_breakdown()
    eq_rule = "=" * 72
    print(eq_rule)
    print("Done.")
    print(eq_rule)
if __name__ == "__main__":
main()