# taco_analysis/taco_dataset_loader.py
# Uploaded to the Hugging Face Hub (user: mzhobro) via huggingface_hub, commit e51b9e1 (verified).
"""PyTorch Dataset for the TACO dataset (NVS training).
Loads temporal snippets from context and target camera views for
feed-forward dynamic scene reconstruction with novel view rendering.
Dependencies:
pip install torch pandas numpy decord
Example:
from taco_dataset_loader import TACODataset
from view_sampler import RandomViewSampler
ds = TACODataset(
csv_path="taco_info.csv",
root_dir="/path/to/taco_dataset",
view_sampler=RandomViewSampler(),
n_context_views=4,
n_target_views=2,
n_past=3,
n_future=1,
)
sample = ds[0]
"""
import json
import pickle
from functools import lru_cache
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
try:
import decord
decord.bridge.set_bridge("torch")
HAS_DECORD = True
except ImportError:
HAS_DECORD = False
from view_sampler import ViewSampler
class TACODataset(Dataset):
    """TACO dataset for dynamic novel view synthesis + forecasting.

    For a given index, loads temporal snippets from context and target
    camera views of one sequence. Context views observe the past;
    target views span past **and** future so the model must both
    reconstruct novel views at known timesteps and forecast at future ones.

    Returns a dict with:
        context_rgb:        (T_past, V_ctx, H, W, 3) uint8
        target_rgb:         (T_past+T_future, V_tgt, H, W, 3) uint8
        context_cameras:    (V_ctx, 3, 4) float32 — [R|t] extrinsics
        target_cameras:     (V_tgt, 3, 4) float32
        context_intrinsics: (V_ctx, 3, 3) float32
        target_intrinsics:  (V_tgt, 3, 3) float32
        context_depth:      (T_past, H, W) float32 — egocentric depth (if available)
        context_segmentation: (T_past, V_ctx, H, W) uint8 (if available)
        hand_joints:        (T, 2, 21, 3) float32 — 3D joints, left=0/right=1 (if available)
        hand_poses:         dict with left/right MANO params (if available)
        object_poses:       dict with tool/target 6DoF (if available)
        sequence_id:        str
        frame_indices:      (T_past + T_future,) int
        context_camera_ids: list[str]
        target_camera_ids:  list[str]
    """

    def __init__(
        self,
        csv_path: str | Path,
        root_dir: str | Path,
        view_sampler: ViewSampler,
        n_context_views: int = 4,
        n_target_views: int = 2,
        n_past: int = 3,
        n_future: int = 1,
        resolution: tuple[int, int] | None = None,
        load_depth: bool = False,
        load_segmentation: bool = False,
        load_poses: bool = False,
        load_hand_joints: bool = False,
        frame_step: int = 1,
        seed: int | None = None,
    ):
        """
        Args:
            csv_path: Path to taco_info.csv.
            root_dir: Root directory of the extracted TACO dataset.
            view_sampler: Strategy for selecting context/target cameras.
            n_context_views: Number of context (input) camera views.
            n_target_views: Number of target (supervision) camera views.
            n_past: Number of past frames per snippet.
            n_future: Number of future frames per snippet.
            resolution: (H, W) to resize frames to. None = native resolution.
            load_depth: Whether to load egocentric depth maps.
            load_segmentation: Whether to load 2D segmentation masks.
            load_poses: Whether to load hand and object poses.
            load_hand_joints: Whether to load pre-computed 3D hand joints (T, 2, 21, 3).
            frame_step: Step between consecutive frames in a snippet.
            seed: Random seed for view sampling reproducibility.

        Raises:
            ImportError: If decord is not installed (required for video decoding).
        """
        if not HAS_DECORD:
            raise ImportError("decord is required: pip install decord")
        self.root = Path(root_dir).resolve()
        self.view_sampler = view_sampler
        self.n_context_views = n_context_views
        self.n_target_views = n_target_views
        self.n_past = n_past
        self.n_future = n_future
        self.n_frames_per_snippet = n_past + n_future
        self.resolution = resolution
        self.load_depth = load_depth
        self.load_segmentation = load_segmentation
        self.load_poses = load_poses
        self.load_hand_joints = load_hand_joints
        self.frame_step = frame_step
        self.seed = seed
        # Per-instance calibration cache (path -> parsed JSON dict).
        # NOTE: a plain dict instead of @lru_cache on the method — lru_cache
        # on an instance method keys on `self`, keeping every instance alive
        # for the cache's lifetime (ruff B019). The number of distinct
        # calibration files is small (one per sequence), so unbounded is fine.
        self._calib_cache: dict[str, dict] = {}
        # Load metadata
        meta = pd.read_csv(csv_path)
        # Filter to sequences with enough cameras and frames
        min_cameras = n_context_views + n_target_views
        min_frames = self.n_frames_per_snippet * frame_step
        meta = meta[
            (meta["n_allocentric_cameras"] >= min_cameras)
            & (meta["n_frames"] >= min_frames)
        ].reset_index(drop=True)
        self.meta = meta
        # Build flat index: (seq_idx, t_start) pairs — one entry per snippet.
        self._index = []
        for seq_idx in range(len(meta)):
            n_frames = int(meta.iloc[seq_idx]["n_frames"])
            snippet_len = self.n_frames_per_snippet * frame_step
            # Snippets start every `frame_step` frames; the last start must
            # leave room for a full snippet.
            for t_start in range(0, n_frames - snippet_len + 1, frame_step):
                self._index.append((seq_idx, t_start))

    def __len__(self):
        """Total number of (sequence, snippet-start) samples."""
        return len(self._index)

    def __getitem__(self, idx: int) -> dict:
        """Load one training sample (see class docstring for the dict layout)."""
        seq_idx, t_start = self._index[idx]
        row = self.meta.iloc[seq_idx]
        # Parse camera IDs (stored as a ';'-separated string in the CSV)
        camera_ids = row["camera_ids"].split(";")
        # Sample views. Seeding with seed+idx makes each sample reproducible
        # while still varying across samples; seed=None gives fresh entropy.
        rng = np.random.default_rng(self.seed + idx if self.seed is not None else None)
        ctx_cams, tgt_cams = self.view_sampler.sample(
            camera_ids, self.n_context_views, self.n_target_views, rng=rng
        )
        # Frame indices for the snippet
        frame_indices = [t_start + i * self.frame_step for i in range(self.n_frames_per_snippet)]
        past_frames = frame_indices[: self.n_past]
        future_frames = frame_indices[self.n_past :]
        sequence_id = row["sequence_id"]
        # Load RGB frames
        # Context: past only; Target: past + future (NVS + forecasting)
        context_rgb = self._load_multi_view_frames(row, ctx_cams, past_frames)
        target_rgb = self._load_multi_view_frames(row, tgt_cams, frame_indices)
        # Load camera parameters
        ctx_cameras, ctx_intrinsics = self._load_camera_params(row, ctx_cams)
        tgt_cameras, tgt_intrinsics = self._load_camera_params(row, tgt_cams)
        result = {
            "context_rgb": context_rgb,  # (T_past, V_ctx, H, W, 3)
            "target_rgb": target_rgb,  # (T_past+T_future, V_tgt, H, W, 3)
            "context_cameras": ctx_cameras,  # (V_ctx, 3, 4)
            "target_cameras": tgt_cameras,  # (V_tgt, 3, 4)
            "context_intrinsics": ctx_intrinsics,  # (V_ctx, 3, 3)
            "target_intrinsics": tgt_intrinsics,  # (V_tgt, 3, 3)
            "sequence_id": sequence_id,
            "frame_indices": torch.tensor(frame_indices, dtype=torch.long),
            "context_camera_ids": ctx_cams,
            "target_camera_ids": tgt_cams,
        }
        # Optional: depth
        if self.load_depth and row.get("has_egocentric_depth", False):
            result["context_depth"] = self._load_ego_depth(row, past_frames)
        # Optional: segmentation
        if self.load_segmentation and row.get("has_segmentation", False):
            result["context_segmentation"] = self._load_segmentation(row, ctx_cams, past_frames)
        # Optional: poses
        if self.load_poses:
            if row.get("has_hand_poses", False):
                result["hand_poses"] = self._load_hand_poses(row, frame_indices)
            if row.get("has_object_poses", False):
                result["object_poses"] = self._load_object_poses(row, frame_indices)
        # Optional: pre-computed 3D hand joints
        if self.load_hand_joints and row.get("has_hand_poses", False):
            result["hand_joints"] = self._load_hand_joints(row, frame_indices)
        return result

    # ------------------------------------------------------------------
    # Video decoding
    # ------------------------------------------------------------------
    def _get_video_reader(self, video_path: Path) -> decord.VideoReader:
        """Get a decord VideoReader (no caching — readers are not picklable)."""
        ctx = decord.cpu(0)
        return decord.VideoReader(str(video_path), ctx=ctx)

    def _load_multi_view_frames(
        self, row: pd.Series, cam_ids: list[str], frame_indices: list[int]
    ) -> torch.Tensor:
        """Load frames from multiple camera views.

        Args:
            row: Metadata row for the sequence.
            cam_ids: Camera IDs to decode (one {cam_id}.mp4 video per camera).
            frame_indices: Frame indices to decode from each video.

        Returns: (n_frames, n_views, H, W, 3) uint8 tensor.
        """
        mr_dir = row.get("marker_removed_dir", "")
        if not mr_dir:
            # Return zeros if marker_removed not available
            h, w = self.resolution or (360, 640)
            return torch.zeros(len(frame_indices), len(cam_ids), h, w, 3, dtype=torch.uint8)
        views = []
        for cam_id in cam_ids:
            video_path = self.root / mr_dir / f"{cam_id}.mp4"
            vr = self._get_video_reader(video_path)
            # Batch decode frames (the torch bridge makes this a tensor)
            frames = vr.get_batch(frame_indices)  # (T, H, W, 3)
            if self.resolution is not None:
                frames = self._resize_frames(frames, self.resolution)
            views.append(frames)
        # Stack: (n_views, T, H, W, 3) -> (T, n_views, H, W, 3)
        stacked = torch.stack(views, dim=0)  # (V, T, H, W, 3)
        return stacked.permute(1, 0, 2, 3, 4)

    def _resize_frames(self, frames: torch.Tensor, resolution: tuple[int, int]) -> torch.Tensor:
        """Resize (T, H, W, 3) uint8 frames to target resolution (H, W)."""
        h, w = resolution
        # (T, H, W, 3) -> (T, 3, H, W) for interpolate, then back
        x = frames.permute(0, 3, 1, 2).float()
        x = torch.nn.functional.interpolate(x, size=(h, w), mode="bilinear", align_corners=False)
        return x.permute(0, 2, 3, 1).to(torch.uint8)

    # ------------------------------------------------------------------
    # Camera parameters
    # ------------------------------------------------------------------
    def _load_calibration_json(self, calib_path: str) -> dict:
        """Load and cache calibration.json for a sequence.

        Cached per-instance in ``self._calib_cache`` (see __init__ for why
        this is not an @lru_cache-decorated method).
        """
        cached = self._calib_cache.get(calib_path)
        if cached is None:
            with open(calib_path) as f:
                cached = json.load(f)
            self._calib_cache[calib_path] = cached
        return cached

    def _load_camera_params(
        self, row: pd.Series, cam_ids: list[str]
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Load camera extrinsics and intrinsics for specified cameras.

        Calibration.json stores per-camera:
            K: flat (9,) intrinsic matrix (row-major 3x3)
            R: flat (9,) rotation matrix (row-major 3x3)
            T: (3,) translation vector

        Missing cameras/fields fall back to identity K and zero [R|t].

        Returns:
            extrinsics: (V, 3, 4) float32 [R|t]
            intrinsics: (V, 3, 3) float32
        """
        calib_path = row.get("alloc_camera_params_path", "")
        if not calib_path:
            # No calibration available: zero extrinsics, identity intrinsics.
            # .clone() so each view owns its storage (consistent with
            # load_sequence_views; a bare .expand() would alias one tensor).
            return (
                torch.zeros(len(cam_ids), 3, 4, dtype=torch.float32),
                torch.eye(3, dtype=torch.float32).unsqueeze(0).expand(len(cam_ids), -1, -1).clone(),
            )
        calib = self._load_calibration_json(str(self.root / calib_path))
        extrinsics = []
        intrinsics = []
        for cam_id in cam_ids:
            cam_data = calib.get(cam_id, {})
            # Intrinsics: K is a flat (9,) array → reshape to (3, 3)
            K = cam_data.get("K", None)
            if K is not None:
                K = np.array(K, dtype=np.float32).reshape(3, 3)
                intrinsics.append(torch.from_numpy(K))
            else:
                intrinsics.append(torch.eye(3, dtype=torch.float32))
            # Extrinsics: R is flat (9,) → (3,3), T is (3,) → (3,1), concat to [R|t]
            R = cam_data.get("R", None)
            T = cam_data.get("T", None)
            if R is not None and T is not None:
                R = np.array(R, dtype=np.float32).reshape(3, 3)
                T = np.array(T, dtype=np.float32).reshape(3, 1)
                Rt = np.concatenate([R, T], axis=1)  # (3, 4)
                extrinsics.append(torch.from_numpy(Rt))
            else:
                extrinsics.append(torch.zeros(3, 4, dtype=torch.float32))
        return torch.stack(extrinsics), torch.stack(intrinsics)

    # ------------------------------------------------------------------
    # Depth (egocentric)
    # ------------------------------------------------------------------
    def _load_ego_depth(self, row: pd.Series, frame_indices: list[int]) -> torch.Tensor:
        """Load egocentric depth frames.

        The egocentric depth is stored as a video (MP4/AVI). We decode the
        specified frames and interpret pixel values as depth.

        Returns: (n_frames, H, W) float32 tensor (zeros if depth unavailable).
        """
        depth_dir = row.get("egocentric_depth_dir", "")
        if not depth_dir:
            h, w = self.resolution or (360, 640)
            return torch.zeros(len(frame_indices), h, w, dtype=torch.float32)
        depth_path = self.root / depth_dir
        # Find the depth video file
        depth_files = list(depth_path.glob("*.mp4")) + list(depth_path.glob("*.avi"))
        if not depth_files:
            h, w = self.resolution or (360, 640)
            return torch.zeros(len(frame_indices), h, w, dtype=torch.float32)
        vr = self._get_video_reader(depth_files[0])
        frames = vr.get_batch(frame_indices)  # (T, H, W, 3) or (T, H, W, 1)
        # Convert to single-channel depth
        if frames.ndim == 4 and frames.shape[-1] == 3:
            depth = frames[..., 0].float()  # Use first channel
        else:
            depth = frames.squeeze(-1).float()
        if self.resolution is not None:
            h, w = self.resolution
            # Nearest-neighbor: bilinear would blend depths across edges.
            depth = torch.nn.functional.interpolate(
                depth.unsqueeze(1), size=(h, w), mode="nearest"
            ).squeeze(1)
        return depth

    # ------------------------------------------------------------------
    # Segmentation
    # ------------------------------------------------------------------
    def _load_segmentation(
        self, row: pd.Series, cam_ids: list[str], frame_indices: list[int]
    ) -> torch.Tensor:
        """Load 2D segmentation masks for specified cameras and frames.

        Segmentation masks are stored as {camera_id}_masks.npy per sequence.
        IMPORTANT: Segmentation is at 6 FPS (video is 30 FPS), so seg index i
        corresponds to video frame 5*i. We map video frame indices to the
        nearest segmentation index: seg_idx = round(frame / 5).

        Returns: (n_frames, V, H, W) uint8 tensor (zeros for missing cameras).
        """
        seg_dir = row.get("segmentation_dir", "")
        if not seg_dir:
            h, w = self.resolution or (360, 640)
            return torch.zeros(len(frame_indices), len(cam_ids), h, w, dtype=torch.uint8)
        # Map video frame indices → segmentation indices (6fps vs 30fps)
        seg_indices = [round(f / 5) for f in frame_indices]
        views = []
        for cam_id in cam_ids:
            mask_path = self.root / seg_dir / f"{cam_id}_masks.npy"
            if mask_path.exists():
                # mmap avoids loading the whole mask array per sample
                masks = np.load(str(mask_path), mmap_mode="r")
                # Clamp indices to valid range
                valid_seg = [min(idx, len(masks) - 1) for idx in seg_indices]
                selected = masks[valid_seg]  # (T, H, W)
                # .copy() materializes the mmap slice into owned memory
                views.append(torch.from_numpy(selected.copy()))
            else:
                h, w = self.resolution or (360, 640)
                views.append(torch.zeros(len(frame_indices), h, w, dtype=torch.uint8))
        return torch.stack(views, dim=1)  # (T, V, H, W)

    # ------------------------------------------------------------------
    # Poses
    # ------------------------------------------------------------------
    def _load_hand_poses(self, row: pd.Series, frame_indices: list[int]) -> dict:
        """Load hand pose parameters for specified frames.

        Returns dict with keys like 'left_hand', 'right_hand', etc.
        Each value is a tensor of per-frame MANO parameters.
        """
        hand_dir = row.get("hand_poses_dir", "")
        if not hand_dir:
            return {}
        result = {}
        hand_path = self.root / hand_dir
        for side in ["left", "right"]:
            pose_file = hand_path / f"{side}_hand.pkl"
            shape_file = hand_path / f"{side}_hand_shape.pkl"
            if pose_file.exists():
                # NOTE(review): pickle.load on dataset files — trusted-source
                # assumption; do not point this loader at untrusted data.
                with open(pose_file, "rb") as f:
                    pose_data = pickle.load(f)
                if isinstance(pose_data, np.ndarray):
                    result[f"{side}_hand"] = torch.from_numpy(pose_data[frame_indices].copy())
                elif isinstance(pose_data, dict):
                    # Per-key: slice array-like values long enough to cover
                    # the snippet; pass everything else through unchanged.
                    result[f"{side}_hand"] = {
                        k: torch.from_numpy(np.array(v)[frame_indices].copy())
                        if isinstance(v, (np.ndarray, list)) and len(v) > max(frame_indices)
                        else v
                        for k, v in pose_data.items()
                    }
            if shape_file.exists():
                with open(shape_file, "rb") as f:
                    result[f"{side}_hand_shape"] = pickle.load(f)
        return result

    def _load_hand_joints(
        self, row: pd.Series, frame_indices: list[int]
    ) -> torch.Tensor:
        """Load pre-computed 3D hand joint positions.

        Returns:
            (T, 2, 21, 3) float32 tensor — left=0, right=1, in meters, world frame.
            Zeros if the joints file is unavailable.
        """
        hand_dir = row.get("hand_poses_dir", "")
        if not hand_dir:
            return torch.zeros(len(frame_indices), 2, 21, 3)
        npy_path = self.root / hand_dir / "hand_joints.npy"
        if not npy_path.exists():
            return torch.zeros(len(frame_indices), 2, 21, 3)
        joints = np.load(str(npy_path), mmap_mode="r")
        # Clamp to the last available frame if the snippet overruns the file
        selected = [min(fi, len(joints) - 1) for fi in frame_indices]
        return torch.from_numpy(joints[selected].copy())

    def _load_object_poses(self, row: pd.Series, frame_indices: list[int]) -> dict:
        """Load object 6DoF poses for specified frames.

        Object poses are stored as .npy files: tool_{id}.npy, target_{id}.npy
        Each has shape (n_frames, 4, 4) — homogeneous transforms.

        Returns dict mapping filename stem to (T, 4, 4) tensor. Frames past
        the end of a pose file are dropped (so T may be shorter than requested).
        """
        obj_dir = row.get("object_poses_dir", "")
        if not obj_dir:
            return {}
        result = {}
        obj_path = self.root / obj_dir
        for npy_file in obj_path.glob("*.npy"):
            poses = np.load(str(npy_file), mmap_mode="r")
            valid_indices = [i for i in frame_indices if i < len(poses)]
            if valid_indices:
                result[npy_file.stem] = torch.from_numpy(poses[valid_indices].copy())
        return result

    # ------------------------------------------------------------------
    # Utilities
    # ------------------------------------------------------------------
    @property
    def num_sequences(self) -> int:
        """Number of sequences that survived the camera/frame-count filter."""
        return len(self.meta)

    def get_sequence_info(self, seq_idx: int) -> dict:
        """Get metadata for a specific sequence."""
        return self.meta.iloc[seq_idx].to_dict()

    def load_calibration(self, seq_idx: int) -> dict:
        """Load (and cache) the raw calibration.json for a sequence.

        Returns the full dict mapping camera_id → {K, R, T, imgSize, ...},
        or {} when the sequence has no calibration path.
        """
        row = self.meta.iloc[seq_idx]
        calib_path = row.get("alloc_camera_params_path", "")
        if not calib_path:
            return {}
        return self._load_calibration_json(str(self.root / calib_path))

    def load_sequence_views(
        self, seq_idx: int, frame_indices: int | list[int],
    ) -> dict:
        """Load all camera views for a sequence at specified frame(s).

        Unlike :meth:`__getitem__` which splits views into context/target
        for training, this returns **every** camera — useful for
        visualization and dataset validation.
        Intrinsics are scaled to match ``self.resolution`` when set.

        Args:
            seq_idx: Index into ``self.meta``.
            frame_indices: Single frame index or list of frame indices.

        Returns:
            dict with keys:
                rgb:        (T, V, H, W, 3) uint8
                extrinsics: (V, 3, 4) float32 — [R|t]
                intrinsics: (V, 3, 3) float32 — K (scaled to resolution)
                camera_ids: list[str]
                sequence_id: str
        """
        row = self.meta.iloc[seq_idx]
        camera_ids = row["camera_ids"].split(";")
        if isinstance(frame_indices, int):
            frame_indices = [frame_indices]
        rgb = self._load_multi_view_frames(row, camera_ids, frame_indices)
        calib_path = row.get("alloc_camera_params_path", "")
        if not calib_path:
            # No calibration: zero extrinsics, identity intrinsics per view
            V = len(camera_ids)
            return {
                "rgb": rgb,
                "extrinsics": torch.zeros(V, 3, 4, dtype=torch.float32),
                "intrinsics": torch.eye(3, dtype=torch.float32)
                .unsqueeze(0).expand(V, -1, -1).clone(),
                "camera_ids": camera_ids,
                "sequence_id": row["sequence_id"],
            }
        calib = self._load_calibration_json(str(self.root / calib_path))
        extrinsics_list = []
        intrinsics_list = []
        for cam_id in camera_ids:
            cam_data = calib.get(cam_id, {})
            # Intrinsics
            K_raw = cam_data.get("K", None)
            K = (np.array(K_raw, dtype=np.float32).reshape(3, 3)
                 if K_raw is not None else np.eye(3, dtype=np.float32))
            # Scale K to match target resolution (fx/cx by width ratio,
            # fy/cy by height ratio)
            if self.resolution is not None:
                img_size = cam_data.get("imgSize", None)
                if img_size is not None:
                    w_orig, h_orig = img_size
                    h_new, w_new = self.resolution
                    K[0, :] *= w_new / w_orig
                    K[1, :] *= h_new / h_orig
            intrinsics_list.append(torch.from_numpy(K))
            # Extrinsics
            R_raw = cam_data.get("R", None)
            T_raw = cam_data.get("T", None)
            if R_raw is not None and T_raw is not None:
                R = np.array(R_raw, dtype=np.float32).reshape(3, 3)
                T = np.array(T_raw, dtype=np.float32).reshape(3, 1)
                Rt = np.concatenate([R, T], axis=1)
                extrinsics_list.append(torch.from_numpy(Rt))
            else:
                extrinsics_list.append(torch.zeros(3, 4, dtype=torch.float32))
        return {
            "rgb": rgb,
            "extrinsics": torch.stack(extrinsics_list),
            "intrinsics": torch.stack(intrinsics_list),
            "camera_ids": camera_ids,
            "sequence_id": row["sequence_id"],
        }

    def __repr__(self):
        return (
            f"TACODataset("
            f"sequences={self.num_sequences}, "
            f"samples={len(self)}, "
            f"n_past={self.n_past}, n_future={self.n_future}, "
            f"ctx_views={self.n_context_views}, tgt_views={self.n_target_views}"
            f")"
        )