# taco_dataset_resized/tools/generate_taco_csv.py
# Uploaded by mzhobro via huggingface_hub (commit 0a6ee26, verified)
#!/usr/bin/env python
"""Scan extracted TACO dataset directories and produce taco_info.csv.
One row per sequence (~2317 expected). Checks all modalities for existence
and extracts video metadata (frame count, duration, fps) via imageio/ffmpeg.
Usage:
python generate_taco_csv.py [--root /path/to/taco_dataset] [--output taco_info.csv]
"""
import argparse
import logging
import math
import re
from pathlib import Path
import imageio
import pandas as pd
from tqdm import tqdm
# Configure root logging once at import time; all messages go to stderr with timestamps.
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
logger = logging.getLogger(__name__)

# The 12 allocentric camera IDs used in TACO V1
# (serial numbers of the fixed external cameras; per-sequence .mp4 files are named <id>.mp4)
ALLOCENTRIC_CAMERA_IDS = [
    "21218078", "22070938", "22139905", "22139906", "22139908",
    "22139909", "22139910", "22139911", "22139913", "22139914",
    "22139916", "22139946",
]
def parse_triplet_dir(name: str) -> tuple[str, str, str]:
    """Split a '(action, tool, object)' directory name into its three parts.

    Falls back to ``(name, "", "")`` when the directory name is not a
    parenthesized triplet.

    Examples:
        '(dust, roller, pan)' -> ('dust', 'roller', 'pan')
        '(cut, knife, bread)' -> ('cut', 'knife', 'bread')
    """
    triplet = re.match(r"\((.+),\s*(.+),\s*(.+)\)", name)
    if triplet is None:
        return name, "", ""
    action, tool, obj = (part.strip() for part in triplet.groups())
    return action, tool, obj
def parse_sequence_dir(name: str) -> tuple[str, str]:
    """Split 'YYYYMMDD_XXX' into (date, seq_num).

    Example: '20230927_032' -> ('20230927', '032').
    Names without an underscore come back as ``(name, "")``.
    """
    date, sep, seq_num = name.partition("_")
    if not sep:
        return name, ""
    return date, seq_num
def probe_video(video_path: Path) -> dict:
    """Extract frame count, duration, fps from a video file using imageio/ffmpeg.

    Args:
        video_path: Path to the .mp4 file to probe.

    Returns:
        Dict with keys "n_frames" (int), "duration_s" (float, 3 decimals) and
        "fps" (float, 2 decimals); all zeros when the file is missing or the
        probe fails.
    """
    result = {"n_frames": 0, "duration_s": 0.0, "fps": 0.0}
    if not video_path.exists():
        return result
    try:
        reader = imageio.get_reader(str(video_path))
        try:
            meta = reader.get_meta_data()
        finally:
            # Always release the reader (and its ffmpeg subprocess), even when
            # metadata extraction raises — the original leaked it on failure.
            reader.close()
        fps = float(meta.get("fps", 0))
        duration = float(meta.get("duration", 0))
        # Some containers report nframes as inf; fall back to duration * fps.
        n_frames = meta.get("nframes", float("inf"))
        if not math.isfinite(n_frames):
            n_frames = int(round(duration * fps)) if fps > 0 else 0
        else:
            n_frames = int(n_frames)
        result = {"n_frames": n_frames, "duration_s": round(duration, 3), "fps": round(fps, 2)}
    except Exception as e:
        # Best-effort probe: a broken video should not abort the whole scan.
        logger.debug(f"probe failed for {video_path}: {e}")
    return result
def discover_sequences(root: Path) -> list[tuple[str, str]]:
    """Discover all (triplet_dir, sequence_dir) pairs from Marker_Removed.

    Scans Marker_Removed_Allocentric_RGB_Videos/ for the canonical list of
    sequences, falling back to other modality directories when the preferred
    one is not yet extracted. Returns [] if nothing is found anywhere.
    """
    # Candidate modality directories, highest priority first.
    candidate_names = (
        "Marker_Removed_Allocentric_RGB_Videos",
        "Allocentric_Camera_Parameters",
        "Object_Poses",
        "Hand_Poses",
        "Egocentric_RGB_Videos",
    )
    for dirname in candidate_names:
        source_dir = root / dirname
        if not source_dir.is_dir():
            continue
        # Two-level scan: <source>/<triplet>/<sequence>, directories only,
        # sorted at both levels for deterministic output.
        found = [
            (triplet_dir.name, seq_dir.name)
            for triplet_dir in sorted(source_dir.iterdir())
            if triplet_dir.is_dir()
            for seq_dir in sorted(triplet_dir.iterdir())
            if seq_dir.is_dir()
        ]
        if found:
            logger.info(f"Discovered {len(found)} sequences from {source_dir.name}/")
            return found
    logger.error("No sequences found in any modality directory")
    return []
def build_record(root: Path, triplet_name: str, seq_name: str) -> dict:
    """Build a metadata record for one sequence.

    Args:
        root: Dataset root directory (absolute).
        triplet_name: '(action, tool, object)' directory name.
        seq_name: 'YYYYMMDD_XXX' sequence directory name.

    Returns:
        Flat dict with identity fields, root-relative modality paths (empty
        string when missing), per-modality availability flags, stats from one
        reference video, and an overall completeness flag.
    """
    action, tool, obj = parse_triplet_dir(triplet_name)
    date, seq_num = parse_sequence_dir(seq_name)
    record = {
        "sequence_id": f"{triplet_name}/{seq_name}",
        "action": action,
        "tool": tool,
        "object": obj,
        "triplet": triplet_name,
        "date": date,
        "seq_num": seq_num,
    }

    # --- Paths ---
    mr_dir = root / "Marker_Removed_Allocentric_RGB_Videos" / triplet_name / seq_name
    ego_rgb = root / "Egocentric_RGB_Videos" / triplet_name / seq_name / "color.mp4"
    ego_depth_dir = root / "Egocentric_Depth_Videos" / triplet_name / seq_name
    hand_dir = root / "Hand_Poses" / triplet_name / seq_name
    obj_dir = root / "Object_Poses" / triplet_name / seq_name
    seg_dir = root / "2D_Segmentation" / triplet_name / seq_name
    alloc_cam = root / "Allocentric_Camera_Parameters" / triplet_name / seq_name / "calibration.json"
    ego_cam_dir = root / "Egocentric_Camera_Parameters" / triplet_name / seq_name

    # Store paths relative to root so the CSV stays valid if the root moves.
    record["marker_removed_dir"] = str(mr_dir.relative_to(root)) if mr_dir.exists() else ""
    record["egocentric_rgb_path"] = str(ego_rgb.relative_to(root)) if ego_rgb.exists() else ""
    record["egocentric_depth_dir"] = str(ego_depth_dir.relative_to(root)) if ego_depth_dir.exists() else ""
    record["hand_poses_dir"] = str(hand_dir.relative_to(root)) if hand_dir.exists() else ""
    record["object_poses_dir"] = str(obj_dir.relative_to(root)) if obj_dir.exists() else ""
    record["segmentation_dir"] = str(seg_dir.relative_to(root)) if seg_dir.exists() else ""
    record["alloc_camera_params_path"] = str(alloc_cam.relative_to(root)) if alloc_cam.exists() else ""
    record["ego_camera_params_dir"] = str(ego_cam_dir.relative_to(root)) if ego_cam_dir.exists() else ""

    # --- Availability: allocentric cameras ---
    available_cameras = []
    if mr_dir.exists():
        available_cameras = [
            cam_id for cam_id in ALLOCENTRIC_CAMERA_IDS
            if (mr_dir / f"{cam_id}.mp4").exists()
        ]
    record["n_allocentric_cameras"] = len(available_cameras)
    record["camera_ids"] = ";".join(available_cameras)

    # --- Availability: other modalities ---
    record["has_egocentric_rgb"] = ego_rgb.exists()
    # Fixed: the original checked ego_depth_dir.exists() twice through a
    # redundant conditional expression; short-circuit `and` is sufficient.
    record["has_egocentric_depth"] = ego_depth_dir.exists() and any(ego_depth_dir.iterdir())
    # left_hand.pkl is used as the sentinel file for hand-pose availability.
    record["has_hand_poses"] = hand_dir.exists() and (hand_dir / "left_hand.pkl").exists()
    record["has_object_poses"] = obj_dir.exists() and any(obj_dir.glob("*.npy"))
    record["has_segmentation"] = seg_dir.exists() and any(seg_dir.glob("*_masks.npy"))
    record["has_alloc_cam_params"] = alloc_cam.exists()
    record["has_ego_cam_params"] = ego_cam_dir.exists() and (ego_cam_dir / "egocentric_intrinsic.txt").exists()

    # --- Video info from one reference allocentric video ---
    # Prefer the first available allocentric camera; fall back to the
    # egocentric RGB video; otherwise record zeros.
    if available_cameras:
        vinfo = probe_video(mr_dir / f"{available_cameras[0]}.mp4")
    elif ego_rgb.exists():
        vinfo = probe_video(ego_rgb)
    else:
        vinfo = {"n_frames": 0, "duration_s": 0.0, "fps": 0.0}
    record["n_frames"] = vinfo["n_frames"]
    record["duration_s"] = vinfo["duration_s"]
    record["fps"] = vinfo["fps"]

    # --- Quality: complete means all 12 cameras plus every other modality ---
    record["all_modalities_complete"] = all([
        len(available_cameras) == 12,
        record["has_egocentric_rgb"],
        record["has_egocentric_depth"],
        record["has_hand_poses"],
        record["has_object_poses"],
        record["has_segmentation"],
        record["has_alloc_cam_params"],
        record["has_ego_cam_params"],
    ])
    return record
def main():
    """CLI entry point: discover sequences, build records, write the CSV, print a summary."""
    parser = argparse.ArgumentParser(description="Generate TACO dataset metadata CSV")
    parser.add_argument(
        "--root", type=Path,
        default=Path(__file__).resolve().parent,
        help="Root directory of the extracted TACO dataset (default: script directory)",
    )
    parser.add_argument(
        "--output", "-o", type=Path,
        default=None,
        help="Output CSV path (default: <root>/taco_info.csv)",
    )
    args = parser.parse_args()
    root = args.root.resolve()
    output = args.output or (root / "taco_info.csv")
    logger.info(f"Scanning TACO dataset at: {root}")
    sequences = discover_sequences(root)
    if not sequences:
        # Nothing to do — discover_sequences already logged which dirs it tried.
        logger.error("No sequences found. Is the dataset extracted?")
        return
    logger.info(f"Building metadata for {len(sequences)} sequences...")
    records = []
    for triplet_name, seq_name in tqdm(sequences, desc="Processing sequences"):
        record = build_record(root, triplet_name, seq_name)
        records.append(record)
    df = pd.DataFrame(records)
    # Deterministic row order: triplet, then recording date, then sequence number.
    df = df.sort_values(["triplet", "date", "seq_num"]).reset_index(drop=True)
    df.to_csv(output, index=False)
    logger.info(f"Saved {len(df)} sequence records to {output}")
    # --- Human-readable summary printed to stdout ---
    print(f"\n{'='*70}")
    print("TACO DATASET SUMMARY")
    print(f"{'='*70}")
    print(f"Total sequences: {len(df)}")
    print(f"Unique triplets: {df['triplet'].nunique()}")
    print(f"Unique actions: {df['action'].nunique()}")
    print(f"Unique tools: {df['tool'].nunique()}")
    print(f"Unique objects: {df['object'].nunique()}")
    print(f"\nAllocentric cameras per sequence:")
    print(f"  min: {df['n_allocentric_cameras'].min()}, "
          f"max: {df['n_allocentric_cameras'].max()}, "
          f"mean: {df['n_allocentric_cameras'].mean():.1f}")
    print(f"\nModality availability:")
    # Percentage of sequences for which each modality flag is True.
    for col in ["has_egocentric_rgb", "has_egocentric_depth", "has_hand_poses",
                "has_object_poses", "has_segmentation", "has_alloc_cam_params",
                "has_ego_cam_params"]:
        count = df[col].sum()
        print(f"  {col}: {count}/{len(df)} ({100*count/len(df):.1f}%)")
    complete = df["all_modalities_complete"].sum()
    print(f"\nFully complete sequences: {complete}/{len(df)} ({100*complete/len(df):.1f}%)")
    # Video stats only when at least one sequence had a probe-able video.
    if df["n_frames"].sum() > 0:
        print(f"\nVideo stats:")
        valid = df[df["n_frames"] > 0]
        print(f"  Sequences with video info: {len(valid)}")
        print(f"  FPS: {valid['fps'].mode().iloc[0] if len(valid) > 0 else 'N/A'}")
        print(f"  Frames: min={valid['n_frames'].min()}, median={valid['n_frames'].median():.0f}, max={valid['n_frames'].max()}")
        total_hours = valid["duration_s"].sum() / 3600
        print(f"  Total duration: {total_hours:.1f} hours")
    print(f"{'='*70}")


if __name__ == "__main__":
    main()