"""Analyze the TACO dataset and generate summary plots.

Usage:
    python analyze_taco.py [--csv /path/to/taco_info.csv] [--output-dir /path/to/plots]
"""
|
|
| import argparse |
| from pathlib import Path |
|
|
| import matplotlib |
| matplotlib.use("Agg") |
| import matplotlib.pyplot as plt |
| import numpy as np |
| import pandas as pd |
| import seaborn as sns |
|
|
| from taco_dataset_loader import TACODataset |
| from view_sampler import RandomViewSampler |
|
|
| sns.set_theme(style="whitegrid", font_scale=1.1) |
| FIGSIZE = (12, 6) |
| DPI = 150 |
|
|
|
|
def plot_action_distribution(df: pd.DataFrame, out_dir: Path):
    """Horizontal bar chart of how many sequences exist per action verb."""
    ordered = df["action"].value_counts().sort_values(ascending=True)
    palette = sns.color_palette("viridis", len(ordered))
    fig, ax = plt.subplots(figsize=FIGSIZE)
    ordered.plot.barh(ax=ax, color=palette)
    ax.set_title("Sequences per Action")
    ax.set_xlabel("Number of sequences")
    # Annotate each bar with its exact count, just past the bar end.
    for row, count in enumerate(ordered):
        ax.text(count + 1, row, str(count), va="center", fontsize=9)
    plt.tight_layout()
    fig.savefig(out_dir / "action_distribution.png", dpi=DPI)
    plt.close(fig)
|
|
|
|
def plot_tool_distribution(df: pd.DataFrame, out_dir: Path):
    """Horizontal bar chart of how many sequences exist per tool."""
    ordered = df["tool"].value_counts().sort_values(ascending=True)
    palette = sns.color_palette("mako", len(ordered))
    fig, ax = plt.subplots(figsize=FIGSIZE)
    ordered.plot.barh(ax=ax, color=palette)
    ax.set_title("Sequences per Tool")
    ax.set_xlabel("Number of sequences")
    # Annotate each bar with its exact count, just past the bar end.
    for row, count in enumerate(ordered):
        ax.text(count + 1, row, str(count), va="center", fontsize=9)
    plt.tight_layout()
    fig.savefig(out_dir / "tool_distribution.png", dpi=DPI)
    plt.close(fig)
|
|
|
|
def plot_object_distribution(df: pd.DataFrame, out_dir: Path):
    """Horizontal bar chart of how many sequences exist per object."""
    ordered = df["object"].value_counts().sort_values(ascending=True)
    palette = sns.color_palette("rocket", len(ordered))
    fig, ax = plt.subplots(figsize=FIGSIZE)
    ordered.plot.barh(ax=ax, color=palette)
    ax.set_title("Sequences per Object")
    ax.set_xlabel("Number of sequences")
    # Annotate each bar with its exact count, just past the bar end.
    for row, count in enumerate(ordered):
        ax.text(count + 1, row, str(count), va="center", fontsize=9)
    plt.tight_layout()
    fig.savefig(out_dir / "object_distribution.png", dpi=DPI)
    plt.close(fig)
|
|
|
|
def plot_triplet_heatmap(df: pd.DataFrame, out_dir: Path):
    """Annotated heatmap counting sequences per (action, object) pair."""
    # size().unstack() builds the same action x object count matrix as a
    # groupby + pivot, with missing combinations filled in as 0.
    table = (
        df.groupby(["action", "object"])
        .size()
        .unstack("object", fill_value=0)
        .astype(int)
    )
    fig, ax = plt.subplots(figsize=(14, 8))
    sns.heatmap(table, annot=True, fmt="d", cmap="YlOrRd", ax=ax, linewidths=0.5)
    ax.set_title("Sequences per (Action, Object) Combination")
    plt.tight_layout()
    fig.savefig(out_dir / "action_object_heatmap.png", dpi=DPI)
    plt.close(fig)
|
|
|
|
def plot_frame_count_distribution(df: pd.DataFrame, out_dir: Path):
    """Histogram of per-sequence frame counts with mean/median markers."""
    frames = df.loc[df["n_frames"] > 0, "n_frames"]
    if frames.empty:
        return
    med = frames.median()
    avg = frames.mean()
    fig, ax = plt.subplots(figsize=FIGSIZE)
    ax.hist(frames, bins=50, color=sns.color_palette("deep")[0],
            edgecolor="white", alpha=0.8)
    ax.axvline(med, color="red", linestyle="--", label=f"Median: {med:.0f}")
    ax.axvline(avg, color="orange", linestyle="--", label=f"Mean: {avg:.0f}")
    ax.set_xlabel("Number of frames")
    ax.set_ylabel("Number of sequences")
    ax.set_title("Frame Count Distribution")
    ax.legend()
    plt.tight_layout()
    fig.savefig(out_dir / "frame_count_distribution.png", dpi=DPI)
    plt.close(fig)
|
|
|
|
def plot_duration_distribution(df: pd.DataFrame, out_dir: Path):
    """Histogram of per-sequence durations with mean/median markers."""
    durations = df.loc[df["duration_s"] > 0, "duration_s"]
    if durations.empty:
        return
    med = durations.median()
    avg = durations.mean()
    fig, ax = plt.subplots(figsize=FIGSIZE)
    ax.hist(durations, bins=50, color=sns.color_palette("deep")[1],
            edgecolor="white", alpha=0.8)
    ax.axvline(med, color="red", linestyle="--", label=f"Median: {med:.1f}s")
    ax.axvline(avg, color="orange", linestyle="--", label=f"Mean: {avg:.1f}s")
    ax.set_xlabel("Duration (seconds)")
    ax.set_ylabel("Number of sequences")
    ax.set_title("Sequence Duration Distribution")
    ax.legend()
    plt.tight_layout()
    fig.savefig(out_dir / "duration_distribution.png", dpi=DPI)
    plt.close(fig)
|
|
|
|
def plot_duration_by_action(df: pd.DataFrame, out_dir: Path):
    """Box plot of sequence duration per action, longest median first."""
    subset = df[df["duration_s"] > 0]
    if subset.empty:
        return
    # Order actions by descending median duration for a readable plot.
    medians = subset.groupby("action")["duration_s"].median()
    order = medians.sort_values(ascending=False).index
    fig, ax = plt.subplots(figsize=FIGSIZE)
    sns.boxplot(data=subset, x="action", y="duration_s", order=order, ax=ax,
                palette="viridis")
    ax.set_xlabel("Action")
    ax.set_ylabel("Duration (seconds)")
    ax.set_title("Sequence Duration by Action")
    plt.xticks(rotation=45, ha="right")
    plt.tight_layout()
    fig.savefig(out_dir / "duration_by_action.png", dpi=DPI)
    plt.close(fig)
|
|
|
|
def plot_modality_completeness(df: pd.DataFrame, out_dir: Path):
    """Horizontal bar chart of per-modality availability percentages.

    Each ``has_*`` boolean column is summarized as the percentage of
    sequences for which that modality is present.

    Args:
        df: Per-sequence metadata containing the ``has_*`` columns below.
        out_dir: Directory the PNG is written into.
    """
    # Guard: the percentage computation divides by len(df); an empty frame
    # would raise ZeroDivisionError. Nothing to plot in that case anyway.
    if df.empty:
        return
    modality_cols = [
        "has_egocentric_rgb", "has_egocentric_depth", "has_hand_poses",
        "has_object_poses", "has_segmentation", "has_alloc_cam_params",
        "has_ego_cam_params",
    ]
    # "has_egocentric_rgb" -> "Egocentric Rgb" etc. for axis labels.
    labels = [c.replace("has_", "").replace("_", " ").title() for c in modality_cols]
    counts = [df[c].sum() for c in modality_cols]
    pcts = [100 * c / len(df) for c in counts]

    fig, ax = plt.subplots(figsize=FIGSIZE)
    ax.barh(labels, pcts, color=sns.color_palette("Greens_d", len(labels)))
    ax.set_xlabel("Availability (%)")
    ax.set_title("Modality Availability Across Sequences")
    ax.set_xlim(0, 105)  # leave room for the text annotations past 100%
    for i, (p, c) in enumerate(zip(pcts, counts)):
        ax.text(p + 0.5, i, f"{p:.1f}% ({c}/{len(df)})", va="center", fontsize=9)
    plt.tight_layout()
    fig.savefig(out_dir / "modality_completeness.png", dpi=DPI)
    plt.close(fig)
|
|
|
|
def plot_sequences_per_triplet(df: pd.DataFrame, out_dir: Path):
    """Histogram of how many sequences each triplet has.

    Args:
        df: Per-sequence metadata with a ``triplet`` column.
        out_dir: Directory the PNG is written into.
    """
    triplet_counts = df["triplet"].value_counts()
    # Guard: on an empty frame .max() is NaN, which breaks range() below.
    if triplet_counts.empty:
        return
    fig, ax = plt.subplots(figsize=FIGSIZE)
    # One integer-aligned bin per possible count, centered on the integer.
    ax.hist(triplet_counts, bins=range(1, int(triplet_counts.max()) + 2),
            color=sns.color_palette("deep")[2],
            edgecolor="white", alpha=0.8, align="left")
    ax.set_xlabel("Sequences per triplet")
    ax.set_ylabel("Number of triplets")
    ax.set_title(f"Sequences per Triplet ({df['triplet'].nunique()} unique triplets)")
    ax.axvline(triplet_counts.median(), color="red", linestyle="--",
               label=f"Median: {triplet_counts.median():.0f}")
    ax.legend()
    plt.tight_layout()
    fig.savefig(out_dir / "sequences_per_triplet.png", dpi=DPI)
    plt.close(fig)
|
|
|
|
def plot_date_timeline(df: pd.DataFrame, out_dir: Path):
    """Bar chart of sequences recorded per calendar day.

    Parses the ``date`` column as YYYYMMDD strings; rows that fail to
    parse are dropped. When there are many distinct days, only every
    k-th tick label is kept so the x axis stays readable.

    Args:
        df: Per-sequence metadata with a ``date`` column.
        out_dir: Directory the PNG is written into.
    """
    dates = pd.to_datetime(df["date"], format="%Y%m%d", errors="coerce")
    valid = dates.dropna()
    if valid.empty:
        return
    fig, ax = plt.subplots(figsize=FIGSIZE)
    daily = valid.groupby(valid.dt.date).size()
    daily.plot(kind="bar", ax=ax, color=sns.color_palette("deep")[3], width=0.8)
    ax.set_xlabel("Date")
    ax.set_ylabel("Sequences recorded")
    ax.set_title("Data Collection Timeline")

    # Thin out ticks when there are many days. BUG FIX: the original set a
    # subset of tick positions but left the full fixed label list in place,
    # so positions and labels fell out of sync (recent matplotlib raises for
    # mismatched FixedLocator/FixedFormatter lengths). Thin both together.
    n_ticks = len(daily)
    if n_ticks > 20:
        step = max(1, n_ticks // 15)
        ticks = ax.get_xticks()
        labels = [lbl.get_text() for lbl in ax.get_xticklabels()]
        ax.set_xticks(ticks[::step])
        ax.set_xticklabels(labels[::step])
    plt.xticks(rotation=45, ha="right")
    plt.tight_layout()
    fig.savefig(out_dir / "collection_timeline.png", dpi=DPI)
    plt.close(fig)
|
|
|
|
def plot_frames_by_action(df: pd.DataFrame, out_dir: Path):
    """Box plot of per-sequence frame counts per action, longest first."""
    subset = df[df["n_frames"] > 0]
    if subset.empty:
        return
    # Order actions by descending median frame count for a readable plot.
    medians = subset.groupby("action")["n_frames"].median()
    order = medians.sort_values(ascending=False).index
    fig, ax = plt.subplots(figsize=FIGSIZE)
    sns.boxplot(data=subset, x="action", y="n_frames", order=order, ax=ax,
                palette="mako")
    ax.set_xlabel("Action")
    ax.set_ylabel("Frame count")
    ax.set_title("Frame Count by Action")
    plt.xticks(rotation=45, ha="right")
    plt.tight_layout()
    fig.savefig(out_dir / "frames_by_action.png", dpi=DPI)
    plt.close(fig)
|
|
|
|
def print_summary(df: pd.DataFrame):
    """Print a text summary of the dataset composition to stdout.

    Args:
        df: Per-sequence metadata with columns ``triplet``, ``action``,
            ``tool``, ``object``, ``n_allocentric_cameras``,
            ``all_modalities_complete``, ``n_frames``, ``fps`` and
            ``duration_s``.
    """
    print(f"\n{'='*70}")
    print("TACO DATASET ANALYSIS SUMMARY")
    print(f"{'='*70}")
    # BUG FIX: the completeness percentage below divides by len(df);
    # guard the empty-metadata case instead of raising ZeroDivisionError.
    if df.empty:
        print("No sequences found.")
        print(f"{'='*70}")
        return
    print(f"Total sequences: {len(df)}")
    print(f"Unique triplets: {df['triplet'].nunique()}")
    print(f"Unique actions: {df['action'].nunique()}: {sorted(df['action'].unique())}")
    print(f"Unique tools: {df['tool'].nunique()}: {sorted(df['tool'].unique())}")
    print(f"Unique objects: {df['object'].nunique()}: {sorted(df['object'].unique())}")
    print(f"All 12 cameras: {(df['n_allocentric_cameras'] == 12).sum()}/{len(df)}")
    complete = df["all_modalities_complete"].sum()
    print(f"Fully complete: {complete}/{len(df)} ({100*complete/len(df):.1f}%)")

    # Video statistics only make sense for rows with real frame counts.
    valid = df[df["n_frames"] > 0]
    if len(valid) > 0:
        print(f"\nVideo statistics ({len(valid)} sequences with video info):")
        print(f" FPS: {valid['fps'].mode().iloc[0]}")
        print(f" Frames: min={valid['n_frames'].min()}, median={valid['n_frames'].median():.0f}, "
              f"mean={valid['n_frames'].mean():.0f}, max={valid['n_frames'].max()}")
        print(f" Duration: min={valid['duration_s'].min():.1f}s, median={valid['duration_s'].median():.1f}s, "
              f"mean={valid['duration_s'].mean():.1f}s, max={valid['duration_s'].max():.1f}s")
        print(f" Total: {valid['duration_s'].sum()/3600:.1f} hours")

    top = df["triplet"].value_counts().head(10)
    print(f"\nTop 10 triplets by sequence count:")
    for triplet, count in top.items():
        print(f" {triplet}: {count}")

    print(f"{'='*70}")
|
|
|
|
def validate_samples(dataset: "TACODataset", n_samples: int = 5):
    """Load a few random samples and validate their shapes and dtypes.

    Args:
        dataset: Object providing ``__len__``/``__getitem__`` plus the
            ``n_past``/``n_future``/``n_context_views``/``n_target_views``
            attributes read below. (Annotation is quoted so this function
            can be defined without the project import resolved.)
        n_samples: Number of samples to check; capped at ``len(dataset)``.

    Returns:
        True iff every checked sample passed all shape/dtype checks.
    """
    # BUG FIX: the original compared n_ok against the *requested* n_samples,
    # so a dataset smaller than n_samples could never pass even when every
    # drawn sample was valid. Cap once and use the actual count throughout.
    n_eval = min(n_samples, len(dataset))
    print(f"\n{'='*70}")
    print(f"DATASET SAMPLE VALIDATION ({n_eval} samples)")
    print(f"{'='*70}")
    print(f"Dataset: {dataset}")
    print(f" T_past={dataset.n_past}, T_future={dataset.n_future}, "
          f"V_ctx={dataset.n_context_views}, V_tgt={dataset.n_target_views}")

    rng = np.random.default_rng(42)  # fixed seed -> reproducible sampling
    indices = rng.choice(len(dataset), size=n_eval, replace=False)
    n_ok = 0

    for i, idx in enumerate(indices):
        idx = int(idx)
        sample = dataset[idx]
        seq_id = sample["sequence_id"]
        ctx_rgb = sample["context_rgb"]
        tgt_rgb = sample["target_rgb"]

        # Expected dimensions derived from the dataset configuration.
        T_past = dataset.n_past
        T_target = dataset.n_past + dataset.n_future
        V_ctx = dataset.n_context_views
        V_tgt = dataset.n_target_views

        # Collect all violations so every problem is reported at once.
        errors = []
        if ctx_rgb.shape[0] != T_past:
            errors.append(f"context_rgb T={ctx_rgb.shape[0]}, expected {T_past}")
        if ctx_rgb.shape[1] != V_ctx:
            errors.append(f"context_rgb V={ctx_rgb.shape[1]}, expected {V_ctx}")
        if tgt_rgb.shape[0] != T_target:
            errors.append(f"target_rgb T={tgt_rgb.shape[0]}, expected {T_target}")
        if tgt_rgb.shape[1] != V_tgt:
            errors.append(f"target_rgb V={tgt_rgb.shape[1]}, expected {V_tgt}")
        if ctx_rgb.dtype != tgt_rgb.dtype:
            errors.append(f"dtype mismatch: ctx={ctx_rgb.dtype}, tgt={tgt_rgb.dtype}")
        if sample["context_cameras"].shape != (V_ctx, 3, 4):
            errors.append(f"context_cameras shape {sample['context_cameras'].shape}")
        if sample["target_cameras"].shape != (V_tgt, 3, 4):
            errors.append(f"target_cameras shape {sample['target_cameras'].shape}")
        if sample["context_intrinsics"].shape != (V_ctx, 3, 3):
            errors.append(f"context_intrinsics shape {sample['context_intrinsics'].shape}")
        if sample["target_intrinsics"].shape != (V_tgt, 3, 3):
            errors.append(f"target_intrinsics shape {sample['target_intrinsics'].shape}")
        if len(sample["frame_indices"]) != T_target:
            errors.append(f"frame_indices len={len(sample['frame_indices'])}, expected {T_target}")

        status = "OK" if not errors else "FAIL"
        if not errors:
            n_ok += 1
        # NOTE(review): assumes rgb tensors are (T, V, H, W, ...) — the H/W
        # report below reads axes 2 and 3; confirm against the loader.
        H, W = ctx_rgb.shape[2], ctx_rgb.shape[3]
        print(f"\n [{i+1}/{n_eval}] idx={idx} seq={seq_id} [{status}]")
        print(f" context_rgb: {tuple(ctx_rgb.shape)} dtype={ctx_rgb.dtype}")
        print(f" target_rgb: {tuple(tgt_rgb.shape)} dtype={tgt_rgb.dtype}")
        print(f" resolution: {H}x{W}")
        print(f" cameras: ctx={sample['context_camera_ids']} tgt={sample['target_camera_ids']}")
        print(f" frames: {sample['frame_indices'].tolist()}")
        if errors:
            for e in errors:
                print(f" ERROR: {e}")

    print(f"\n Passed: {n_ok}/{n_eval}")
    print(f"{'='*70}")
    return n_ok == n_eval
|
|
|
|
def main():
    """Parse CLI arguments, print dataset statistics, and render all plots."""
    parser = argparse.ArgumentParser(description="Analyze TACO dataset and generate plots")
    here = Path(__file__).resolve().parent
    parser.add_argument(
        "--csv", type=Path,
        default=here.parent.parent / "taco_info.csv",
        help="Path to taco_info.csv",
    )
    parser.add_argument(
        "--root-dir", type=Path,
        default=here.parent,
        help="Root directory of the TACO dataset",
    )
    parser.add_argument(
        "--output-dir", type=Path,
        default=here / "plots",
        help="Directory to save plots",
    )
    parser.add_argument("--n-context-views", type=int, default=4)
    parser.add_argument("--n-target-views", type=int, default=2)
    parser.add_argument("--n-past", type=int, default=3)
    parser.add_argument("--n-future", type=int, default=1)
    parser.add_argument("--resolution", type=int, nargs=2, default=[360, 640],
                        metavar=("H", "W"), help="Resize frames to HxW")
    parser.add_argument("--n-validate", type=int, default=5,
                        help="Number of samples to load for validation (0 to skip)")
    args = parser.parse_args()

    dataset = TACODataset(
        csv_path=args.csv,
        root_dir=args.root_dir,
        view_sampler=RandomViewSampler(),
        n_context_views=args.n_context_views,
        n_target_views=args.n_target_views,
        n_past=args.n_past,
        n_future=args.n_future,
        resolution=tuple(args.resolution),
        seed=0,
    )
    df = dataset.meta
    out_dir = args.output_dir
    out_dir.mkdir(parents=True, exist_ok=True)

    print(f"Dataset: {dataset}")
    print(f"Metadata: {len(df)} sequences from {args.csv}")
    print(f"Saving plots to {out_dir}/")

    print_summary(df)

    if args.n_validate > 0:
        validate_samples(dataset, n_samples=args.n_validate)

    # Table of (plotter, output filename) pairs — each plotter writes its
    # own PNG into out_dir; the filename here is only echoed to the console.
    plot_jobs = [
        (plot_action_distribution, "action_distribution.png"),
        (plot_tool_distribution, "tool_distribution.png"),
        (plot_object_distribution, "object_distribution.png"),
        (plot_triplet_heatmap, "action_object_heatmap.png"),
        (plot_frame_count_distribution, "frame_count_distribution.png"),
        (plot_duration_distribution, "duration_distribution.png"),
        (plot_duration_by_action, "duration_by_action.png"),
        (plot_modality_completeness, "modality_completeness.png"),
        (plot_sequences_per_triplet, "sequences_per_triplet.png"),
        (plot_date_timeline, "collection_timeline.png"),
        (plot_frames_by_action, "frames_by_action.png"),
    ]
    print("\nGenerating plots...")
    for plot_fn, filename in plot_jobs:
        plot_fn(df, out_dir)
        print(f" {filename}")

    print(f"\nDone! {len(plot_jobs)} plots saved to {out_dir}/")
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|