| |
| """Analyze camera extrinsics across the full TACO dataset. |
| |
| Loads all calibration.json files, computes per-camera statistics |
| (position, forward direction, focal length, distortion), flags outliers, |
| and generates diagnostic plots. |
| |
| Usage: |
| python analyze_extrinsics.py [--csv PATH] [--root-dir PATH] [--output-dir PATH] |
| """ |
|
|
| import argparse |
| import json |
| import sys |
| import warnings |
| from collections import defaultdict |
| from pathlib import Path |
|
|
| import matplotlib |
| matplotlib.use("Agg") |
| import matplotlib.pyplot as plt |
| import numpy as np |
| import pandas as pd |
|
|
| |
| |
| |
| |
# Resolve key directories relative to this file so the script works
# regardless of the current working directory.
SCRIPT_DIR = Path(__file__).resolve().parent
DATASET_ROOT = SCRIPT_DIR.parent.parent


# Make the project-local modules below importable before importing them.
sys.path.insert(0, str(SCRIPT_DIR))
sys.path.insert(0, str(DATASET_ROOT))


# Project-local imports (must come after the sys.path manipulation above).
from taco_dataset_loader import TACODataset
from view_sampler import RandomViewSampler
|
|
| |
| |
| |
# All 12 camera serial numbers expected in each calibration.json.
ALL_CAM_IDS = [
    "21218078", "22070938", "22139905", "22139906", "22139908", "22139909",
    "22139910", "22139911", "22139913", "22139914", "22139916", "22139946",
]
# Cameras known to be problematic ("mesh projection issues" per the summary
# output) -- TODO confirm the exact failure mode with the dataset owners.
BROKEN_CAMS = {"22070938", "22139916", "22139909", "22139905", "22139914"}
GOOD_CAMS = set(ALL_CAM_IDS) - BROKEN_CAMS


# Outlier threshold, in standard deviations (used by detect_outliers).
SIGMA_THRESHOLD = 3


# Default figure size (inches) and raster resolution for saved plots.
FIGSIZE = (14, 8)
DPI = 150
|
|
|
|
| |
| |
| |
|
|
def collect_calibration_data(dataset: TACODataset):
    """Iterate over all sequences and collect per-camera calibration data.

    Parameters
    ----------
    dataset : TACODataset
        Dataset whose ``load_calibration(seq_idx)`` returns a mapping of
        camera-id string -> calibration entry with keys ``K``, ``R``, ``T``,
        ``distCoeff`` and ``imgSize``.

    Returns a dict:
        cam_id -> {
            "centers": list of (3,) arrays -- camera center in world
            "forwards": list of (3,) arrays -- camera forward in world
            "fx": list of float
            "fy": list of float
            "imgSize": list of (w, h)
            "distCoeff": list of (5,) arrays
            "R": list of (3,3) arrays
            "T": list of (3,) arrays
            "seq_ids": list of str
        }
    """
    data = {cid: defaultdict(list) for cid in ALL_CAM_IDS}

    n_seq = dataset.num_sequences
    n_missing = 0   # sequences with no usable calibration at all
    n_partial = 0   # (sequence, camera) pairs absent from a calibration

    # Camera-frame +Z axis; rotated into world space per camera below.
    z_axis = np.array([0.0, 0.0, 1.0])

    for seq_idx in range(n_seq):
        try:
            calib = dataset.load_calibration(seq_idx)
        except Exception:
            # Best-effort: treat any load failure (missing/corrupt file)
            # the same as a missing calibration and keep going.
            n_missing += 1
            continue

        if not calib:
            n_missing += 1
            continue

        seq_id = dataset.meta.iloc[seq_idx]["sequence_id"]

        for cam_id in ALL_CAM_IDS:
            cam = calib.get(cam_id)
            if cam is None:
                n_partial += 1
                continue

            K = np.array(cam["K"], dtype=np.float64).reshape(3, 3)
            R = np.array(cam["R"], dtype=np.float64).reshape(3, 3)
            T = np.array(cam["T"], dtype=np.float64).reshape(3)
            dist = np.array(cam["distCoeff"], dtype=np.float64)
            img_size = cam["imgSize"]

            # Camera center in world coordinates, assuming the
            # world-to-camera convention x_cam = R @ x_world + T.
            center = -R.T @ T
            # Optical axis (camera +Z) expressed in world coordinates.
            forward = R.T @ z_axis
            forward = forward / (np.linalg.norm(forward) + 1e-12)

            fx = K[0, 0]
            fy = K[1, 1]

            d = data[cam_id]
            d["centers"].append(center)
            d["forwards"].append(forward)
            d["fx"].append(fx)
            d["fy"].append(fy)
            d["imgSize"].append(tuple(img_size))
            d["distCoeff"].append(dist)
            d["R"].append(R)
            d["T"].append(T)
            d["seq_ids"].append(seq_id)

        # Progress report every 200 sequences and on the final one.
        if (seq_idx + 1) % 200 == 0 or seq_idx == n_seq - 1:
            print(f" Loaded {seq_idx + 1}/{n_seq} calibrations...")

    print(f" Missing calibrations: {n_missing}, partial cameras: {n_partial}")
    return data
|
|
|
|
| |
| |
| |
|
|
def compute_per_camera_stats(data):
    """Compute per-camera summary statistics.

    Builds one record per camera (cameras with no collected data are
    skipped) and returns them as a pandas DataFrame.
    """
    records = []
    for cam_id in ALL_CAM_IDS:
        cam_data = data[cam_id]
        count = len(cam_data["centers"])
        if count == 0:
            continue

        centers = np.asarray(cam_data["centers"])
        forwards = np.asarray(cam_data["forwards"])
        fx_arr = np.asarray(cam_data["fx"])
        fy_arr = np.asarray(cam_data["fy"])
        dist_arr = np.asarray(cam_data["distCoeff"])

        rec = {
            "cam_id": cam_id,
            "is_broken": cam_id in BROKEN_CAMS,
            "n_sequences": count,
            "imgSize": cam_data["imgSize"][0],
        }

        # Per-axis camera-center statistics.
        for axis_idx, axis_name in enumerate("xyz"):
            axis_vals = centers[:, axis_idx]
            rec[f"pos_{axis_name}_mean"] = axis_vals.mean()
            rec[f"pos_{axis_name}_std"] = axis_vals.std()
            rec[f"pos_{axis_name}_min"] = axis_vals.min()
            rec[f"pos_{axis_name}_max"] = axis_vals.max()

        # Per-axis forward-direction statistics.
        for axis_idx, axis_name in enumerate("xyz"):
            axis_vals = forwards[:, axis_idx]
            rec[f"fwd_{axis_name}_mean"] = axis_vals.mean()
            rec[f"fwd_{axis_name}_std"] = axis_vals.std()

        # Raw focal lengths in pixels.
        rec["fx_mean"] = fx_arr.mean()
        rec["fx_std"] = fx_arr.std()
        rec["fy_mean"] = fy_arr.mean()
        rec["fy_std"] = fy_arr.std()

        # Focal lengths normalized by image dimensions, so cameras with
        # different resolutions can be compared directly.
        widths = np.asarray([size[0] for size in cam_data["imgSize"]])
        heights = np.asarray([size[1] for size in cam_data["imgSize"]])
        rec["fx_norm_mean"] = (fx_arr / widths).mean()
        rec["fx_norm_std"] = (fx_arr / widths).std()
        rec["fy_norm_mean"] = (fy_arr / heights).mean()
        rec["fy_norm_std"] = (fy_arr / heights).std()

        # Distortion coefficients, indexed 0..4.
        for coeff_idx in range(5):
            coeff_vals = dist_arr[:, coeff_idx]
            rec[f"dist_{coeff_idx}_mean"] = coeff_vals.mean()
            rec[f"dist_{coeff_idx}_std"] = coeff_vals.std()

        # Rotation-matrix determinants (a proper rotation has det = +1).
        dets = np.linalg.det(np.asarray(cam_data["R"]))
        rec["R_det_mean"] = dets.mean()
        rec["R_det_std"] = dets.std()
        rec["R_det_min"] = dets.min()
        rec["R_det_max"] = dets.max()

        records.append(rec)

    return pd.DataFrame(records)
|
|
|
|
| |
| |
| |
|
|
def detect_outliers(data, sigma=SIGMA_THRESHOLD):
    """Flag per-sequence outliers for each camera.

    A measurement is flagged when it deviates from that camera's own mean
    by more than ``sigma`` standard deviations. Cameras with fewer than 10
    sequences are skipped.

    Returns a dict: cam_id -> list of (seq_id, reason) tuples.
    """
    flagged = defaultdict(list)

    for cam_id in ALL_CAM_IDS:
        cam_data = data[cam_id]
        if len(cam_data["centers"]) < 10:
            continue

        centers = np.asarray(cam_data["centers"])
        forwards = np.asarray(cam_data["forwards"])
        seq_ids = cam_data["seq_ids"]

        # -- Position outliers, checked one world axis at a time --
        for axis_idx, axis_name in enumerate("xyz"):
            vals = centers[:, axis_idx]
            mean, std = vals.mean(), vals.std()
            if std < 1e-8:
                continue  # axis is effectively constant; nothing to flag
            for idx in np.where(np.abs(vals - mean) > sigma * std)[0]:
                reason = (
                    f"position_{axis_name}: {vals[idx]:.4f} "
                    f"(mean={mean:.4f}, std={std:.4f}, "
                    f"dev={abs(vals[idx]-mean)/std:.1f}sigma)"
                )
                flagged[cam_id].append((seq_ids[idx], reason))

        # -- Forward-direction outliers: angle from the mean direction --
        mean_dir = forwards.mean(axis=0)
        mean_dir = mean_dir / (np.linalg.norm(mean_dir) + 1e-12)
        cosines = np.clip(forwards @ mean_dir, -1.0, 1.0)
        angles_deg = np.degrees(np.arccos(cosines))
        ang_mean, ang_std = angles_deg.mean(), angles_deg.std()
        if ang_std > 1e-8:
            for idx in np.where(np.abs(angles_deg - ang_mean) > sigma * ang_std)[0]:
                reason = (
                    f"forward_angle: {angles_deg[idx]:.2f}deg "
                    f"(mean={ang_mean:.2f}, std={ang_std:.2f}, "
                    f"dev={abs(angles_deg[idx]-ang_mean)/ang_std:.1f}sigma)"
                )
                flagged[cam_id].append((seq_ids[idx], reason))

        # -- Focal-length outliers, fx and fy independently --
        for name in ("fx", "fy"):
            vals = np.asarray(cam_data[name])
            mean, std = vals.mean(), vals.std()
            if std < 1e-8:
                continue
            for idx in np.where(np.abs(vals - mean) > sigma * std)[0]:
                reason = (
                    f"{name}: {vals[idx]:.1f} "
                    f"(mean={mean:.1f}, std={std:.1f}, "
                    f"dev={abs(vals[idx]-mean)/std:.1f}sigma)"
                )
                flagged[cam_id].append((seq_ids[idx], reason))

    return flagged
|
|
|
|
| |
| |
| |
|
|
def cross_camera_analysis(data, stats_df):
    """Compare broken vs good cameras systematically.

    Prints console tables contrasting resolution, focal length, position,
    orientation, distortion, and rotation-matrix quality between the
    BROKEN_CAMS and GOOD_CAMS groups.

    Parameters
    ----------
    data : dict
        Per-camera data from collect_calibration_data().
    stats_df : pd.DataFrame
        Per-camera summary stats (currently unused; kept for interface parity).
    """
    print("\n" + "=" * 80)
    print("CROSS-CAMERA ANALYSIS: BROKEN vs GOOD CAMERAS")
    print("=" * 80)

    broken_ids = sorted(BROKEN_CAMS)
    good_ids = sorted(GOOD_CAMS)

    print(f"\nBroken cameras ({len(broken_ids)}): {broken_ids}")
    print(f"Good cameras ({len(good_ids)}): {good_ids}")

    # --- Image resolution per camera ---
    print("\n--- Image Resolution ---")
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if d["imgSize"]:
            sizes = set(d["imgSize"])
            tag = " [BROKEN]" if cam_id in BROKEN_CAMS else " [GOOD]"
            print(f" {cam_id}{tag}: {sizes}")

    # --- Focal length normalized by image width ---
    print("\n--- Focal Length (normalized by image width: fx/w) ---")
    broken_fx_norm = []
    good_fx_norm = []
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["fx"]:
            continue
        fxs = np.array(d["fx"])
        widths = np.array([s[0] for s in d["imgSize"]])
        fx_norm = fxs / widths
        tag = " [BROKEN]" if cam_id in BROKEN_CAMS else " [GOOD]"
        print(f" {cam_id}{tag}: fx/w = {fx_norm.mean():.4f} +/- {fx_norm.std():.4f} "
              f"(fx_mean={fxs.mean():.1f}, w={widths[0]})")
        if cam_id in BROKEN_CAMS:
            broken_fx_norm.extend(fx_norm.tolist())
        else:
            good_fx_norm.extend(fx_norm.tolist())

    # Guard against empty groups: np.mean([]) would print nan and emit a
    # RuntimeWarning if every camera in a group had no data.
    broken_fx_norm = np.array(broken_fx_norm)
    good_fx_norm = np.array(good_fx_norm)
    if broken_fx_norm.size:
        print(f"\n Broken group fx/w: {broken_fx_norm.mean():.4f} +/- {broken_fx_norm.std():.4f}")
    if good_fx_norm.size:
        print(f" Good group fx/w: {good_fx_norm.mean():.4f} +/- {good_fx_norm.std():.4f}")

    # --- Camera center distance from the world origin ---
    print("\n--- Camera Center Distance from Scene Center ---")
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["centers"]:
            continue
        centers = np.array(d["centers"])
        dists = np.linalg.norm(centers, axis=1)
        tag = " [BROKEN]" if cam_id in BROKEN_CAMS else " [GOOD]"
        print(f" {cam_id}{tag}: dist = {dists.mean():.4f} +/- {dists.std():.4f} "
              f"center_mean=({centers.mean(0)[0]:.3f}, {centers.mean(0)[1]:.3f}, {centers.mean(0)[2]:.3f})")

    # --- Mean forward direction and its angular spread ---
    print("\n--- Forward Direction (mean across sequences) ---")
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["forwards"]:
            continue
        forwards = np.array(d["forwards"])
        mean_fwd = forwards.mean(axis=0)
        mean_fwd_norm = mean_fwd / (np.linalg.norm(mean_fwd) + 1e-12)
        # Angle of each per-sequence forward vector from the mean direction.
        dots = np.clip(forwards @ mean_fwd_norm, -1.0, 1.0)
        angles = np.degrees(np.arccos(dots))
        tag = " [BROKEN]" if cam_id in BROKEN_CAMS else " [GOOD]"
        print(f" {cam_id}{tag}: fwd=({mean_fwd_norm[0]:+.4f}, {mean_fwd_norm[1]:+.4f}, {mean_fwd_norm[2]:+.4f}) "
              f"spread={angles.mean():.2f}+/-{angles.std():.2f} deg")

    # --- Distortion coefficient means ---
    print("\n--- Distortion Coefficients (mean across sequences) ---")
    print(f" {'cam_id':<12} {'tag':<10} {'k1':>10} {'k2':>10} {'p1':>10} {'p2':>10} {'k3':>12}")
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["distCoeff"]:
            continue
        means = np.array(d["distCoeff"]).mean(axis=0)
        tag = "BROKEN" if cam_id in BROKEN_CAMS else "GOOD"
        print(f" {cam_id:<12} [{tag:<6}] "
              f"{means[0]:>10.4f} {means[1]:>10.4f} {means[2]:>10.6f} "
              f"{means[3]:>10.6f} {means[4]:>12.4f}")

    # --- Rotation matrix sanity: det(R) = +1 and R R^T = I ---
    print("\n--- Rotation Matrix Quality (det should be +1.0) ---")
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["R"]:
            continue
        Rs = np.array(d["R"])
        dets = np.linalg.det(Rs)
        # Frobenius-norm orthonormality error, vectorized over the whole
        # (n, 3, 3) stack instead of a Python loop.
        orth_errs = np.linalg.norm(Rs @ Rs.transpose(0, 2, 1) - np.eye(3), axis=(1, 2))
        tag = " [BROKEN]" if cam_id in BROKEN_CAMS else " [GOOD]"
        print(f" {cam_id}{tag}: det={dets.mean():.6f}+/-{dets.std():.2e} "
              f"orth_err={orth_errs.mean():.2e}+/-{orth_errs.std():.2e}")

    # --- Cross-sequence calibration stability ---
    print("\n--- Calibration Consistency (how much does each camera vary across sequences?) ---")
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["centers"]:
            continue
        pos_spread = np.array(d["centers"]).std(axis=0)
        fxs = np.array(d["fx"])
        tag = " [BROKEN]" if cam_id in BROKEN_CAMS else " [GOOD]"
        print(f" {cam_id}{tag}: pos_std=({pos_spread[0]:.5f}, {pos_spread[1]:.5f}, {pos_spread[2]:.5f}) "
              f"fx_std={fxs.std():.2f} fx_cv={fxs.std()/fxs.mean()*100:.2f}%")
|
|
|
|
| |
| |
| |
|
|
def _camera_color_map():
    """Assign each camera a stable color: shades of red for broken, blue for good."""
    broken_colors = plt.cm.Reds(np.linspace(0.4, 0.9, len(BROKEN_CAMS)))
    good_colors = plt.cm.Blues(np.linspace(0.3, 0.9, len(GOOD_CAMS)))
    cam_colors = {}
    bi, gi = 0, 0
    for cid in ALL_CAM_IDS:
        if cid in BROKEN_CAMS:
            cam_colors[cid] = broken_colors[bi]
            bi += 1
        else:
            cam_colors[cid] = good_colors[gi]
            gi += 1
    return cam_colors


def _plot_positions_3d(data, cam_colors, out_dir):
    """3D scatter of camera centers across all sequences."""
    fig = plt.figure(figsize=(16, 12))
    ax = fig.add_subplot(111, projection="3d")
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["centers"]:
            continue
        centers = np.array(d["centers"])
        label = f"{cam_id}" + (" *" if cam_id in BROKEN_CAMS else "")
        marker = "x" if cam_id in BROKEN_CAMS else "o"
        ax.scatter(centers[:, 0], centers[:, 1], centers[:, 2],
                   c=[cam_colors[cam_id]], label=label, alpha=0.15, s=8, marker=marker)
        # Diamond at the per-camera mean position.
        mean_c = centers.mean(axis=0)
        ax.scatter(*mean_c, c=[cam_colors[cam_id]], s=120, marker="D", edgecolors="black", linewidths=1.5)
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    ax.set_title("Camera Positions Across All Sequences\n(diamonds = mean, * = broken cameras)")
    ax.legend(fontsize=7, ncol=2, loc="upper left")
    plt.tight_layout()
    fig.savefig(out_dir / "camera_positions_3d.png", dpi=DPI)
    plt.close(fig)
    print(" Saved camera_positions_3d.png")


def _plot_positions_topdown(data, cam_colors, out_dir):
    """Top-down (XZ-plane) scatter with mean forward-direction arrows."""
    fig, ax = plt.subplots(figsize=FIGSIZE)
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["centers"]:
            continue
        centers = np.array(d["centers"])
        label = f"{cam_id}" + (" [BROKEN]" if cam_id in BROKEN_CAMS else "")
        marker = "x" if cam_id in BROKEN_CAMS else "o"
        ax.scatter(centers[:, 0], centers[:, 2], c=[cam_colors[cam_id]],
                   label=label, alpha=0.1, s=6, marker=marker)
        mean_c = centers.mean(axis=0)
        ax.scatter(mean_c[0], mean_c[2], c=[cam_colors[cam_id]],
                   s=100, marker="D", edgecolors="black", linewidths=1.5, zorder=5)
        # Arrow showing the mean viewing direction projected onto XZ.
        forwards = np.array(d["forwards"])
        mean_fwd = forwards.mean(axis=0)
        mean_fwd = mean_fwd / (np.linalg.norm(mean_fwd) + 1e-12)
        arrow_scale = 0.15
        ax.annotate("", xy=(mean_c[0] + arrow_scale * mean_fwd[0], mean_c[2] + arrow_scale * mean_fwd[2]),
                    xytext=(mean_c[0], mean_c[2]),
                    arrowprops=dict(arrowstyle="->", color=cam_colors[cam_id], lw=2))
    ax.set_xlabel("X (world)")
    ax.set_ylabel("Z (world)")
    ax.set_title("Camera Positions - Top Down (XZ plane)\n(arrows = mean forward direction)")
    ax.legend(fontsize=7, ncol=2, bbox_to_anchor=(1.02, 1), loc="upper left")
    ax.set_aspect("equal")
    plt.tight_layout()
    fig.savefig(out_dir / "camera_positions_topdown.png", dpi=DPI, bbox_inches="tight")
    plt.close(fig)
    print(" Saved camera_positions_topdown.png")


def _plot_position_boxplots(data, cam_colors, out_dir):
    """Per-axis boxplots of camera positions, one panel per world axis."""
    fig, axes = plt.subplots(1, 3, figsize=(18, 6))
    for axis, label in enumerate(["X", "Y", "Z"]):
        ax = axes[axis]
        positions, labels, colors_list = [], [], []
        for cam_id in ALL_CAM_IDS:
            d = data[cam_id]
            if not d["centers"]:
                continue
            centers = np.array(d["centers"])
            positions.append(centers[:, axis])
            labels.append(f"{cam_id}\n{'[B]' if cam_id in BROKEN_CAMS else '[G]'}")
            colors_list.append(cam_colors[cam_id])

        bp = ax.boxplot(positions, labels=labels, patch_artist=True, showfliers=True,
                        flierprops=dict(marker=".", markersize=2, alpha=0.3))
        for patch, color in zip(bp["boxes"], colors_list):
            patch.set_facecolor(color)
            patch.set_alpha(0.6)
        ax.set_ylabel(f"Position {label}")
        ax.set_title(f"Camera Position {label}")
        ax.tick_params(axis="x", rotation=45, labelsize=7)

    plt.suptitle("Camera Position Distribution per Camera ID\n([B]=broken, [G]=good)", fontsize=13)
    plt.tight_layout()
    fig.savefig(out_dir / "camera_position_boxplots.png", dpi=DPI)
    plt.close(fig)
    print(" Saved camera_position_boxplots.png")


def _plot_focal_histograms(data, cam_colors, out_dir):
    """Raw and width-normalized fx histograms, one color per camera."""
    fig, axes = plt.subplots(2, 1, figsize=(16, 10))

    # Top panel: raw fx in pixels.
    ax = axes[0]
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["fx"]:
            continue
        fxs = np.array(d["fx"])
        tag = " [B]" if cam_id in BROKEN_CAMS else " [G]"
        ax.hist(fxs, bins=50, alpha=0.5, label=f"{cam_id}{tag}", color=cam_colors[cam_id])
    ax.set_xlabel("Focal Length fx (pixels)")
    ax.set_ylabel("Count")
    ax.set_title("Focal Length (fx) Distribution per Camera")
    ax.legend(fontsize=7, ncol=3)

    # Bottom panel: fx normalized by image width (resolution-independent).
    ax = axes[1]
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["fx"]:
            continue
        fxs = np.array(d["fx"])
        widths = np.array([s[0] for s in d["imgSize"]])
        tag = " [B]" if cam_id in BROKEN_CAMS else " [G]"
        ax.hist(fxs / widths, bins=50, alpha=0.5, label=f"{cam_id}{tag}", color=cam_colors[cam_id])
    ax.set_xlabel("Normalized Focal Length (fx / image_width)")
    ax.set_ylabel("Count")
    ax.set_title("Normalized Focal Length Distribution per Camera\n(should cluster if cameras have same FOV)")
    ax.legend(fontsize=7, ncol=3)

    plt.tight_layout()
    fig.savefig(out_dir / "focal_length_histograms.png", dpi=DPI)
    plt.close(fig)
    print(" Saved focal_length_histograms.png")


def _plot_broken_vs_good(data, out_dir):
    """2x2 bar-chart comparison of broken vs good cameras.

    All four panels are driven by the full ALL_CAM_IDS list (missing data
    plotted as 0) so bars, colors, and tick labels always line up.
    The original version built colors/labels only from cameras with data,
    which desynchronized the panels whenever a camera had none.
    """
    bar_colors = ["red" if c in BROKEN_CAMS else "steelblue" for c in ALL_CAM_IDS]
    fig, axes = plt.subplots(2, 2, figsize=(16, 12))

    def _styled_bars(ax, values):
        # Shared bar styling + tick labels for all four panels.
        ax.bar(range(len(values)), values, color=bar_colors, edgecolor="black", linewidth=0.5)
        ax.set_xticks(range(len(ALL_CAM_IDS)))
        ax.set_xticklabels(ALL_CAM_IDS, rotation=45, fontsize=8)

    # Panel 1: normalized focal length per camera.
    fx_norms = []
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["fx"]:
            fx_norms.append(0)
            continue
        fxs = np.array(d["fx"])
        widths = np.array([s[0] for s in d["imgSize"]])
        fx_norms.append((fxs / widths).mean())
    ax = axes[0, 0]
    _styled_bars(ax, fx_norms)
    ax.set_ylabel("fx / image_width")
    ax.set_title("Normalized Focal Length per Camera\n(red=broken, blue=good)")
    # Reference line: mean over good cameras that actually have data.
    good_vals = [v for c, v in zip(ALL_CAM_IDS, fx_norms) if c in GOOD_CAMS and data[c]["fx"]]
    ax.axhline(np.mean(good_vals), color="blue", linestyle="--", alpha=0.5, label="good mean")
    ax.legend(fontsize=8)

    # Panel 2: mean camera distance from the world origin.
    mean_dists = []
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["centers"]:
            mean_dists.append(0)
            continue
        mean_dists.append(np.linalg.norm(np.array(d["centers"]).mean(axis=0)))
    ax = axes[0, 1]
    _styled_bars(ax, mean_dists)
    ax.set_ylabel("Distance from origin")
    ax.set_title("Mean Camera Distance from Origin\n(red=broken, blue=good)")

    # Panel 3: positional variability across sequences.
    pos_stds = []
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["centers"]:
            pos_stds.append(0)
            continue
        pos_stds.append(np.array(d["centers"]).std(axis=0).mean())
    ax = axes[1, 0]
    _styled_bars(ax, pos_stds)
    ax.set_ylabel("Mean position std (across x,y,z)")
    ax.set_title("Camera Position Variability\n(lower = more stable across sequences)")

    # Panel 4: focal-length coefficient of variation.
    fx_cvs = []
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["fx"]:
            fx_cvs.append(0)
            continue
        fxs = np.array(d["fx"])
        fx_cvs.append(fxs.std() / fxs.mean() * 100)
    ax = axes[1, 1]
    _styled_bars(ax, fx_cvs)
    ax.set_ylabel("Focal length CV (%)")
    ax.set_title("Focal Length Coefficient of Variation\n(higher = more variable)")

    plt.suptitle("Broken (red) vs Good (blue) Camera Comparison", fontsize=14)
    plt.tight_layout()
    fig.savefig(out_dir / "broken_vs_good_comparison.png", dpi=DPI)
    plt.close(fig)
    print(" Saved broken_vs_good_comparison.png")


def _plot_distortion(data, out_dir):
    """Boxplots of each of the five distortion coefficients per camera."""
    fig, axes = plt.subplots(1, 5, figsize=(22, 5))
    dist_names = ["k1", "k2", "p1", "p2", "k3"]
    for di in range(5):
        ax = axes[di]
        vals, colors_list, labels = [], [], []
        for cam_id in ALL_CAM_IDS:
            d = data[cam_id]
            if not d["distCoeff"]:
                continue
            vals.append(np.array(d["distCoeff"])[:, di])
            colors_list.append("red" if cam_id in BROKEN_CAMS else "steelblue")
            labels.append(cam_id)
        bp = ax.boxplot(vals, labels=labels, patch_artist=True, showfliers=False)
        for patch, color in zip(bp["boxes"], colors_list):
            patch.set_facecolor(color)
            patch.set_alpha(0.6)
        ax.set_title(f"{dist_names[di]}")
        ax.tick_params(axis="x", rotation=90, labelsize=7)

    plt.suptitle("Distortion Coefficients per Camera (red=broken, blue=good)", fontsize=13)
    plt.tight_layout()
    fig.savefig(out_dir / "distortion_coefficients.png", dpi=DPI)
    plt.close(fig)
    print(" Saved distortion_coefficients.png")


def _plot_forward_directions(data, cam_colors, out_dir):
    """Scatter of per-sequence forward vectors (XZ) with per-camera means."""
    fig, ax = plt.subplots(figsize=(10, 10))
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["forwards"]:
            continue
        forwards = np.array(d["forwards"])
        mean_fwd = forwards.mean(axis=0)
        mean_fwd = mean_fwd / (np.linalg.norm(mean_fwd) + 1e-12)
        tag = " [B]" if cam_id in BROKEN_CAMS else " [G]"
        marker = "x" if cam_id in BROKEN_CAMS else "o"
        ax.scatter(forwards[:, 0], forwards[:, 2], c=[cam_colors[cam_id]],
                   alpha=0.05, s=4, marker=marker)
        ax.scatter(mean_fwd[0], mean_fwd[2], c=[cam_colors[cam_id]],
                   s=80, marker="D", edgecolors="black", linewidths=1.5,
                   label=f"{cam_id}{tag}", zorder=5)
        ax.annotate(cam_id, (mean_fwd[0], mean_fwd[2]), fontsize=6, ha="center", va="bottom")
    ax.set_xlabel("Forward X")
    ax.set_ylabel("Forward Z")
    ax.set_title("Camera Forward Directions (XZ plane)\n(diamonds = mean, scatter = per-sequence)")
    ax.set_aspect("equal")
    ax.legend(fontsize=6, ncol=2, bbox_to_anchor=(1.02, 1), loc="upper left")
    plt.tight_layout()
    fig.savefig(out_dir / "forward_directions.png", dpi=DPI, bbox_inches="tight")
    plt.close(fig)
    print(" Saved forward_directions.png")


def _plot_outlier_counts(outliers, out_dir):
    """Bar chart of the number of outlier sequences per camera."""
    # Colors are derived from ALL_CAM_IDS (same list used for the bars and
    # tick labels below), so lengths always match.
    bar_colors = ["red" if c in BROKEN_CAMS else "steelblue" for c in ALL_CAM_IDS]
    fig, ax = plt.subplots(figsize=FIGSIZE)
    # Count distinct sequence ids per camera (a sequence may carry several flags).
    outlier_counts = [len({s for s, _ in outliers.get(c, [])}) for c in ALL_CAM_IDS]
    ax.bar(range(len(ALL_CAM_IDS)), outlier_counts, color=bar_colors, edgecolor="black", linewidth=0.5)
    ax.set_xticks(range(len(ALL_CAM_IDS)))
    ax.set_xticklabels(ALL_CAM_IDS, rotation=45, fontsize=8)
    ax.set_ylabel(f"Number of outlier sequences (>{SIGMA_THRESHOLD}sigma)")
    ax.set_title(f"Outlier Sequences per Camera (>{SIGMA_THRESHOLD}sigma threshold)")
    for i, v in enumerate(outlier_counts):
        if v > 0:
            ax.text(i, v + 0.5, str(v), ha="center", fontsize=8)
    plt.tight_layout()
    fig.savefig(out_dir / "outlier_counts.png", dpi=DPI)
    plt.close(fig)
    print(" Saved outlier_counts.png")


def _plot_focal_violin(data, out_dir):
    """Violin plot of width-normalized focal length per camera."""
    fig, ax = plt.subplots(figsize=(16, 6))
    all_fx_data, all_labels = [], []
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["fx"]:
            continue
        fxs = np.array(d["fx"])
        widths = np.array([s[0] for s in d["imgSize"]])
        all_fx_data.append(fxs / widths)
        all_labels.append(cam_id)

    parts = ax.violinplot(all_fx_data, showmeans=True, showmedians=True)
    for body, cam_id in zip(parts["bodies"], all_labels):
        body.set_facecolor("red" if cam_id in BROKEN_CAMS else "steelblue")
        body.set_alpha(0.6)
    ax.set_xticks(range(1, len(all_labels) + 1))
    ax.set_xticklabels(all_labels, rotation=45, fontsize=8)
    ax.set_ylabel("Normalized focal length (fx / image_width)")
    ax.set_title("Normalized Focal Length Distribution (violin)\nred=broken, blue=good")
    plt.tight_layout()
    fig.savefig(out_dir / "focal_length_violin.png", dpi=DPI)
    plt.close(fig)
    print(" Saved focal_length_violin.png")


def make_plots(data, stats_df, outliers, out_dir: Path):
    """Generate all diagnostic plots into ``out_dir`` (created if needed).

    ``stats_df`` is accepted for interface parity; the plots are driven
    directly from ``data`` and ``outliers``.
    """
    out_dir.mkdir(parents=True, exist_ok=True)
    cam_colors = _camera_color_map()

    _plot_positions_3d(data, cam_colors, out_dir)
    _plot_positions_topdown(data, cam_colors, out_dir)
    _plot_position_boxplots(data, cam_colors, out_dir)
    _plot_focal_histograms(data, cam_colors, out_dir)
    _plot_broken_vs_good(data, out_dir)
    _plot_distortion(data, out_dir)
    _plot_forward_directions(data, cam_colors, out_dir)
    _plot_outlier_counts(outliers, out_dir)
    _plot_focal_violin(data, out_dir)
|
|
|
|
| |
| |
| |
|
|
def print_summary(data, stats_df, outliers):
    """Print a comprehensive summary of findings.

    Parameters
    ----------
    data : dict
        Per-camera data from collect_calibration_data().
    stats_df : pd.DataFrame
        Per-camera summary statistics from compute_per_camera_stats().
    outliers : dict
        cam_id -> list of (seq_id, reason) from detect_outliers().
    """
    print("\n" + "=" * 80)
    print("EXTRINSICS ANALYSIS SUMMARY")
    print("=" * 80)

    print(f"\nTotal cameras analyzed: {len(ALL_CAM_IDS)}")
    print(f"Broken cameras (mesh projection issues): {sorted(BROKEN_CAMS)}")
    print(f"Good cameras: {sorted(GOOD_CAMS)}")

    # --- Per-camera statistics table ---
    print(f"\n{'='*80}")
    print("PER-CAMERA STATISTICS")
    print(f"{'='*80}")
    print(f"\n{'cam_id':<12} {'broken':<8} {'n_seq':<8} {'imgSize':<14} "
          f"{'fx_mean':>10} {'fx/w':>8} {'fy_mean':>10} "
          f"{'pos_x_std':>10} {'pos_y_std':>10} {'pos_z_std':>10}")
    print("-" * 120)
    for _, row in stats_df.iterrows():
        print(f"{row['cam_id']:<12} {'YES' if row['is_broken'] else 'no':<8} "
              f"{row['n_sequences']:<8} {str(row['imgSize']):<14} "
              f"{row['fx_mean']:>10.1f} {row['fx_norm_mean']:>8.4f} {row['fy_mean']:>10.1f} "
              f"{row['pos_x_std']:>10.5f} {row['pos_y_std']:>10.5f} {row['pos_z_std']:>10.5f}")

    # --- Outlier report ---
    print(f"\n{'='*80}")
    print(f"OUTLIER DETECTION (>{SIGMA_THRESHOLD}-sigma threshold)")
    print(f"{'='*80}")
    total_outlier_seqs = 0
    for cam_id in ALL_CAM_IDS:
        cam_outliers = outliers.get(cam_id, [])
        unique_seqs = set(s for s, _ in cam_outliers)
        n = len(unique_seqs)
        total_outlier_seqs += n
        tag = " [BROKEN]" if cam_id in BROKEN_CAMS else ""
        if n > 0:
            print(f"\n {cam_id}{tag}: {n} outlier sequences ({len(cam_outliers)} total flags)")
            # Show at most the first 10 flags, one line per distinct sequence.
            shown = set()
            for seq_id, reason in cam_outliers[:10]:
                if seq_id not in shown:
                    print(f" - {seq_id}: {reason}")
                    shown.add(seq_id)
            if len(cam_outliers) > 10:
                print(f" ... and {len(cam_outliers) - 10} more")
        else:
            print(f"\n {cam_id}{tag}: 0 outlier sequences")
    print(f"\n Total outlier (camera, sequence) pairs: {total_outlier_seqs}")

    # --- Key findings ---
    print(f"\n{'='*80}")
    print("KEY FINDINGS")
    print(f"{'='*80}")

    # Mean width-normalized focal length per camera with data.
    fx_norm_per_cam = {}
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if d["fx"]:
            fxs = np.array(d["fx"])
            widths = np.array([s[0] for s in d["imgSize"]])
            fx_norm_per_cam[cam_id] = (fxs / widths).mean()

    # Split cameras into low/high groups around 0.8x the median fx/w.
    median_fx = np.median(list(fx_norm_per_cam.values()))
    low_fx_cams = [c for c, v in fx_norm_per_cam.items() if v < median_fx * 0.8]
    high_fx_cams = [c for c, v in fx_norm_per_cam.items() if v >= median_fx * 0.8]

    print("\n1. IMAGE RESOLUTION:")
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if d["imgSize"]:
            tag = " [BROKEN]" if cam_id in BROKEN_CAMS else " [GOOD]"
            print(f" {cam_id}{tag}: {d['imgSize'][0]}")

    print("\n2. FOCAL LENGTH CLUSTERING:")
    print(f" Low normalized fx (< {median_fx*0.8:.4f}):")
    for c in low_fx_cams:
        tag = " [BROKEN]" if c in BROKEN_CAMS else " [GOOD]"
        print(f" {c}{tag}: fx/w = {fx_norm_per_cam[c]:.4f}")
    print(f" High normalized fx (>= {median_fx*0.8:.4f}):")
    for c in high_fx_cams:
        tag = " [BROKEN]" if c in BROKEN_CAMS else " [GOOD]"
        print(f" {c}{tag}: fx/w = {fx_norm_per_cam[c]:.4f}")

    broken_in_low = set(low_fx_cams) & BROKEN_CAMS
    broken_in_high = set(high_fx_cams) & BROKEN_CAMS

    print("\n3. BROKEN CAMERA DISTRIBUTION IN FOCAL LENGTH GROUPS:")
    print(f" Broken cameras in LOW fx group: {sorted(broken_in_low)}")
    print(f" Broken cameras in HIGH fx group: {sorted(broken_in_high)}")

    print("\n4. HYPOTHESIS TESTING:")
    # Compare group-level fx/w; skip if either group has no data.
    broken_fx_norms = [fx_norm_per_cam[c] for c in BROKEN_CAMS if c in fx_norm_per_cam]
    good_fx_norms_list = [fx_norm_per_cam[c] for c in GOOD_CAMS if c in fx_norm_per_cam]
    if broken_fx_norms and good_fx_norms_list:
        print(f" Broken cameras mean fx/w: {np.mean(broken_fx_norms):.4f} +/- {np.std(broken_fx_norms):.4f}")
        print(f" Good cameras mean fx/w: {np.mean(good_fx_norms_list):.4f} +/- {np.std(good_fx_norms_list):.4f}")

    print("\n5. DISTORTION MAGNITUDE:")
    for cam_id in ALL_CAM_IDS:
        d = data[cam_id]
        if not d["distCoeff"]:
            continue
        # L2 norm over all 5 coefficients as a scalar distortion proxy.
        dist_mag = np.linalg.norm(np.array(d["distCoeff"]), axis=1)
        tag = " [BROKEN]" if cam_id in BROKEN_CAMS else " [GOOD]"
        print(f" {cam_id}{tag}: |distCoeff|_L2 = {dist_mag.mean():.2f} +/- {dist_mag.std():.2f}")

    print(f"\n{'='*80}")
|
|
|
| |
| |
| |
|
|
def main():
    """CLI entry point: load the dataset, run all analyses, save plots and CSV."""
    parser = argparse.ArgumentParser(description="Analyze camera extrinsics across TACO dataset")
    parser.add_argument(
        "--csv", type=Path,
        default=DATASET_ROOT / "taco_info.csv",
        help="Path to taco_info.csv",
    )
    parser.add_argument(
        "--root-dir", type=Path,
        default=DATASET_ROOT,
        help="Root directory of the TACO dataset",
    )
    parser.add_argument(
        "--output-dir", type=Path,
        default=Path(__file__).resolve().parent / "plots" / "extrinsics",
        help="Directory to save plots",
    )
    args = parser.parse_args()

    print("=" * 80)
    print("TACO DATASET - CAMERA EXTRINSICS ANALYSIS")
    print("=" * 80)
    print(f"CSV: {args.csv}")
    print(f"Root dir: {args.root_dir}")
    print(f"Output dir: {args.output_dir}")

    # Minimal sampler/view configuration: this script only uses the
    # dataset's metadata and calibration loading, not view sampling.
    print("\nLoading dataset metadata...")
    dataset = TACODataset(
        csv_path=args.csv,
        root_dir=args.root_dir,
        view_sampler=RandomViewSampler(),
        n_context_views=1,
        n_target_views=0,
        n_past=1,
        n_future=0,
        seed=0,
    )
    print(f"Dataset: {dataset}")
    print(f"Number of sequences: {dataset.num_sequences}")

    print("\nStep 1: Loading calibrations from all sequences...")
    data = collect_calibration_data(dataset)

    for cam_id in ALL_CAM_IDS:
        n = len(data[cam_id]["centers"])
        tag = " [BROKEN]" if cam_id in BROKEN_CAMS else ""
        print(f" {cam_id}{tag}: {n} calibrations loaded")

    print("\nStep 2: Computing per-camera statistics...")
    stats_df = compute_per_camera_stats(data)

    print("\nStep 3: Detecting outliers...")
    outliers = detect_outliers(data)

    # Step number added for consistency with the surrounding progress prints.
    print("\nStep 4: Cross-camera analysis...")
    cross_camera_analysis(data, stats_df)

    print("\nStep 5: Generating plots...")
    make_plots(data, stats_df, outliers, args.output_dir)

    print_summary(data, stats_df, outliers)

    # Persist the per-camera stats. Ensure the directory exists explicitly
    # rather than relying on make_plots having created it.
    args.output_dir.mkdir(parents=True, exist_ok=True)
    stats_csv_path = args.output_dir / "camera_stats.csv"
    stats_df.to_csv(stats_csv_path, index=False)
    print(f"\nCamera stats saved to: {stats_csv_path}")

    print(f"\nAll plots saved to: {args.output_dir}/")
    print("Done!")
|
|
|
|
# Script entry point.
if __name__ == "__main__":
    main()
|
|