"""Visualize TACO camera setup and object meshes in 3D.

For selected sequences, plots:
- 12 allocentric camera positions + frustums (colour-coded)
- Egocentric camera position + trajectory (if available)
- Object meshes at frame 0 pose (tool + target)

Camera convention (from TACO dataset_utils):
  calibration.json stores per-camera:
    K: (9,) flat 3x3 intrinsic (row-major)
    R: (9,) flat 3x3 rotation (world-to-camera)
    T: (3,) translation (world-to-camera)
  So: p_cam = R @ p_world + T
  Camera center in world = -R^T @ T

Uses TACODataset for metadata and calibration caching.

Usage:
    python visualize_taco_3d_scene.py \
        --csv ../taco_info.csv \
        --root ../taco_dataset \
        --save-dir plots/scene_3d
"""
|
|
| import argparse |
| from pathlib import Path |
|
|
| import matplotlib |
| matplotlib.use("Agg") |
| import matplotlib.pyplot as plt |
| import numpy as np |
| import pandas as pd |
| import trimesh |
| from mpl_toolkits.mplot3d.art3d import Poly3DCollection |
|
|
| from taco_dataset_loader import TACODataset |
| from view_sampler import RandomViewSampler |
|
|
|
|
| |
|
|
def camera_center_world(R, T):
    """Return the camera's optical center in world coordinates.

    With world-to-camera extrinsics (p_cam = R @ p_world + T), the
    center is the world point that maps to the camera origin, i.e. the
    solution of R @ c + T = 0, which is c = -R^T @ T.
    """
    return -(R.T @ T)
|
|
|
|
def camera_axes_world(R, scale=0.15):
    """Return the camera's (right, down, forward) axes as world-frame vectors.

    R is the world-to-camera rotation, so R^T maps camera-frame unit
    vectors (+x, +y, +z) into the world frame; each axis is scaled to
    `scale` for plotting.
    """
    basis = np.eye(3) * scale            # camera-frame +x, +y, +z, pre-scaled
    right, down, forward = (R.T @ basis).T
    return right, down, forward
|
|
|
|
def camera_frustum_world(R, T, K, w, h, scale=0.12):
    """Return the apex and base corners of a camera frustum in world coords.

    Returns:
        (center, corners_world): the optical center (apex) and a (4, 3)
        array of the image corners back-projected to depth `scale`,
        ordered top-left, top-right, bottom-right, bottom-left.
    """
    center = camera_center_world(R, T)
    fx, fy = K[0, 0], K[1, 1]
    cx, cy = K[0, 2], K[1, 2]

    # Back-project the four pixel corners of the image to depth `scale`
    # in the camera frame via the pinhole model: x = (u - cx) / fx * z.
    pix_corners = [(0.0, 0.0), (w, 0.0), (w, h), (0.0, h)]
    corners_cam = np.array(
        [[(u - cx) / fx * scale, (v - cy) / fy * scale, scale] for u, v in pix_corners]
    )

    # Rotate camera-frame points into the world frame and translate to
    # the camera center: p_world = R^T @ p_cam + c  (equivalently X @ R).
    corners_world = corners_cam @ R + center
    return center, corners_world
|
|
|
|
| |
|
|
def draw_frustum(ax, center, corners, color, alpha=0.15, label=None):
    """Draw a camera frustum: edge rays, base rectangle, and shaded faces."""
    # Rays from the optical center (apex) out to each base corner.
    for pt in corners:
        xs, ys, zs = zip(center, pt)
        ax.plot3D(xs, ys, zs, color=color, linewidth=1.0, alpha=0.8)

    # Closed rectangle through the four base corners.
    loop = list(corners) + [corners[0]]
    loop_xs, loop_ys, loop_zs = zip(*loop)
    ax.plot3D(loop_xs, loop_ys, loop_zs, color=color, linewidth=0.8, alpha=0.6)

    # Four triangular side faces (apex + adjacent corner pair) plus the base.
    faces = [[center, corners[k], corners[(k + 1) % 4]] for k in range(4)]
    faces.append(list(corners))
    poly = Poly3DCollection(
        faces, alpha=alpha, facecolor=color, edgecolor=color, linewidth=0.3
    )
    ax.add_collection3d(poly)

    if label:
        ax.text(*center, label, fontsize=5, color=color, ha="center")
|
|
|
|
def draw_mesh(ax, vertices, faces, color, alpha=0.3, max_faces=2000):
    """Render a triangle mesh as a semi-transparent Poly3DCollection.

    Faces beyond `max_faces` are randomly subsampled (without
    replacement) to keep rendering fast on dense meshes.
    """
    n_faces = len(faces)
    if n_faces > max_faces:
        keep = np.random.choice(n_faces, max_faces, replace=False)
        faces = faces[keep]
    triangles = vertices[faces]  # (n, 3, 3): xyz of each face's corners
    ax.add_collection3d(
        Poly3DCollection(triangles, alpha=alpha, facecolor=color, edgecolor="none")
    )
|
|
|
|
def plot_scene(
    calib: dict,
    root: Path,
    row: pd.Series,
    frame_idx: int = 0,
    title: str = "",
    figsize: tuple = (16, 12),
) -> plt.Figure:
    """Create a 3D scene plot with camera frustums and posed object meshes.

    Args:
        calib: per-camera dict with flat "K"/"R"/"T" arrays and "imgSize",
            following the TACO convention p_cam = R @ p_world + T.
        root: dataset root directory.
        row: sequence metadata row; "ego_camera_params_dir" and
            "object_poses_dir" are read if present and non-NaN.
        frame_idx: frame whose object poses / ego position are shown.
        title: figure title.
        figsize: matplotlib figure size in inches.

    Returns:
        The created Figure; the caller is responsible for closing it.
    """
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111, projection="3d")

    # One distinct colour per camera.  Size the colormap to the actual
    # camera count instead of hard-coding 12, so a sequence with more
    # cameras no longer raises IndexError on cmap[i].
    cmap = plt.cm.tab20(np.linspace(0, 1, max(len(calib), 1)))

    all_positions = []

    # --- Allocentric cameras: one frustum per calibrated camera ---
    for i, cam_id in enumerate(sorted(calib.keys())):
        data = calib[cam_id]
        R = np.array(data["R"], dtype=np.float64).reshape(3, 3)
        T = np.array(data["T"], dtype=np.float64).reshape(3)
        K = np.array(data["K"], dtype=np.float64).reshape(3, 3)
        w, h = data["imgSize"]

        center, corners = camera_frustum_world(R, T, K, w, h, scale=0.12)
        all_positions.append(center)
        draw_frustum(ax, center, corners, color=cmap[i], alpha=0.12, label=cam_id)

    all_positions = np.array(all_positions)

    # --- Egocentric camera trajectory (optional) ---
    ego_cam_dir = row.get("ego_camera_params_dir", "")
    if ego_cam_dir and pd.notna(ego_cam_dir):
        ego_ext_path = root / ego_cam_dir / "egocentric_frame_extrinsic.npy"
        if ego_ext_path.exists():
            # Per-frame 4x4 world-to-camera extrinsics; recover centers.
            ego_exts = np.load(str(ego_ext_path))
            ego_centers = np.array(
                [camera_center_world(ext[:3, :3], ext[:3, 3]) for ext in ego_exts]
            )
            ax.plot3D(ego_centers[:, 0], ego_centers[:, 1], ego_centers[:, 2],
                      color="magenta", linewidth=1.5, alpha=0.7, label="ego trajectory")
            # Guard against trajectories shorter than frame_idx (object
            # poses below are already guarded; ego was not).
            if frame_idx < len(ego_centers):
                ax.scatter(*ego_centers[frame_idx], color="magenta", s=60, marker="^",
                           zorder=5, label=f"ego frame {frame_idx}")

    # --- Object meshes at the requested frame's pose ---
    obj_dir = row.get("object_poses_dir", "")
    obj_colors = {"tool": "dodgerblue", "target": "coral"}
    models_dir = root / "object_models_released"

    if obj_dir and pd.notna(obj_dir):
        obj_path = root / obj_dir
        for npy_file in sorted(obj_path.glob("*.npy")):
            poses = np.load(str(npy_file))
            if frame_idx >= len(poses):
                continue
            pose = poses[frame_idx].astype(np.float64)

            # Stems are expected to look like "<role>_<obj_id>" (e.g.
            # "tool_001"); skip non-conforming files instead of raising
            # IndexError on the split.
            parts = npy_file.stem.split("_")
            if len(parts) < 2:
                continue
            role, obj_id = parts[0], parts[1]
            model_path = models_dir / f"{obj_id}_cm.obj"

            if model_path.exists():
                mesh = trimesh.load(str(model_path), process=False)
                verts = np.array(mesh.vertices, dtype=np.float64)
                verts = verts / 100.0  # model files are in cm; plot in metres
                # Apply the 4x4 pose: p_world = R @ v + t.
                verts_world = (pose[:3, :3] @ verts.T).T + pose[:3, 3]
                faces_arr = np.array(mesh.faces)
                color = obj_colors.get(role, "gray")
                draw_mesh(ax, verts_world, faces_arr, color=color, alpha=0.25)
                centroid = verts_world.mean(axis=0)
                ax.text(*centroid, f"{role}_{obj_id}", fontsize=7, color=color)

    # --- World-origin axis triad for orientation reference ---
    axis_len = 0.3
    ax.quiver(0, 0, 0, axis_len, 0, 0, color="red", arrow_length_ratio=0.1, linewidth=2)
    ax.quiver(0, 0, 0, 0, axis_len, 0, color="green", arrow_length_ratio=0.1, linewidth=2)
    ax.quiver(0, 0, 0, 0, 0, axis_len, color="blue", arrow_length_ratio=0.1, linewidth=2)
    ax.text(axis_len * 1.1, 0, 0, "X", color="red", fontsize=9)
    ax.text(0, axis_len * 1.1, 0, "Y", color="green", fontsize=9)
    ax.text(0, 0, axis_len * 1.1, "Z", color="blue", fontsize=9)

    ax.set_xlabel("X (m)")
    ax.set_ylabel("Y (m)")
    ax.set_zlabel("Z (m)")
    ax.set_title(title, fontsize=12, pad=20)

    # Roughly equal aspect: cube the axis limits around the camera-
    # position centroid with a 30% margin.
    if len(all_positions) > 0:
        mid = all_positions.mean(axis=0)
        max_range = (all_positions.max(axis=0) - all_positions.min(axis=0)).max() / 2 * 1.3
        ax.set_xlim(mid[0] - max_range, mid[0] + max_range)
        ax.set_ylim(mid[1] - max_range, mid[1] + max_range)
        ax.set_zlim(mid[2] - max_range, mid[2] + max_range)

    ax.legend(fontsize=8, loc="upper right")
    return fig
|
|
|
|
| |
|
|
def main():
    """Sample complete sequences (one per action) and save two 3D views each."""
    parser = argparse.ArgumentParser(description="3D visualization of TACO cameras + objects")
    parser.add_argument("--csv", type=Path,
                        default=Path(__file__).resolve().parent.parent / "taco_info.csv")
    parser.add_argument("--root", type=Path,
                        default=Path(__file__).resolve().parent.parent)
    parser.add_argument("--save-dir", type=Path,
                        default=Path(__file__).resolve().parent / "plots" / "scene_3d")
    parser.add_argument("--n-sequences", type=int, default=5)
    parser.add_argument("--seed", type=int, default=42)
    args = parser.parse_args()

    # Minimal dataset instance: used only for metadata + calibration cache.
    dataset = TACODataset(
        csv_path=args.csv,
        root_dir=args.root,
        view_sampler=RandomViewSampler(),
        n_context_views=1,
        n_target_views=0,
        n_past=1,
        n_future=0,
        seed=0,
    )
    df = dataset.meta

    # Positional indices of rows with every modality present.  The
    # original mixed df.index labels with .iloc positional access, which
    # is only correct for a default RangeIndex; using positions
    # throughout keeps .iloc and load_calibration() consistent.
    complete_mask = df["all_modalities_complete"].to_numpy()
    complete_indices = np.flatnonzero(complete_mask).tolist()
    args.save_dir.mkdir(parents=True, exist_ok=True)

    # Shuffle actions deterministically, then take at most one sequence
    # per action for variety across the sampled set.
    rng = np.random.RandomState(args.seed)
    actions = sorted(df["action"].iloc[complete_indices].unique())
    rng.shuffle(actions)

    sampled = []
    for action in actions:
        subset = [i for i in complete_indices if df.iloc[i]["action"] == action]
        if subset:
            sampled.append(rng.choice(subset))
        if len(sampled) >= args.n_sequences:
            break

    for i, seq_idx in enumerate(sampled):
        row = df.iloc[seq_idx]
        seq_id = row["sequence_id"]
        print(f"[{i+1}/{len(sampled)}] {seq_id}")

        calib = dataset.load_calibration(seq_idx)

        # Two fixed viewpoints per sequence so occlusions differ.
        for elev, azim, suffix in [(25, -60, "view1"), (45, 30, "view2")]:
            fig = plot_scene(
                calib=calib,
                root=dataset.root,
                row=row,
                frame_idx=0,
                title=f"{seq_id} (frame 0)",
            )
            fig.axes[0].view_init(elev=elev, azim=azim)
            # Sanitize the sequence id into a filesystem-safe filename.
            safe_name = (seq_id.replace("/", "_").replace(" ", "")
                         .replace(",", "").replace("(", "").replace(")", ""))
            fname = f"{safe_name}_{suffix}.png"
            fig.savefig(args.save_dir / fname, dpi=150, bbox_inches="tight")
            plt.close(fig)
            print(f"  saved {fname}")

    print(f"\nDone — {len(sampled) * 2} images saved to {args.save_dir}/")
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|