import json
import math
import os
import random

import numpy as np
import torch
from pytorch3d.transforms import so3_exp_map


def get_camera_center(R, T):
    """Compute the camera center in world coordinates.

    Under the PyTorch3D convention x_cam = x_world @ R + T, the center C
    satisfies C @ R + T = 0, hence C = -T @ R^T.
    """
    return -T @ R.T


def get_view_direction(R):
    """Return the camera's optical-axis direction in world coordinates.

    PyTorch3D convention: x_cam = x_world @ R + T. Camera-frame directions
    map back to world via d_world = d_cam @ R^T, so the camera Z axis
    (0, 0, 1) corresponds to the third column (index 2) of R.
    """
    return R[:, 2]


def get_camera_up(R):
    """Return the camera's up vector in world coordinates.

    Under the PyTorch3D/CO3D convention, the second column (index 1) of R
    is the camera Y axis expressed in the world frame. PyTorch3D cameras
    have +Y pointing up in view space (unlike OpenCV's screen-down Y), so
    the up vector is R[:, 1] directly.
    """
    return R[:, 1]


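# A minimal sanity check of the conventions above (illustrative only; this
# demo function is ours, not part of the pipeline).
def _demo_camera_conventions():
    """For R = I and T = (0, 0, 5), the PyTorch3D convention
    x_cam = x_world @ R + T puts the camera at (0, 0, -5), looking down the
    world +Z axis with world +Y as up."""
    R = np.eye(3)
    T = np.array([0.0, 0.0, 5.0])
    assert np.allclose(get_camera_center(R, T), [0.0, 0.0, -5.0])
    assert np.allclose(get_view_direction(R), [0.0, 0.0, 1.0])
    assert np.allclose(get_camera_up(R), [0.0, 1.0, 0.0])

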
def align_scene_to_standard_up(seq_data, to_vec=(0.0, 1.0, 0.0),
                               from_vec=(-0.0396, -0.8306, -0.5554)):
    """Rotate a CO3D scene into a canonical frame whose ground normal is +Y.

    Based on the method discussed in CO3D GitHub issue #64.

    Args:
        seq_data: sequence data {frame_id: {'R': ..., 'T': ..., 'path': ...}}
        to_vec: target up vector (defaults to the +Y axis)
        from_vec: ground normal of the raw CO3D scene (from the official issue)

    Returns:
        aligned_seq_data: the aligned sequence data
        R_align: the alignment rotation matrix (column convention)
        mean_center: centroid of the original camera centers
    """
    centers = [get_camera_center(info['R'], info['T'])
               for info in seq_data.values()]
    mean_center = np.mean(centers, axis=0)

    # Build the rotation that takes from_vec onto to_vec. The raw cross
    # product only has magnitude sin(theta), so it must be rescaled to theta
    # before being passed to so3_exp_map, which expects an axis-angle vector
    # (axis * angle). Degenerate for exactly antiparallel vectors, which
    # does not occur for the CO3D default.
    f = np.asarray(from_vec, dtype=np.float32)
    f = f / np.linalg.norm(f)
    t = np.asarray(to_vec, dtype=np.float32)
    t = t / np.linalg.norm(t)
    axis = np.cross(f, t)
    sin_theta = np.linalg.norm(axis)
    angle = np.arctan2(sin_theta, np.dot(f, t))
    log_rot = torch.from_numpy(
        (axis / max(sin_theta, 1e-8) * angle).astype(np.float32))
    R_align = so3_exp_map(log_rot[None])[0].numpy()  # R_align @ f == t

    aligned_seq_data = {}
    for fid, info in seq_data.items():
        # Rotating the world (x' = R_align @ x in column form) composes with
        # the PyTorch3D extrinsics x_cam = x_world @ R + T as R' = R_align @ R;
        # the camera-frame translation T is unaffected by a world rotation.
        aligned_seq_data[fid] = {
            'R': R_align @ info['R'],
            'T': info['T'],
            'path': info['path'],
        }

    return aligned_seq_data, R_align, mean_center


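# A small illustrative check of the alignment (demo-only helper; the camera
# poses below are synthetic).
def _demo_alignment():
    seq = {
        0: {'R': np.eye(3), 'T': np.array([0.0, 0.0, 5.0]), 'path': 'f0.jpg'},
        1: {'R': np.eye(3), 'T': np.array([1.0, 0.0, 5.0]), 'path': 'f1.jpg'},
    }
    aligned, R_align, center = align_scene_to_standard_up(seq)
    f = np.array([-0.0396, -0.8306, -0.5554])
    f = f / np.linalg.norm(f)
    # The alignment rotation should map the CO3D ground normal onto +Y.
    assert np.allclose(R_align @ f, [0.0, 1.0, 0.0], atol=1e-4)
    # A pure world rotation preserves distances between camera centers.
    c0 = get_camera_center(aligned[0]['R'], aligned[0]['T'])
    c1 = get_camera_center(aligned[1]['R'], aligned[1]['T'])
    assert np.isclose(np.linalg.norm(c0 - c1), 1.0, atol=1e-4)

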
def get_sequence_geometry(seq_data, align_to_standard=True):
    """Compute the geometric frame of a sequence.

    Args:
        seq_data: sequence data
        align_to_standard: whether to align the scene to the canonical
            frame (+Y up) first

    Returns:
        mean_center: centroid of the camera centers
        basis: 3x3 basis matrix with rows [u, v, normal]
        seq_data: the aligned sequence data if align_to_standard is True,
            otherwise the input unchanged
    """
    if align_to_standard:
        aligned_seq_data, R_align, mean_center = align_scene_to_standard_up(seq_data)

        # Move the centroid into the aligned frame so it matches the
        # aligned camera centers.
        mean_center = R_align @ mean_center

        # After alignment the ground normal is the world +Y axis, so the
        # in-plane axes can simply be the world +X and +Z axes.
        world_up = np.array([0.0, 1.0, 0.0])
        u = np.array([1.0, 0.0, 0.0])
        v = np.array([0.0, 0.0, 1.0])
        basis = np.stack([u, v, world_up], axis=0)

        return mean_center, basis, aligned_seq_data
    else:
        centers = [get_camera_center(info['R'], info['T'])
                   for info in seq_data.values()]
        mean_center = np.mean(centers, axis=0)

        # Use the fixed CO3D ground normal (from GitHub issue #64).
        co3d_ground_normal = np.array([-0.0396, -0.8306, -0.5554])
        co3d_ground_normal = co3d_ground_normal / np.linalg.norm(co3d_ground_normal)

        # Pick an arbitrary vector that is not nearly parallel to the
        # normal...
        if abs(co3d_ground_normal[0]) < 0.9:
            arbitrary = np.array([1.0, 0.0, 0.0])
        else:
            arbitrary = np.array([0.0, 1.0, 0.0])

        # ...and Gram-Schmidt it into an orthonormal in-plane basis (u, v).
        u = arbitrary - np.dot(arbitrary, co3d_ground_normal) * co3d_ground_normal
        u = u / np.linalg.norm(u)

        v = np.cross(co3d_ground_normal, u)
        v = v / np.linalg.norm(v)

        basis = np.stack([u, v, co3d_ground_normal], axis=0)

        return mean_center, basis, seq_data


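# Usage sketch with synthetic upright poses on a circle (illustrative only):
# fetch the scene frame and basis without the canonical alignment.
def _demo_sequence_geometry():
    seq = {i: {'R': np.eye(3),
               'T': np.array([np.cos(a), 0.0, np.sin(a)]),
               'path': f'f{i}.jpg'}
           for i, a in enumerate(np.linspace(0, 2 * np.pi, 8, endpoint=False))}
    mean_center, basis, seq_out = get_sequence_geometry(seq, align_to_standard=False)
    # The basis rows are orthonormal by construction: basis @ basis.T == I.
    assert np.allclose(basis @ basis.T, np.eye(3), atol=1e-6)

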
def get_relative_yaw(R_ref, T_ref, R_curr, T_curr, mean_center, basis):
    """Compute the relative azimuth between two frames in the ground plane.

    Args:
        R_ref, T_ref: rotation and translation of the reference frame
        R_curr, T_curr: rotation and translation of the current frame
        mean_center: scene center
        basis: basis matrix with rows [u, v, normal], where normal is the
            ground normal

    Returns:
        angle_deg: angle in degrees; positive means anticlockwise,
            negative means clockwise
    """
    C_ref = get_camera_center(R_ref, T_ref)
    C_curr = get_camera_center(R_curr, T_curr)

    # Express both centers in the local (u, v, normal) frame.
    p_ref_local = (C_ref - mean_center) @ basis.T
    p_curr_local = (C_curr - mean_center) @ basis.T

    # Project onto the ground plane spanned by u and v.
    u_ref, v_ref = p_ref_local[0], p_ref_local[1]
    u_curr, v_curr = p_curr_local[0], p_curr_local[1]

    # Azimuth of each camera about the scene center.
    angle_ref = np.arctan2(v_ref, u_ref)
    angle_curr = np.arctan2(v_curr, u_curr)

    diff_rad = angle_curr - angle_ref

    # Wrap into [-pi, pi).
    diff_rad = (diff_rad + np.pi) % (2 * np.pi) - np.pi

    angle_deg = np.degrees(diff_rad)

    return angle_deg


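# A worked example with synthetic poses (demo only): two cameras a quarter
# turn apart on a unit circle around the origin give an azimuth of 90 degrees
# (the sign follows the handedness of the chosen basis).
def _demo_relative_yaw():
    basis = np.stack([np.array([1.0, 0.0, 0.0]),   # u
                      np.array([0.0, 0.0, 1.0]),   # v
                      np.array([0.0, 1.0, 0.0])])  # ground normal
    R = np.eye(3)
    T_ref = np.array([-1.0, 0.0, 0.0])   # camera center at (1, 0, 0)
    T_curr = np.array([0.0, 0.0, -1.0])  # camera center at (0, 0, 1)
    angle = get_relative_yaw(R, T_ref, R, T_curr, np.zeros(3), basis)
    assert np.isclose(angle, 90.0)

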
def format_angle_direction(angle):
    """Convert a signed angle into (absolute value, direction string)."""
    angle = (angle + 180) % 360 - 180  # wrap into [-180, 180)
    direction = "anticlockwise" if angle > 0 else "clockwise"
    return int(round(abs(angle))), direction


def get_angle_diff(angle_a, angle_b):
    """Return the minimal difference between two angles on the circle."""
    diff = abs(angle_a - angle_b) % 360
    return min(diff, 360 - diff)


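# Quick examples of the two angle helpers (illustrative only).
def _demo_angle_helpers():
    assert format_angle_direction(-30) == (30, "clockwise")
    assert format_angle_direction(190) == (170, "clockwise")  # wraps to -170
    assert get_angle_diff(350, 10) == 20

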
| def format_image_path(raw_path, root_path, image_prefix="data/"): |
| """ |
| 统一处理图片路径: |
| 1. 去除绝对路径中的 root_path |
| 2. 加上统一的 image_prefix |
| """ |
| if root_path.endswith('/'): |
| root_path = root_path[:-1] |
| |
| if raw_path.startswith(root_path): |
| rel_path = raw_path[len(root_path):] |
| if rel_path.startswith('/'): |
| rel_path = rel_path[1:] |
| else: |
| rel_path = raw_path |
|
|
| if not image_prefix: |
| return rel_path |
| |
| if not image_prefix.endswith('/'): |
| image_prefix += '/' |
| |
| if rel_path.startswith(image_prefix): |
| return rel_path |
| |
| return os.path.join(image_prefix, rel_path) |
|
|
| |
|
|
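# Example with hypothetical paths: strip the dataset root, prepend "data/".
def _demo_format_image_path():
    out = format_image_path('/datasets/co3d/apple/frame001.jpg', '/datasets/co3d')
    assert out == 'data/apple/frame001.jpg'

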
class CO3DDataLoader:
    """Load the annotations of a given category and expose convenient
    accessors for per-frame data."""

    def __init__(self, root_path, category):
        self.root_path = root_path
        self.category = category
        self.seq_data = {}

        self._load_data()

    def _load_data(self):
        cat_dir = os.path.join(self.root_path, 'data', 'original', self.category)
        json_path = os.path.join(cat_dir, 'frame_annotations.json')

        # Check the directory first: the annotation file lives inside it.
        if not os.path.exists(cat_dir):
            print(f"Warning: Category directory {cat_dir} not found.")
            return

        if not os.path.exists(json_path):
            print(f"Warning: Annotation file {json_path} not found.")
            return

        # Only keep sequences whose image directories actually exist on disk.
        valid_sequences = set()
        for d in os.listdir(cat_dir):
            if os.path.isdir(os.path.join(cat_dir, d)):
                valid_sequences.add(d)

        with open(json_path, 'r') as f:
            annotations = json.load(f)

        for item in annotations:
            seq = item['sequence_name']
            if seq not in valid_sequences:
                continue

            fid = item['frame_number']
            if seq not in self.seq_data:
                self.seq_data[seq] = {}

            self.seq_data[seq][fid] = {
                'R': np.array(item['viewpoint']['R']),
                'T': np.array(item['viewpoint']['T']),
                'path': item['image']['path'],
            }

    def get_sequences(self):
        """Return the sequence names in sorted order."""
        return sorted(self.seq_data.keys())

    def get_frames(self, sequence_name):
        """Return the frame indices available for a sequence."""
        if sequence_name not in self.seq_data:
            return []
        return list(self.seq_data[sequence_name].keys())

    def get_frame_info(self, sequence_name, frame_idx):
        """Return the R, T and path of a specific frame (or None)."""
        return self.seq_data.get(sequence_name, {}).get(frame_idx)


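# Usage sketch (hypothetical root path and category; adjust to your layout).
def _demo_loader():
    loader = CO3DDataLoader('/datasets/co3d_root', 'apple')
    for seq in loader.get_sequences():
        frames = loader.get_frames(seq)
        if frames:
            info = loader.get_frame_info(seq, frames[0])
            print(seq, frames[0], info['path'])

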
def save_jsonl_splits(all_data, output_dir, ratios=(0.8, 0.1, 0.1), max_items=10000, seed=42):
    """Shuffle the data, split it into train/val/test, and save each split
    as sharded JSONL files. Note: shuffles all_data in place."""
    random.seed(seed)
    random.shuffle(all_data)

    n_total = len(all_data)

    # Normalize the ratios so they sum to 1.
    total_r = sum(ratios)
    r_train, r_val, _ = (r / total_r for r in ratios)

    n_train = int(n_total * r_train)
    n_val = int(n_total * r_val)

    splits = {
        "train": all_data[:n_train],
        "val": all_data[n_train : n_train + n_val],
        "test": all_data[n_train + n_val :],
    }

    print(f"Split result: Train({len(splits['train'])}), Val({len(splits['val'])}), Test({len(splits['test'])})")

    for split_name, data_list in splits.items():
        if not data_list:
            continue

        save_path = os.path.join(output_dir, split_name)
        os.makedirs(save_path, exist_ok=True)

        # Shard each split into files of at most max_items records.
        num_files = math.ceil(len(data_list) / max_items)
        for i in range(num_files):
            chunk = data_list[i * max_items : (i + 1) * max_items]
            fname = f"{split_name}_{i + 1}.jsonl"
            with open(os.path.join(save_path, fname), 'w') as f:
                for item in chunk:
                    f.write(json.dumps(item) + '\n')


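# Usage sketch (dummy records; './splits' is a hypothetical output dir).
def _demo_save_splits():
    records = [{'id': i, 'angle': i % 45} for i in range(100)]
    save_jsonl_splits(records, './splits', ratios=(0.8, 0.1, 0.1), max_items=50)

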
def decompose_angle(target_angle, allowed_steps=(15, 30, 45)):
    """Greedily decompose a target angle into a combination of the allowed
    step sizes, preferring large steps, so that the sum is as close as
    possible to target_angle."""
    allowed_steps = sorted(allowed_steps, reverse=True)
    steps_taken = []

    remaining = target_angle

    while remaining > 0:
        best_step = None

        # Pick the largest step that does not overshoot the remaining angle
        # by more than the 5-degree tolerance.
        for step in allowed_steps:
            if step <= remaining + 5:
                best_step = step
                break

        if best_step:
            steps_taken.append(best_step)
            remaining -= best_step
        else:
            # No allowed step fits within the tolerance; drop the remainder.
            break

    # Guarantee at least one step, using the smallest allowed step.
    if not steps_taken:
        steps_taken.append(allowed_steps[-1])

    return steps_taken


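# Worked examples of the greedy decomposition with the default steps and the
# 5-degree overshoot tolerance (illustrative only).
def _demo_decompose_angle():
    assert decompose_angle(90) == [45, 45]
    assert decompose_angle(70) == [45, 30]  # sums to 75, within tolerance
    assert decompose_angle(40) == [45]      # 45 <= 40 + 5
    assert decompose_angle(3) == [15]       # fallback to the smallest step

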
def estimate_ground_normal_pca(R_list, T_list):
    """Fit the plane of the camera trajectory with PCA/SVD and use the
    camera up vectors to resolve the sign ambiguity of the plane normal.

    Args:
        R_list: list of rotation matrices
        T_list: list of translation vectors

    Returns:
        normal: the corrected ground normal (pointing towards the "sky")
        mean_center: geometric center of the trajectory
    """
    centers = []
    up_vectors = []

    for R, T in zip(R_list, T_list):
        centers.append(get_camera_center(R, T))
        up_vectors.append(get_camera_up(R))

    centers = np.array(centers)
    up_vectors = np.array(up_vectors)

    mean_center = np.mean(centers, axis=0)
    centered_points = centers - mean_center

    # The right singular vector with the smallest singular value is the
    # direction of least variance, i.e. the normal of the best-fit plane.
    try:
        u, s, vh = np.linalg.svd(centered_points)
        normal = vh[2, :]
    except np.linalg.LinAlgError:
        # Degenerate trajectory; fall back to the world +Y axis.
        return np.array([0.0, 1.0, 0.0]), mean_center

    # SVD leaves the sign of the normal arbitrary. Flip it so that it agrees
    # with the average camera up vector, i.e. points towards the sky.
    avg_up = np.mean(up_vectors, axis=0)
    if np.dot(normal, avg_up) < 0:
        normal = -normal

    return normal, mean_center


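# Sanity check on a synthetic circular trajectory in the XZ plane with
# upright cameras (R = I, so each camera's up vector is world +Y).
def _demo_ground_normal_pca():
    angles = np.linspace(0, 2 * np.pi, 16, endpoint=False)
    T_list = [np.array([-np.cos(a), 0.0, -np.sin(a)]) for a in angles]
    R_list = [np.eye(3) for _ in angles]
    normal, center = estimate_ground_normal_pca(R_list, T_list)
    assert np.allclose(center, 0.0, atol=1e-8)
    assert np.allclose(normal, [0.0, 1.0, 0.0], atol=1e-6)

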
def get_sequence_geometry_pca(seq_data):
    """Dynamic alignment based on a PCA fit of the camera trajectory.

    Intended as a replacement for get_sequence_geometry; note that the third
    return value is the fitted ground normal rather than the sequence data.
    """
    R_list = [info['R'] for info in seq_data.values()]
    T_list = [info['T'] for info in seq_data.values()]

    pca_normal, mean_center = estimate_ground_normal_pca(R_list, T_list)

    n = pca_normal / np.linalg.norm(pca_normal)

    # Pick an arbitrary vector that is not nearly parallel to the normal...
    if abs(n[0]) < 0.9:
        arbitrary = np.array([1.0, 0.0, 0.0])
    else:
        arbitrary = np.array([0.0, 1.0, 0.0])

    # ...and Gram-Schmidt it into an orthonormal in-plane basis (u, v).
    u = arbitrary - np.dot(arbitrary, n) * n
    u = u / np.linalg.norm(u)

    v = np.cross(n, u)
    v = v / np.linalg.norm(v)

    basis = np.stack([u, v, n], axis=0)

    return mean_center, basis, n
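

# End-to-end sketch (hypothetical loader arguments): estimate a sequence's
# geometry via the PCA fit, then measure the azimuth between its first two
# frames.
def _demo_pca_yaw():
    loader = CO3DDataLoader('/datasets/co3d_root', 'apple')
    for seq in loader.get_sequences():
        frames = loader.get_frames(seq)
        if len(frames) < 2:
            continue
        mean_center, basis, _ = get_sequence_geometry_pca(loader.seq_data[seq])
        a = loader.get_frame_info(seq, frames[0])
        b = loader.get_frame_info(seq, frames[1])
        yaw = get_relative_yaw(a['R'], a['T'], b['R'], b['T'], mean_center, basis)
        mag, direction = format_angle_direction(yaw)
        print(f"{seq}: {mag} deg {direction}")
        break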