| """ |
| **任务名称:** 围绕静止物体的相机视角排序(Camera View Ordering around Static Objects) |
| |
| **任务设定:** |
| 本任务模拟一个静止物体被移动相机拍摄的场景。相机围绕物体进行平滑的轨道旋转(Orbital Rotation)。 |
| 1. **数据来源:** 所有图像均从一段连续的视频中抽帧获得,因此具备内在的时空连贯性。 |
| 2. **运动约束:** 从给定的初始参考帧(Reference Frame)到序列的最后一帧,相机的总旋转角度严格限制在半个圆周(<180度)以内,保证视角的单向性和连续性。 |
| |
| **输入数据:** |
| * 一张**初始参考图(Reference Image)**,作为序列的起点(t=0)。 |
| * 四张**候选图片(Candidate Images)**,编号为 1、2、3、4。这四张图片是参考图之后的后续帧,但顺序已被打乱。 |
| |
| **任务目标:** |
| 模型需要分析参考图与候选图片之间的几何关系,将四张候选图片按照真实的时间/空间顺序重新排列,使其构成一个连贯的相机运动轨迹。 |
| |
| **输出格式:** |
| 提供 A、B、C、D 四个选项,每个选项代表一种排序组合(例如 B: 2-1-4-3)。模型需选择能够恢复正确时空顺序的选项。 |
| """ |
|
|
| import argparse |
| import random |
| import itertools |
| import json |
| import os |
| from tqdm import tqdm |
| import numpy as np |
|
|
| |
| from utils import ( |
| CO3DDataLoader, |
| get_relative_yaw, |
| format_image_path, |
| save_jsonl_splits, |
| get_sequence_geometry_pca, |
| decompose_angle, |
| ) |
|
|
class Task3Generator:
    """Builds "camera view ordering" samples around a static object.

    Each sample pairs one reference frame with four shuffled later frames
    captured while the camera orbits the object (total rotation < 180 deg);
    the question asks which of four A-D options restores the candidates'
    true chronological order.
    """

    # Angular step sizes (degrees) used to decompose each rotation phase of
    # the oracle reasoning chain into coarse sub-steps.
    # NOTE: the original code carried a dead, contradictory constant
    # ([15, 10]) while actually using [30, 15]; this is the value in use.
    ROTATION_STEPS = [30, 15]

    def __init__(self, loader, image_prefix):
        """
        Args:
            loader: CO3D data loader for a single category (provides
                `category`, `root_path`, `seq_data`, frame accessors).
            image_prefix: prefix prepended to image paths in the output.
        """
        self.loader = loader
        self.image_prefix = image_prefix
        # Human-readable object name, e.g. "tv_stand" -> "tv stand".
        self.cat_name = self.loader.category.replace('_', ' ')

    def _get_all_relative_angles(self, start_idx, all_frames, seq_data_dict, mean_center, basis):
        """Compute every frame's yaw angle relative to frame `start_idx`.

        Args:
            start_idx: index of the reference frame.
            all_frames: list of all frame indices in the sequence.
            seq_data_dict: aligned per-frame data
                {frame_idx: {'R': ..., 'T': ..., 'path': ...}}.
            mean_center: scene center point.
            basis: basis matrix from the sequence PCA.

        Returns:
            List of dicts [{'idx': frame_idx, 'angle': relative_yaw}, ...];
            the reference frame itself gets angle 0.0.
        """
        start_info = seq_data_dict[start_idx]
        results = []

        for f_idx in all_frames:
            if f_idx == start_idx:
                results.append({'idx': f_idx, 'angle': 0.0})
                continue

            f_info = seq_data_dict[f_idx]
            yaw = get_relative_yaw(
                start_info['R'], start_info['T'],
                f_info['R'], f_info['T'],
                mean_center, basis
            )
            results.append({'idx': f_idx, 'angle': yaw})

        return results

    def generate_sample(self, seq_name, config):
        """Try to build one sample from sequence `seq_name`.

        Randomly picks a reference frame, then greedily grows a 5-frame
        chain (reference + 4 candidates) whose consecutive angular gaps lie
        within [min_interval, max_interval], whose angular magnitudes are
        strictly increasing (single-direction orbit), and whose total span
        stays within max_angle.

        Args:
            seq_name: sequence identifier.
            config: dict with 'min_interval', 'max_interval', 'max_angle'
                (all in degrees).

        Returns:
            A sample dict (see `create_entry`) or None if no valid chain is
            found within the attempt budget.
        """
        frames = sorted(self.loader.get_frames(seq_name))
        # Need enough frames to pick a reference plus four later candidates.
        if len(frames) < 10:
            return None

        seq_data_dict = self.loader.seq_data[seq_name]
        mean_center, basis, _ = get_sequence_geometry_pca(seq_data_dict)

        min_int = config['min_interval']
        max_int = config['max_interval']
        max_span = config['max_angle']

        max_attempts = 5000

        for _ in range(max_attempts):
            # Leave room for at least four frames after the reference.
            start_idx = random.choice(frames[:-4])

            rel_data = self._get_all_relative_angles(
                start_idx, frames, seq_data_dict, mean_center, basis
            )

            # Candidates must come temporally after the reference frame.
            future_candidates = [x for x in rel_data if x['idx'] > start_idx]

            # Greedily extend the chain one frame at a time.
            chain = [{'idx': start_idx, 'angle': 0.0}]

            for _ in range(4):
                last_item = chain[-1]
                last_angle = last_item['angle']

                valid_next_frames = []

                for cand in future_candidates:
                    # Must come after the last chosen frame in time.
                    if cand['idx'] <= last_item['idx']:
                        continue

                    diff = abs(cand['angle'] - last_angle)

                    # Constraints: step size within [min_int, max_int],
                    # total span within max_span, and strictly increasing
                    # angular magnitude (monotone orbit away from the ref).
                    if (min_int <= diff <= max_int) and \
                       (abs(cand['angle']) <= max_span) and \
                       (abs(cand['angle']) > abs(last_angle)):

                        valid_next_frames.append(cand)

                if not valid_next_frames:
                    break

                next_frame = random.choice(valid_next_frames)
                chain.append(next_frame)

            # Success: reference + 4 candidates found.
            if len(chain) == 5:
                indices = [x['idx'] for x in chain]
                angles = [x['angle'] for x in chain]
                return self.create_entry(seq_name, indices, angles, rel_data)

        return None

    def create_entry(self, seq_name, frame_indices, angles, all_rel_data):
        """Assemble the final sample dict for one chain.

        Args:
            seq_name: sequence identifier.
            frame_indices: [Ref, True_1, True_2, True_3, True_4].
            angles: [0, Ang1, Ang2, Ang3, Ang4], all relative to Ref.
            all_rel_data: [{'idx': ..., 'angle': ...}, ...] for every frame
                in the sequence (used to pick reasoning-chain images).

        Returns:
            Dict with the question text, image paths, oracle metadata
            (direction, reasoning chain, per-candidate angles) and the
            ground-truth answer label.
        """
        ref_idx = frame_indices[0]
        true_candidates_indices = frame_indices[1:]

        ref_info = self.loader.get_frame_info(seq_name, ref_idx)
        candidates_infos = [self.loader.get_frame_info(seq_name, idx) for idx in true_candidates_indices]

        # Attach the true rank (1-4) to each candidate before shuffling.
        labeled_candidates = []
        for i, info in enumerate(candidates_infos):
            labeled_candidates.append({
                "true_order": i + 1,
                "path": format_image_path(info['path'], self.loader.root_path, self.image_prefix),
                "angle_rel_ref": angles[i+1]
            })

        # Shuffle to obtain the (jumbled) display order shown to the model.
        shuffled_candidates = labeled_candidates.copy()
        random.shuffle(shuffled_candidates)

        images_dict = {
            "image_ref": format_image_path(ref_info['path'], self.loader.root_path, self.image_prefix)
        }

        display_mapping = {}   # display label (1-4) -> true rank (1-4)
        candidates_meta = {}

        for k, item in enumerate(shuffled_candidates):
            display_label = k + 1
            images_dict[f"image_{display_label}"] = item['path']
            display_mapping[display_label] = item['true_order']

            candidates_meta[str(display_label)] = {
                "angle_relative_to_ref": item['angle_rel_ref'],
                "true_rank": item['true_order'],
                "image_key": f"image_{display_label}"
            }

        # Invert the mapping to express the correct order in display labels.
        true_to_display = {v: k for k, v in display_mapping.items()}
        correct_sequence = [true_to_display[i] for i in range(1, 5)]
        correct_str = "-".join(map(str, correct_sequence))

        # Draw three distinct wrong orderings as distractor options.
        all_permutations = list(itertools.permutations([1, 2, 3, 4]))
        distractor_pool = [
            "-".join(map(str, p))
            for p in all_permutations
            if list(p) != correct_sequence
        ]
        selected_distractors = random.sample(distractor_pool, 3)

        options_data = [{"seq": correct_str, "is_correct": True}]
        for d in selected_distractors:
            options_data.append({"seq": d, "is_correct": False})
        random.shuffle(options_data)

        option_labels = ['A', 'B', 'C', 'D']
        gt_option_label = ""
        sequence_options_text = {}
        for label, opt in zip(option_labels, options_data):
            sequence_options_text[f"sequence_{label}"] = opt["seq"]
            if opt["is_correct"]:
                gt_option_label = label

        # Direction is read off the sign of the final relative yaw
        # (negative == clockwise when seen from a top-down view —
        # presumably matching get_relative_yaw's sign convention).
        is_clockwise = angles[-1] < 0
        direction_str = "clockwise" if is_clockwise else "counter-clockwise"

        chain = []
        images_dict_update = {}

        current_simulated_angle = 0.0
        prev_target_angle = 0.0
        reasoning_img_counter = 0

        # Build the oracle reasoning chain: simulate the rotation phase by
        # phase (one phase per candidate), decomposing each phase delta
        # into coarse steps and attaching the real frame whose angle is
        # closest to each simulated viewpoint.
        for i in range(1, 5):
            target_angle = angles[i]
            true_delta = abs(target_angle - prev_target_angle)

            steps = decompose_angle(true_delta, self.ROTATION_STEPS)

            matched_display_label = true_to_display[i]

            for step_idx, step_deg in enumerate(steps):
                reasoning_img_counter += 1

                # Advance the simulated camera in the orbit direction.
                if is_clockwise:
                    current_simulated_angle -= step_deg
                else:
                    current_simulated_angle += step_deg

                # Nearest real frame to the simulated viewpoint.
                closest_frame_data = min(all_rel_data, key=lambda x: abs(x['angle'] - current_simulated_angle))
                closest_frame_idx = closest_frame_data['idx']
                closest_frame_info = self.loader.get_frame_info(seq_name, closest_frame_idx)

                result_key = f"reasoning_image_{reasoning_img_counter}"
                images_dict_update[result_key] = format_image_path(
                    closest_frame_info['path'], self.loader.root_path, self.image_prefix
                )

                # The final sub-step of a phase lands on a candidate frame.
                is_last_step_of_phase = (step_idx == len(steps) - 1)

                chain_item = {
                    "step_index": len(chain) + 1,
                    "action": {
                        "type": "rotate",
                        "degrees": step_deg,
                        "direction": direction_str,
                        "total_angle_so_far": current_simulated_angle
                    },
                    "result_image_key": result_key,
                    "is_key_frame": is_last_step_of_phase,
                    "matched_display_label": matched_display_label if is_last_step_of_phase else None
                }
                chain.append(chain_item)

            prev_target_angle = target_angle

        images_dict.update(images_dict_update)

        # Two interchangeable question phrasings, picked at random.
        template_id = random.choice([1, 2])
        obj_name = self.cat_name
        if template_id == 1:
            question = f"""The {obj_name} in the initial view <image_start>[image_ref]<image_end> remains **static**. Imagine a camera rotating around this {obj_name} along a continuous path. The direction of rotation is defined from a **top-down bird's-eye view**. The total rotation from the start to the end is less than 180 degrees.

Below are four images captured during this rotation, labeled 1, 2, 3 and 4. They're currently shuffled.

1. <image_start>[image_1]<image_end>
2. <image_start>[image_2]<image_end>
3. <image_start>[image_3]<image_end>
4. <image_start>[image_4]<image_end>

Please analyze the change in perspective and determine the correct chronological order of these four images following the initial view. Select the correct sequence.

A. {sequence_options_text['sequence_A']}
B. {sequence_options_text['sequence_B']}
C. {sequence_options_text['sequence_C']}
D. {sequence_options_text['sequence_D']}"""
        else:
            question = f"""Given the initial view of the **static** {obj_name}: <image_start>[image_ref]<image_end>.

Imagine a camera rotating around this {obj_name} to capture a video sequence. The rotation covers an angle of less than 180 degrees. We have extracted four frames from the sequence, labeled 1 to 4, but their order is jumbled.

1. <image_start>[image_1]<image_end>
2. <image_start>[image_2]<image_end>
3. <image_start>[image_3]<image_end>
4. <image_start>[image_4]<image_end>

Which of the following four options correctly sorts these images into a coherent spatio-temporal sequence starting after the initial view?

A. {sequence_options_text['sequence_A']}
B. {sequence_options_text['sequence_B']}
C. {sequence_options_text['sequence_C']}
D. {sequence_options_text['sequence_D']}"""

        return {
            "id": f"task3_{seq_name}_{ref_idx}",
            "task": "camera_view_ordering",
            "sequence": seq_name,
            "category": self.loader.category,
            "question": question,
            "images": images_dict,
            "oracle_meta": {
                "direction": direction_str,
                "correct_sequence_str": correct_str,
                "correct_label": gt_option_label,
                "chain": chain,
                "candidates_meta": candidates_meta
            },
            "gt_answer": f"<answer>{gt_option_label}</answer>"
        }
|
|
|
|
|
|
def main():
    """CLI entry point: generate Task 3 samples and write dataset splits."""
    parser = argparse.ArgumentParser(description="Generate Task 3: Camera View Ordering")

    # Paths and dataset selection.
    parser.add_argument("--root_path", type=str, required=True, help="CO3D dataset root")
    parser.add_argument("--output_dir", type=str, default="output_task3", help="Output directory")
    parser.add_argument("--image_prefix", type=str, default="data/", help="Prefix for image paths")
    parser.add_argument("--filter_path", type=str, default=None, help="Root directory for filter logs (containing category/keep.json)")
    parser.add_argument("--category", type=str, default=None, help="Specific category or None for all")
    parser.add_argument("--num_samples", type=int, default=1, help="Samples per sequence")
    parser.add_argument("--seed", type=int, default=42)

    # Angular constraints (degrees) for chain construction.
    parser.add_argument("--min_interval", type=float, default=15.0)
    parser.add_argument("--max_interval", type=float, default=45.0)
    parser.add_argument("--max_angle", type=float, default=175.0)

    # Split ratios and output cap.
    parser.add_argument("--train_ratio", type=float, default=0.8)
    parser.add_argument("--val_ratio", type=float, default=0.1)
    parser.add_argument("--test_ratio", type=float, default=0.1)
    parser.add_argument("--max_items", type=int, default=10000)

    args = parser.parse_args()

    # Seed both RNGs so sampling is reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)

    # Resolve the category list: either the single requested category, or
    # every sub-directory under <root>/data/original.
    if args.category:
        categories = [args.category]
    else:
        data_dir = os.path.join(args.root_path, 'data', 'original')
        if not os.path.exists(data_dir):
            print(f"Error: {data_dir} not found.")
            return
        categories = sorted(
            entry for entry in os.listdir(data_dir)
            if os.path.isdir(os.path.join(data_dir, entry))
        )

    config = {
        'min_interval': args.min_interval,
        'max_interval': args.max_interval,
        'max_angle': args.max_angle,
    }
    all_results = []

    for cat in categories:
        loader = CO3DDataLoader(args.root_path, cat)
        if not loader.seq_data:
            continue

        generator = Task3Generator(loader, args.image_prefix)
        sequences = loader.get_sequences()

        # Optionally restrict to the whitelist stored in keep.json.
        if args.filter_path:
            keep_file = os.path.join(args.filter_path, cat, "keep.json")
            if not os.path.exists(keep_file):
                print(f"[{cat}] Warning: No keep.json found at {keep_file}. Skipping category.")
                sequences = []
            else:
                try:
                    with open(keep_file, 'r') as f:
                        keep_list = set(json.load(f))
                    original_count = len(sequences)
                    sequences = [s for s in sequences if s in keep_list]
                    print(f"[{cat}] Filter applied: {original_count} -> {len(sequences)} sequences retained.")
                except Exception as e:
                    print(f"[{cat}] Error reading keep.json: {e}. Skipping category.")
                    sequences = []

        if not sequences:
            continue

        for seq in tqdm(sequences, desc=f"Task3 - {cat}", leave=False):
            attempts = (generator.generate_sample(seq, config) for _ in range(args.num_samples))
            all_results.extend(sample for sample in attempts if sample)

    print(f"Total generated: {len(all_results)}")
    save_jsonl_splits(
        all_results,
        args.output_dir,
        ratios=(args.train_ratio, args.val_ratio, args.test_ratio),
        max_items=args.max_items,
        seed=args.seed
    )
    print(f"Done. Output saved to {args.output_dir}")
|
|
# Script entry point: only run generation when executed directly.
if __name__ == "__main__":
    main()
|
|