| """ |
| 物体保持静止。摄像机围绕物体进行水平轨道运动。这里的“旋转”是指摄像机在水平面上绕物体中心移动,其方向定义参考鸟瞰视角下的时钟方向(即从上往下看,顺时针或逆时针移动) |
| |
| 给定一张物体的初始视角图片,以及一系列旋转指令,模型需要计算这些指令叠加后的最终位置,并从A、B、C、D四个选项中选出该视角对应的图片。 |
| |
| |
| 1. |
| The object in the image <image_start>[image_1]<image_end> remains **static**. |
| |
| Imagine a camera rotating around this object. The direction of rotation is defined from a **top-down bird's-eye view**. |
| |
| Please identify the view of the object after the camera follows this sequence of rotations: {instruction_sequence}. Based on this top-down perspective, select the correct answer. |
| |
| A. <image_start>[image_A]<image_end> |
| B. <image_start>[image_B]<image_end> |
| C. <image_start>[image_C]<image_end> |
| D. <image_start>[image_D]<image_end> |
| |
| |
| 2. |
| Given the initial view of a static object: <image_start>[image_1]<image_end>. |
| |
| Imagine looking at the setup from a bird's-eye view (from directly above) to determine the direction. Now, move the camera according to the following instructions: {instruction_sequence}. |
| |
| Which of the following images shows what the object looks like from this new position? |
| |
| A. <image_start>[image_A]<image_end> |
| B. <image_start>[image_B]<image_end> |
| C. <image_start>[image_C]<image_end> |
| D. <image_start>[image_D]<image_end> |
| |
| """ |
|
|
| import argparse |
| import random |
| import json |
| import os |
| from tqdm import tqdm |
| import numpy as np |
|
|
| |
| from utils import ( |
| CO3DDataLoader, |
| get_relative_yaw, |
| format_angle_direction, |
| get_angle_diff, |
| format_image_path, |
| save_jsonl_splits, |
| get_sequence_geometry |
| ) |
|
|
class Task2Generator:
    """Generates Task 2 samples: multi-step camera-rotation view prediction.

    Each sample shows an initial view of a static object plus four candidate
    views (A-D); the question gives a sequence of yaw rotations (defined from a
    bird's-eye view) and asks which candidate matches the final camera position.
    """

    def __init__(self, loader, image_prefix):
        # loader: CO3DDataLoader for one category (provides frames, poses, paths).
        # image_prefix: path prefix prepended to image paths in the output JSON.
        self.loader = loader
        self.image_prefix = image_prefix

        # Human-readable category name, e.g. "teddy_bear" -> "teddy bear".
        self.cat_name = self.loader.category.replace('_', ' ')

    def verify(self, start_R, start_T, target_R, target_T, distractor_infos,
               min_angle, max_angle, min_interval, mean_center, basis):
        """
        Same validation logic as Task 1:
        ensure the Start, Target, and Distractor views are visually distinguishable.

        Returns (is_valid, target_yaw, distractor_yaws). On failure the yaw
        values are (None, []) respectively.
        """
        # Yaw of the target view relative to the start view (degrees,
        # presumably signed — semantics come from utils.get_relative_yaw; confirm there).
        target_yaw = get_relative_yaw(start_R, start_T, target_R, target_T, mean_center, basis)

        # Reject targets that rotated too little (too similar to start) or too much.
        if not (min_angle <= abs(target_yaw) <= max_angle):
            return False, None, []

        # Relative yaw of each distractor candidate w.r.t. the start view.
        distractor_yaws = []
        for d_info in distractor_infos:
            d_yaw = get_relative_yaw(start_R, start_T, d_info['R'], d_info['T'], mean_center, basis)
            distractor_yaws.append(d_yaw)

        # 0.0 represents the start view itself; every pair of views must be at
        # least `min_interval` degrees apart so no two options look alike.
        all_angles = [0.0, target_yaw] + distractor_yaws

        for i in range(len(all_angles)):
            for j in range(i + 1, len(all_angles)):
                if get_angle_diff(all_angles[i], all_angles[j]) < min_interval:
                    return False, None, []

        return True, target_yaw, distractor_yaws

    def _find_closest_frame(self, target_angle, available_frames, tolerance=1.0):
        """
        Search `available_frames` for the frame whose angle is closest to
        `target_angle`; return its path if the gap is within `tolerance`
        degrees, else None.

        available_frames: list of dicts with 'angle' and 'path' keys.
        """
        best_diff = float('inf')
        best_path = None

        for item in available_frames:
            diff = get_angle_diff(target_angle, item['angle'])
            if diff < best_diff:
                best_diff = diff
                best_path = item['path']

        if best_diff <= tolerance:
            return best_path
        return None

    def _generate_instruction_sequence(self, total_yaw, available_frames):
        """
        Decompose the total angle `total_yaw` into a multi-step sequence of
        rotation instructions, while ensuring every intermediate cumulative
        angle has a matching image (the "reasoning images").

        Returns (instruction_str, steps, intermediate_paths), or
        (None, None, None) if no valid decomposition is found.
        """
        target_deg = int(round(total_yaw))

        # Randomly pick 3 or 4 total steps (equal probability).
        num_steps = random.choices([3, 4], weights=[0.5, 0.5])[0]

        max_attempts = 200
        for _ in range(max_attempts):
            steps = []
            current_sum = 0
            intermediate_paths = []
            valid_sequence = True

            # Sample the first num_steps-1 steps; each must have magnitude in
            # [30, 45] and land on a cumulative angle that has a real frame.
            for _ in range(num_steps - 1):
                step = 0
                # Rejection-sample until |step| >= 30 (so steps are visually meaningful).
                while abs(step) < 30:
                    step = random.randint(-45, 45)

                temp_sum = current_sum + step
                found_path = self._find_closest_frame(temp_sum, available_frames, tolerance=1.0)

                if found_path:
                    steps.append(step)
                    current_sum = temp_sum
                    intermediate_paths.append(found_path)
                else:
                    # No frame close enough to this cumulative angle — restart attempt.
                    valid_sequence = False
                    break

            if not valid_sequence:
                continue

            # The last step is forced so the sequence sums exactly to target_deg.
            final_step = target_deg - current_sum

            # Accept only if the forced final step has the same magnitude range
            # as the sampled steps; otherwise retry with a new decomposition.
            if 30 <= abs(final_step) <= 45:
                steps.append(final_step)

                # Render the steps into a natural-language instruction string,
                # e.g. "rotate 40 degrees clockwise, then rotate 35 degrees ...".
                instruction_parts = []
                for i, step in enumerate(steps):
                    val, direction = format_angle_direction(step)
                    action = f"rotate {val} degrees {direction}"
                    if i == 0:
                        instruction_parts.append(action)
                    else:
                        instruction_parts.append(f"then {action}")

                instruction_str = ", ".join(instruction_parts)
                return instruction_str, steps, intermediate_paths

        # All attempts exhausted without a valid decomposition.
        return None, None, None

    def generate_sample(self, seq_name, config):
        """Try to build one QA sample for sequence `seq_name`; return the entry
        dict, or None if no valid start/target/distractor combination is found.

        config: dict with 'min_angle', 'max_angle', 'min_interval' (degrees).
        """
        frames = self.loader.get_frames(seq_name)
        # Need at least start + target + 3 distractors.
        if len(frames) < 5:
            return None

        seq_data_dict = self.loader.seq_data[seq_name]
        # mean_center/basis define the shared yaw reference frame for this sequence.
        mean_center, basis, aligned_seq_data = get_sequence_geometry(seq_data_dict)

        max_attempts = 5000
        for _ in range(max_attempts):
            # Randomly pick a start frame, a distinct target, and 3 distinct distractors.
            start_idx = random.choice(frames)
            start_info = aligned_seq_data[start_idx]

            possible_targets = [f for f in frames if f != start_idx]
            target_idx = random.choice(possible_targets)
            target_info = aligned_seq_data[target_idx]

            remaining = [f for f in frames if f != start_idx and f != target_idx]
            if len(remaining) < 3: continue
            distractor_indices = random.sample(remaining, 3)
            distractor_infos = [aligned_seq_data[d] for d in distractor_indices]

            # Check angle constraints and mutual distinguishability.
            is_valid, target_yaw, distractor_yaws = self.verify(
                start_info['R'], start_info['T'],
                target_info['R'], target_info['T'],
                distractor_infos,
                config['min_angle'],
                config['max_angle'],
                config['min_interval'],
                mean_center, basis
            )

            if is_valid:
                # Precompute the relative yaw of every other frame so the
                # instruction generator can attach reasoning images.
                # NOTE(review): this uses loader.get_frame_info while start/target
                # use aligned_seq_data — confirm both carry consistent R/T poses.
                all_frame_angles = []
                for f_idx in frames:
                    if f_idx == start_idx: continue
                    f_info = self.loader.get_frame_info(seq_name, f_idx)

                    f_yaw = get_relative_yaw(start_info['R'], start_info['T'], f_info['R'], f_info['T'], mean_center, basis)
                    all_frame_angles.append({
                        'angle': f_yaw,
                        'path': f_info['path']
                    })

                instruction_seq, steps_breakdown, reasoning_paths = self._generate_instruction_sequence(target_yaw, all_frame_angles)

                # Decomposition can fail even for a valid target; retry sampling.
                if instruction_seq is None:
                    continue

                return self.create_entry(
                    seq_name, start_idx, target_idx, distractor_indices,
                    target_yaw, distractor_yaws,
                    start_info, target_info, distractor_infos,
                    instruction_seq, steps_breakdown, reasoning_paths,
                    basis
                )
        return None

    def create_entry(self, seq_name, start_idx, target_idx, distractor_indices,
                     target_yaw, distractor_yaws, start_info, target_info, distractor_infos,
                     instruction_seq, steps_breakdown, reasoning_paths, basis=None):
        """Assemble the final JSON-serializable sample entry.

        Builds the shuffled A-D options, the images dict (initial view,
        per-step reasoning images, option images), a chain-of-thought trace,
        and one of two randomly chosen question templates.
        `basis` is currently unused here (kept for signature compatibility).
        """

        # Correct option first; it gets shuffled in with the distractors below.
        options = [{
            "path": format_image_path(target_info['path'], self.loader.root_path, self.image_prefix),
            "angle": target_yaw,
            "is_correct": True
        }]

        for d_idx, d_yaw, d_info in zip(distractor_indices, distractor_yaws, distractor_infos):
            options.append({
                "path": format_image_path(d_info['path'], self.loader.root_path, self.image_prefix),
                "angle": d_yaw,
                "is_correct": False
            })

        random.shuffle(options)

        # image_1 is the initial view shown in the question.
        images_dict = {
            "image_1": format_image_path(start_info['path'], self.loader.root_path, self.image_prefix)
        }

        # reasoning_image_k shows the view after the k-th rotation step.
        for i, path in enumerate(reasoning_paths):
            images_dict[f"reasoning_image_{i+1}"] = format_image_path(path, self.loader.root_path, self.image_prefix)

        # Assign shuffled options to labels A-D and record which one is correct.
        option_labels = ['A', 'B', 'C', 'D']
        option_angles_meta = {}
        correct_label = ""

        for label, opt in zip(option_labels, options):
            images_dict[f"image_{label}"] = opt["path"]
            option_angles_meta[label] = opt["angle"]
            if opt["is_correct"]:
                correct_label = label

        # Build a step-by-step chain-of-thought narration of the rotations.
        cot_lines = []
        cot_lines.append("From the initial view, I need to follow the rotation instructions step by step.")

        for i, step in enumerate(steps_breakdown):
            val, direction = format_angle_direction(step)

            if i == 0:
                prefix = "First,"
            elif i == len(steps_breakdown) - 1:
                prefix = "Finally,"
            else:
                prefix = "Then,"

            if i < len(reasoning_paths):
                # Intermediate step: reference its reasoning image.
                reasoning_key = f"reasoning_image_{i+1}"
                step_text = (f"{prefix} I rotate {val} degrees {direction}. "
                             f"The view should look like <image_start>[{reasoning_key}]<image_end>.")
            else:
                # Final step has no reasoning image — it lands on the answer view.
                step_text = (f"{prefix} I rotate {val} degrees {direction} to reach the final position.")

            cot_lines.append(step_text)

        cot_lines.append(f"Comparing the view at this final position with the options, it matches option {correct_label}. So the answer is {correct_label}.")

        think_content = " ".join(cot_lines)
        final_answer_field = f"<answer>{correct_label}</answer>"

        # Randomly pick one of the two question phrasings (see module docstring).
        template_id = random.choice([1, 2])

        if template_id == 1:
            question = f"""The object in the image <image_start>[image_1]<image_end> remains **static**.

Imagine a camera rotating around this object. The direction of rotation is defined from a **top-down bird's-eye view**.

Please identify the view of the object after the camera follows this sequence of rotations: {instruction_seq}. Based on this top-down perspective, select the correct answer.

A. <image_start>[image_A]<image_end>
B. <image_start>[image_B]<image_end>
C. <image_start>[image_C]<image_end>
D. <image_start>[image_D]<image_end>"""

        else:
            question = f"""Given the initial view of a **static** object: <image_start>[image_1]<image_end>.

Imagine looking at the setup from a **bird's-eye view (from directly above)** to determine the direction. Now, move the camera according to the following instructions: {instruction_seq}.

Which of the following images shows what the object looks like from this new position?

A. <image_start>[image_A]<image_end>
B. <image_start>[image_B]<image_end>
C. <image_start>[image_C]<image_end>
D. <image_start>[image_D]<image_end>"""

        return {
            "id": f"task2_{seq_name}_{start_idx}_{target_idx}",
            "task": "camera_view_sequence_prediction",
            "sequence": seq_name,
            "category": self.loader.category,
            "question": question,
            "images": images_dict,
            "metadata": {
                "start_frame": start_idx,
                "target_frame": target_idx,
                "total_yaw": target_yaw,
                "instruction_sequence": instruction_seq,
                "steps_degrees": steps_breakdown,
                "option_angles": option_angles_meta,
                "cot_trace": think_content
            },
            "gt_answer": final_answer_field
        }
|
|
|
|
def main():
    """CLI entry point: parse arguments, generate Task 2 samples for the
    requested categories, and write train/val/test JSONL splits."""
    arg_parser = argparse.ArgumentParser(description="Generate Task 2: Sequence Camera View Prediction")

    # Paths.
    arg_parser.add_argument("--root_path", type=str, required=True, help="CO3D dataset root")
    arg_parser.add_argument("--output_dir", type=str, default="output_task2", help="Output directory")
    arg_parser.add_argument("--image_prefix", type=str, default="data/", help="Prefix for image paths")
    arg_parser.add_argument("--filter_path", type=str, default=None, help="Root directory for filter logs (containing category/keep.json)")

    # Sampling controls.
    arg_parser.add_argument("--category", type=str, default=None, help="Specific category or None for all")
    arg_parser.add_argument("--num_samples", type=int, default=1, help="Samples per sequence")
    arg_parser.add_argument("--seed", type=int, default=42)

    # Angle constraints (degrees) for target/distractor validation.
    arg_parser.add_argument("--min_angle", type=float, default=40.0)
    arg_parser.add_argument("--max_angle", type=float, default=140.0)
    arg_parser.add_argument("--min_interval", type=float, default=25.0)

    # Split ratios and output cap.
    arg_parser.add_argument("--train_ratio", type=float, default=0.8)
    arg_parser.add_argument("--val_ratio", type=float, default=0.1)
    arg_parser.add_argument("--test_ratio", type=float, default=0.1)
    arg_parser.add_argument("--max_items", type=int, default=10000)

    args = arg_parser.parse_args()

    # Seed both RNGs for reproducible sampling.
    random.seed(args.seed)
    np.random.seed(args.seed)

    # Resolve the list of categories: either the one requested, or every
    # subdirectory under <root>/data/original.
    if args.category:
        categories = [args.category]
    else:
        data_dir = os.path.join(args.root_path, 'data', 'original')
        if not os.path.exists(data_dir):
            print(f"Error: {data_dir} not found.")
            return
        categories = sorted(
            entry for entry in os.listdir(data_dir)
            if os.path.isdir(os.path.join(data_dir, entry))
        )

    config = {
        'min_angle': args.min_angle,
        'max_angle': args.max_angle,
        'min_interval': args.min_interval
    }
    all_results = []

    for cat in categories:
        loader = CO3DDataLoader(args.root_path, cat)
        if not loader.seq_data:
            continue

        generator = Task2Generator(loader, args.image_prefix)
        sequences = loader.get_sequences()

        # Optionally restrict to the whitelist in <filter_path>/<cat>/keep.json.
        if args.filter_path:
            keep_file = os.path.join(args.filter_path, cat, "keep.json")
            if not os.path.exists(keep_file):
                print(f"[{cat}] Warning: No keep.json found at {keep_file}. Skipping category.")
                sequences = []
            else:
                try:
                    with open(keep_file, 'r') as f:
                        keep_list = set(json.load(f))
                except Exception as e:
                    # Unreadable/invalid keep.json: skip the whole category.
                    print(f"[{cat}] Error reading keep.json: {e}. Skipping category.")
                    sequences = []
                else:
                    original_count = len(sequences)
                    sequences = [s for s in sequences if s in keep_list]
                    print(f"[{cat}] Filter applied: {original_count} -> {len(sequences)} sequences retained.")

        if not sequences:
            continue

        for seq in tqdm(sequences, desc=f"Task2 - {cat}", leave=False):
            for _ in range(args.num_samples):
                sample = generator.generate_sample(seq, config)
                if sample:
                    all_results.append(sample)

    print(f"Total generated: {len(all_results)}")
    save_jsonl_splits(
        all_results,
        args.output_dir,
        ratios=(args.train_ratio, args.val_ratio, args.test_ratio),
        max_items=args.max_items,
        seed=args.seed
    )
    print(f"Done. Output saved to {args.output_dir}")
|
|
# Script entry point.
if __name__ == "__main__":
    main()
|
|