# interleaved-umm / src/action_state/gen_task2.py
# Author: Caesarrr
# feat: add cot trace generation code for task2 (commit 71c0823)
"""
物体保持静止。摄像机围绕物体进行水平轨道运动。这里的“旋转”是指摄像机在水平面上绕物体中心移动,其方向定义参考鸟瞰视角下的时钟方向(即从上往下看,顺时针或逆时针移动)
给定一张物体的初始视角图片,以及一系列旋转指令,模型需要计算这些指令叠加后的最终位置,并从A、B、C、D四个选项中选出该视角对应的图片。
1.
The object in the image <image_start>[image_1]<image_end> remains **static**.
Imagine a camera rotating around this object. The direction of rotation is defined from a **top-down bird's-eye view**.
Please identify the view of the object after the camera follows this sequence of rotations: {instruction_sequence}. Based on this top-down perspective, select the correct answer.
A. <image_start>[image_A]<image_end>
B. <image_start>[image_B]<image_end>
C. <image_start>[image_C]<image_end>
D. <image_start>[image_D]<image_end>
2.
Given the initial view of a static object: <image_start>[image_1]<image_end>.
Imagine looking at the setup from a bird's-eye view (from directly above) to determine the direction. Now, move the camera according to the following instructions: {instruction_sequence}.
Which of the following images shows what the object looks like from this new position?
A. <image_start>[image_A]<image_end>
B. <image_start>[image_B]<image_end>
C. <image_start>[image_C]<image_end>
D. <image_start>[image_D]<image_end>
"""
import argparse
import random
import json
import os
from tqdm import tqdm
import numpy as np
# 导入公共工具库 (假设 utils.py 在同一目录下)
from utils import (
CO3DDataLoader,
get_relative_yaw,
format_angle_direction,
get_angle_diff,
format_image_path,
save_jsonl_splits,
get_sequence_geometry
)
class Task2Generator:
    """Generate Task 2 samples: multi-step camera-orbit view prediction.

    The object stays static while a camera orbits it horizontally; rotation
    direction is defined from a top-down bird's-eye view (clockwise /
    counterclockwise). Each sample gives a start view plus a sequence of
    rotation instructions, and asks which of four candidate images matches
    the final camera position.
    """

    def __init__(self, loader, image_prefix: str):
        self.loader = loader
        self.image_prefix = image_prefix
        # Human-readable category name: replace underscores with spaces.
        self.cat_name = self.loader.category.replace('_', ' ')

    def verify(self, start_R, start_T, target_R, target_T, distractor_infos,
               min_angle, max_angle, min_interval, mean_center, basis):
        """
        Same validation logic as Task 1:
        ensure Start, Target and Distractors are visually distinguishable.

        Returns (is_valid, target_yaw, distractor_yaws); on failure the
        angle values are (None, []).
        """
        # 1. Compute the target yaw and check it lies within [min_angle, max_angle].
        target_yaw = get_relative_yaw(start_R, start_T, target_R, target_T, mean_center, basis)
        if not (min_angle <= abs(target_yaw) <= max_angle):
            return False, None, []
        # 2. Compute every distractor's yaw relative to the start view.
        distractor_yaws = []
        for d_info in distractor_infos:
            d_yaw = get_relative_yaw(start_R, start_T, d_info['R'], d_info['T'], mean_center, basis)
            distractor_yaws.append(d_yaw)
        # 3. Global mutual-exclusion check: every pair of candidate angles
        #    (including the start view at 0 degrees) must be at least
        #    min_interval apart.
        all_angles = [0.0, target_yaw] + distractor_yaws
        for i in range(len(all_angles)):
            for j in range(i + 1, len(all_angles)):
                if get_angle_diff(all_angles[i], all_angles[j]) < min_interval:
                    return False, None, []
        return True, target_yaw, distractor_yaws

    def _find_closest_frame(self, target_angle, available_frames, tolerance: float = 1.0):
        """
        Search available_frames for the frame whose angle is closest to
        target_angle; return its image path if the difference is within
        `tolerance` degrees, otherwise None.
        """
        best_diff = float('inf')
        best_path = None
        for item in available_frames:
            diff = get_angle_diff(target_angle, item['angle'])
            if diff < best_diff:
                best_diff = diff
                best_path = item['path']
        if best_diff <= tolerance:
            return best_path
        return None

    def _generate_instruction_sequence(self, total_yaw, available_frames):
        """
        Decompose the total rotation `total_yaw` into a multi-step rotation
        instruction sequence, while ensuring every intermediate cumulative
        angle has a matching frame image (used as reasoning images).

        Returns (instruction_str, steps, intermediate_paths), or
        (None, None, None) when no valid sequence is found within the
        attempt budget.
        """
        target_deg = int(round(total_yaw))
        # Keep the original random step-count logic (3 or 4 steps, 50/50).
        num_steps = random.choices([3, 4], weights=[0.5, 0.5])[0]
        max_attempts = 200  # More attempts, since the constraints are stricter.
        for _ in range(max_attempts):
            steps = []
            current_sum = 0
            intermediate_paths = []  # Image paths matching each intermediate step.
            valid_sequence = True
            # Generate the first n-1 steps.
            for _ in range(num_steps - 1):
                step = 0
                # Constrain each step's magnitude to the 30-45 degree range.
                while abs(step) < 30:
                    step = random.randint(-45, 45)
                # After adding this step, check that a matching image exists.
                temp_sum = current_sum + step
                found_path = self._find_closest_frame(temp_sum, available_frames, tolerance=1.0)
                if found_path:
                    steps.append(step)
                    current_sum = temp_sum
                    intermediate_paths.append(found_path)
                else:
                    # No intermediate image exists for this step, so the
                    # sequence is invalid; abandon it and retry.
                    valid_sequence = False
                    break
            if not valid_sequence:
                continue
            # Compute the final step.
            final_step = target_deg - current_sum
            # The final step must also be reasonable (neither too small nor too large).
            if 30 <= abs(final_step) <= 45:
                steps.append(final_step)
                # The last step lands on the Target; no reasoning image is
                # stored here because the Target is already an answer option.
                # Build the textual instruction description.
                instruction_parts = []
                for i, step in enumerate(steps):
                    val, direction = format_angle_direction(step)
                    action = f"rotate {val} degrees {direction}"
                    if i == 0:
                        instruction_parts.append(action)
                    else:
                        instruction_parts.append(f"then {action}")
                instruction_str = ", ".join(instruction_parts)
                return instruction_str, steps, intermediate_paths
        # [Changed] If all attempts fail, return None instead of falling back
        # to a single-step instruction.
        return None, None, None

    def generate_sample(self, seq_name, config):
        """
        Try to build one sample for the given sequence.

        `config` carries 'min_angle', 'max_angle' and 'min_interval'.
        Returns the sample dict built by create_entry, or None if no valid
        sample could be produced within the attempt budget.
        """
        frames = self.loader.get_frames(seq_name)
        if len(frames) < 5:
            return None
        # Fetch the PCA geometry information of this sequence.
        seq_data_dict = self.loader.seq_data[seq_name]
        mean_center, basis, aligned_seq_data = get_sequence_geometry(seq_data_dict)
        max_attempts = 5000
        for _ in range(max_attempts):
            # A. Randomly sample Start, Target and Distractors.
            start_idx = random.choice(frames)
            start_info = aligned_seq_data[start_idx]
            possible_targets = [f for f in frames if f != start_idx]
            target_idx = random.choice(possible_targets)
            target_info = aligned_seq_data[target_idx]
            remaining = [f for f in frames if f != start_idx and f != target_idx]
            if len(remaining) < 3: continue
            distractor_indices = random.sample(remaining, 3)
            distractor_infos = [aligned_seq_data[d] for d in distractor_indices]
            # B. Validate the geometric constraints.
            is_valid, target_yaw, distractor_yaws = self.verify(
                start_info['R'], start_info['T'],
                target_info['R'], target_info['T'],
                distractor_infos,
                config['min_angle'],
                config['max_angle'],
                config['min_interval'],
                mean_center, basis
            )
            if is_valid:
                # Precompute every frame's angle relative to the start frame,
                # used later to look up reasoning images.
                # NOTE(review): frame info comes from loader.get_frame_info
                # while start/target use aligned_seq_data — presumably these
                # agree on R/T; confirm against utils.get_sequence_geometry.
                all_frame_angles = []
                for f_idx in frames:
                    if f_idx == start_idx: continue
                    f_info = self.loader.get_frame_info(seq_name, f_idx)
                    # Angle relative to the start view.
                    f_yaw = get_relative_yaw(start_info['R'], start_info['T'], f_info['R'], f_info['T'], mean_center, basis)
                    all_frame_angles.append({
                        'angle': f_yaw,
                        'path': f_info['path']
                    })
                # C. Generate the instruction sequence (pass the available
                #    frames so intermediate images can be matched).
                instruction_seq, steps_breakdown, reasoning_paths = self._generate_instruction_sequence(target_yaw, all_frame_angles)
                # [Changed] Check the return value: None means no sequence
                # satisfying the constraints exists, so skip this attempt.
                if instruction_seq is None:
                    continue
                return self.create_entry(
                    seq_name, start_idx, target_idx, distractor_indices,
                    target_yaw, distractor_yaws,
                    start_info, target_info, distractor_infos,
                    instruction_seq, steps_breakdown, reasoning_paths,
                    basis
                )
        return None

    def create_entry(self, seq_name, start_idx, target_idx, distractor_indices,
                     target_yaw, distractor_yaws, start_info, target_info, distractor_infos,
                     instruction_seq, steps_breakdown, reasoning_paths, basis=None):
        """
        Assemble the final sample dict: shuffled options, image map, CoT
        trace, question prompt and metadata.
        """
        # 1. Build the options list (the correct target plus three distractors).
        options = [{
            "path": format_image_path(target_info['path'], self.loader.root_path, self.image_prefix),
            "angle": target_yaw,
            "is_correct": True
        }]
        for d_idx, d_yaw, d_info in zip(distractor_indices, distractor_yaws, distractor_infos):
            options.append({
                "path": format_image_path(d_info['path'], self.loader.root_path, self.image_prefix),
                "angle": d_yaw,
                "is_correct": False
            })
        random.shuffle(options)
        # 2. Map the shuffled options to labels A, B, C, D.
        images_dict = {
            "image_1": format_image_path(start_info['path'], self.loader.root_path, self.image_prefix)
        }
        # Add the reasoning images (views after each intermediate step).
        for i, path in enumerate(reasoning_paths):
            images_dict[f"reasoning_image_{i+1}"] = format_image_path(path, self.loader.root_path, self.image_prefix)
        option_labels = ['A', 'B', 'C', 'D']
        option_angles_meta = {}
        correct_label = ""
        for label, opt in zip(option_labels, options):
            images_dict[f"image_{label}"] = opt["path"]
            option_angles_meta[label] = opt["angle"]
            if opt["is_correct"]:
                correct_label = label
        # ------------------------------------------------------------------
        # 3. Build the think process (CoT)
        # ------------------------------------------------------------------
        cot_lines = []
        cot_lines.append("From the initial view, I need to follow the rotation instructions step by step.")
        # Walk through every instruction step.
        # steps_breakdown: [step1, step2, step3]
        # reasoning_paths: [img_after_step1, img_after_step2] (one shorter than steps)
        for i, step in enumerate(steps_breakdown):
            # Use format_angle_direction from utils to get the magnitude and
            # direction strings, so the wording here stays consistent with
            # instruction_seq in the question.
            val, direction = format_angle_direction(step)
            # Decide whether this is the first, an intermediate, or the final step.
            if i == 0:
                prefix = "First,"
            elif i == len(steps_breakdown) - 1:
                prefix = "Finally,"
            else:
                prefix = "Then,"
            # Compose the sentence.
            if i < len(reasoning_paths):
                # === Intermediate step: has a matching reasoning image ===
                reasoning_key = f"reasoning_image_{i+1}"
                step_text = (f"{prefix} I rotate {val} degrees {direction}. "
                             f"The view should look like <image_start>[{reasoning_key}]<image_end>.")
            else:
                # === Final step: reaches the target position; no reasoning
                # image because the answer options serve as the final view ===
                step_text = (f"{prefix} I rotate {val} degrees {direction} to reach the final position.")
            cot_lines.append(step_text)
        # Concluding sentence.
        cot_lines.append(f"Comparing the view at this final position with the options, it matches option {correct_label}. So the answer is {correct_label}.")
        think_content = " ".join(cot_lines)
        final_answer_field = f"<answer>{correct_label}</answer>"
        # ------------------------------------------------------------------
        # 4. Build the prompt (Task 2 templates) - unchanged
        # ------------------------------------------------------------------
        template_id = random.choice([1, 2])
        if template_id == 1:
            question = f"""The object in the image <image_start>[image_1]<image_end> remains **static**.
Imagine a camera rotating around this object. The direction of rotation is defined from a **top-down bird's-eye view**.
Please identify the view of the object after the camera follows this sequence of rotations: {instruction_seq}. Based on this top-down perspective, select the correct answer.
A. <image_start>[image_A]<image_end>
B. <image_start>[image_B]<image_end>
C. <image_start>[image_C]<image_end>
D. <image_start>[image_D]<image_end>"""
        else:
            question = f"""Given the initial view of a **static** object: <image_start>[image_1]<image_end>.
Imagine looking at the setup from a **bird's-eye view (from directly above)** to determine the direction. Now, move the camera according to the following instructions: {instruction_seq}.
Which of the following images shows what the object looks like from this new position?
A. <image_start>[image_A]<image_end>
B. <image_start>[image_B]<image_end>
C. <image_start>[image_C]<image_end>
D. <image_start>[image_D]<image_end>"""
        return {
            "id": f"task2_{seq_name}_{start_idx}_{target_idx}",
            "task": "camera_view_sequence_prediction",
            "sequence": seq_name,
            "category": self.loader.category,
            "question": question,
            "images": images_dict,
            "metadata": {
                "start_frame": start_idx,
                "target_frame": target_idx,
                "total_yaw": target_yaw,
                "instruction_sequence": instruction_seq,
                "steps_degrees": steps_breakdown,
                "option_angles": option_angles_meta,
                "cot_trace": think_content  # Keep the plain-text trace.
            },
            "gt_answer": final_answer_field
        }
def _build_arg_parser():
    """Construct the CLI argument parser for Task 2 generation."""
    parser = argparse.ArgumentParser(description="Generate Task 2: Sequence Camera View Prediction")
    # Path configuration
    parser.add_argument("--root_path", type=str, required=True, help="CO3D dataset root")
    parser.add_argument("--output_dir", type=str, default="output_task2", help="Output directory")
    parser.add_argument("--image_prefix", type=str, default="data/", help="Prefix for image paths")
    # Optional whitelist of sequences to keep, one keep.json per category.
    parser.add_argument("--filter_path", type=str, default=None, help="Root directory for filter logs (containing category/keep.json)")
    # Sampling configuration
    parser.add_argument("--category", type=str, default=None, help="Specific category or None for all")
    parser.add_argument("--num_samples", type=int, default=1, help="Samples per sequence")
    parser.add_argument("--seed", type=int, default=42)
    # Geometric constraints (kept consistent with Task 1) for Start/Target selection.
    parser.add_argument("--min_angle", type=float, default=40.0)
    parser.add_argument("--max_angle", type=float, default=140.0)
    parser.add_argument("--min_interval", type=float, default=25.0)
    # Train/val/test split configuration
    parser.add_argument("--train_ratio", type=float, default=0.8)
    parser.add_argument("--val_ratio", type=float, default=0.1)
    parser.add_argument("--test_ratio", type=float, default=0.1)
    parser.add_argument("--max_items", type=int, default=10000)
    return parser


def main():
    """Entry point: generate Task 2 samples per category and save JSONL splits."""
    args = _build_arg_parser().parse_args()

    # Seed both RNGs so sampling is reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)

    # Resolve the category list: either the one requested, or everything
    # found under <root>/data/original.
    if args.category:
        categories = [args.category]
    else:
        data_dir = os.path.join(args.root_path, 'data', 'original')
        if not os.path.exists(data_dir):
            print(f"Error: {data_dir} not found.")
            return
        categories = sorted(
            entry for entry in os.listdir(data_dir)
            if os.path.isdir(os.path.join(data_dir, entry))
        )

    config = {
        'min_angle': args.min_angle,
        'max_angle': args.max_angle,
        'min_interval': args.min_interval
    }
    all_results = []

    # Main generation loop over categories and their sequences.
    for cat in categories:
        loader = CO3DDataLoader(args.root_path, cat)
        if not loader.seq_data:
            continue
        generator = Task2Generator(loader, args.image_prefix)
        sequences = loader.get_sequences()

        # Apply the per-category whitelist when a filter path is given.
        if args.filter_path:
            keep_file = os.path.join(args.filter_path, cat, "keep.json")
            if not os.path.exists(keep_file):
                print(f"[{cat}] Warning: No keep.json found at {keep_file}. Skipping category.")
                sequences = []
            else:
                try:
                    with open(keep_file, 'r') as f:
                        keep_list = set(json.load(f))  # set for O(1) membership tests
                except Exception as e:
                    print(f"[{cat}] Error reading keep.json: {e}. Skipping category.")
                    sequences = []
                else:
                    original_count = len(sequences)
                    sequences = [s for s in sequences if s in keep_list]
                    print(f"[{cat}] Filter applied: {original_count} -> {len(sequences)} sequences retained.")

        if not sequences:
            continue

        for seq in tqdm(sequences, desc=f"Task2 - {cat}", leave=False):
            for _ in range(args.num_samples):
                sample = generator.generate_sample(seq, config)
                if sample:
                    all_results.append(sample)

    # Persist the results, split into train/val/test.
    print(f"Total generated: {len(all_results)}")
    save_jsonl_splits(
        all_results,
        args.output_dir,
        ratios=(args.train_ratio, args.val_ratio, args.test_ratio),
        max_items=args.max_items,
        seed=args.seed
    )
    print(f"Done. Output saved to {args.output_dir}")


if __name__ == "__main__":
    main()