# interleaved-umm / src/action_state/gen_task3.py
"""
**任务名称:** 围绕静止物体的相机视角排序(Camera View Ordering around Static Objects)
**任务设定:**
本任务模拟一个静止物体被移动相机拍摄的场景。相机围绕物体进行平滑的轨道旋转(Orbital Rotation)。
1. **数据来源:** 所有图像均从一段连续的视频中抽帧获得,因此具备内在的时空连贯性。
2. **运动约束:** 从给定的初始参考帧(Reference Frame)到序列的最后一帧,相机的总旋转角度严格限制在半个圆周(<180度)以内,保证视角的单向性和连续性。
**输入数据:**
* 一张**初始参考图(Reference Image)**,作为序列的起点(t=0)。
* 四张**候选图片(Candidate Images)**,编号为 1、2、3、4。这四张图片是参考图之后的后续帧,但顺序已被打乱。
**任务目标:**
模型需要分析参考图与候选图片之间的几何关系,将四张候选图片按照真实的时间/空间顺序重新排列,使其构成一个连贯的相机运动轨迹。
**输出格式:**
提供 A、B、C、D 四个选项,每个选项代表一种排序组合(例如 B: 2-1-4-3)。模型需选择能够恢复正确时空顺序的选项。
"""
import argparse
import random
import itertools
import json
import os
from tqdm import tqdm
import numpy as np
# 导入公共工具库
from utils import (
CO3DDataLoader,
get_relative_yaw,
format_image_path,
save_jsonl_splits,
get_sequence_geometry_pca,
decompose_angle,
)
class Task3Generator:
    """Generate Task 3 samples: ordering shuffled camera views captured while
    orbiting a static object (see the module docstring for the task spec).
    """
    def __init__(self, loader, image_prefix):
        # loader: data loader bound to a single category (provides frames,
        # per-frame info and sequence geometry); image_prefix is prepended to
        # every emitted image path.
        self.loader = loader
        self.image_prefix = image_prefix
        # Human-readable category name used inside the question prompts.
        self.cat_name = self.loader.category.replace('_', ' ')
    def _get_all_relative_angles(self, start_idx, all_frames, seq_data_dict, mean_center, basis):
        """
        Compute each frame's yaw angle relative to start_idx.
        Args:
            start_idx: index of the reference frame
            all_frames: list of all frame indices
            seq_data_dict: aligned per-frame data {frame_idx: {'R': ..., 'T': ..., 'path': ...}}
            mean_center: scene center
            basis: basis matrix
        Returns:
            list: [{'idx': frame_idx, 'angle': relative_angle}, ...]
        """
        start_info = seq_data_dict[start_idx]
        results = []
        for f_idx in all_frames:
            if f_idx == start_idx:
                # The reference frame sits at 0 degrees by definition.
                results.append({'idx': f_idx, 'angle': 0.0})
                continue
            f_info = seq_data_dict[f_idx]
            yaw = get_relative_yaw(
                start_info['R'], start_info['T'],
                f_info['R'], f_info['T'],
                mean_center, basis
            )
            results.append({'idx': f_idx, 'angle': yaw})
        return results
    def generate_sample(self, seq_name, config):
        """Try to build one sample from the given sequence.

        Returns the entry dict from create_entry, or None when no valid
        5-frame chain (Ref + 4 candidates) can be found within the attempt
        budget.
        """
        # Fetch all frame indices for this sequence, sorted ascending.
        frames = sorted(self.loader.get_frames(seq_name))
        if len(frames) < 10: # Slightly raised threshold: very short sequences rarely yield 5 frames that satisfy the interval constraints.
            return None
        # Geometry info (this helper now returns three values).
        seq_data_dict = self.loader.seq_data[seq_name]
        mean_center, basis, _ = get_sequence_geometry_pca(seq_data_dict)
        min_int = config['min_interval']
        max_int = config['max_interval']
        max_span = config['max_angle']
        max_attempts = 5000 # Number of attempts at finding a usable start frame.
        for _ in range(max_attempts):
            # 1. Randomly pick a start frame (the Reference Frame).
            #    Leave room for at least 4 later frames; they need not be
            #    consecutive, but the reference index must be early enough.
            start_idx = random.choice(frames[:-4])
            # 2. Compute every frame's angle relative to the start
            #    (using the aligned sequence data).
            rel_data = self._get_all_relative_angles(
                start_idx, frames, seq_data_dict, mean_center, basis
            )
            # Drop frames that precede the start frame.
            future_candidates = [x for x in rel_data if x['idx'] > start_idx]
            # 3. Chain building: we need [Ref, C1, C2, C3, C4]
            #    such that abs(Angle(Ci) - Angle(Ci-1)) lies in [min_int, max_int].
            chain = [{'idx': start_idx, 'angle': 0.0}]
            for step in range(4): # We need 4 subsequent frames.
                last_item = chain[-1]
                last_angle = last_item['angle']
                valid_next_frames = []
                for cand in future_candidates:
                    # Temporal order must be preserved (strictly increasing index).
                    if cand['idx'] <= last_item['idx']:
                        continue
                    # Angular gap to the previous chain frame.
                    diff = abs(cand['angle'] - last_angle)
                    # Core constraints:
                    # 1. the gap lies within [min, max];
                    # 2. the total span never exceeds max_span (relative to Ref at 0 degrees);
                    # 3. monotonicity: the new absolute angle must exceed the previous one
                    #    (the camera keeps moving away rather than turning back).
                    if (min_int <= diff <= max_int) and \
                       (abs(cand['angle']) <= max_span) and \
                       (abs(cand['angle']) > abs(last_angle)):
                        valid_next_frames.append(cand)
                if not valid_next_frames:
                    break # Chain broken: this start_idx cannot yield a full sequence.
                # Randomly pick one of the valid next frames.
                next_frame = random.choice(valid_next_frames)
                chain.append(next_frame)
            # 4. Did we successfully find all 5 frames (Ref + 4 candidates)?
            if len(chain) == 5:
                # Extract the frame indices and their angles.
                indices = [x['idx'] for x in chain]
                angles = [x['angle'] for x in chain]
                return self.create_entry(seq_name, indices, angles, rel_data)
        return None
    def create_entry(self, seq_name, frame_indices, angles, all_rel_data):
        """
        Build the final sample entry (question, images, oracle metadata).
        Args:
            frame_indices: [Ref, True_1, True_2, True_3, True_4]
            angles: [0, Ang1, Ang2, Ang3, Ang4] (relative to Ref)
            all_rel_data: [{'idx':..., 'angle':...}, ...] angle info for every frame in the sequence
        """
        # ================= Configuration =================
        ROTATION_STEPS = [15, 10] # Prefer 15-degree steps; pad with 10-degree ones.
        # NOTE(review): ROTATION_STEPS is defined but never used — the loop
        # below hardcodes [30, 15] in decompose_angle. Confirm which step set
        # is actually intended.
        # =================================================
        ref_idx = frame_indices[0]
        true_candidates_indices = frame_indices[1:]
        ref_info = self.loader.get_frame_info(seq_name, ref_idx)
        candidates_infos = [self.loader.get_frame_info(seq_name, idx) for idx in true_candidates_indices]
        # 1. Prepare the candidate image data (used for the final options).
        labeled_candidates = []
        for i, info in enumerate(candidates_infos):
            labeled_candidates.append({
                "true_order": i + 1,
                "path": format_image_path(info['path'], self.loader.root_path, self.image_prefix),
                "angle_rel_ref": angles[i+1]
            })
        # Shuffle the order in which candidates are shown to the model.
        shuffled_candidates = labeled_candidates.copy()
        random.shuffle(shuffled_candidates)
        # Initialise the images dict with the reference image.
        images_dict = {
            "image_ref": format_image_path(ref_info['path'], self.loader.root_path, self.image_prefix)
        }
        display_mapping = {}
        candidates_meta = {}
        for k, item in enumerate(shuffled_candidates):
            display_label = k + 1
            images_dict[f"image_{display_label}"] = item['path']
            display_mapping[display_label] = item['true_order']
            # <--- [Added] Record detailed metadata for this displayed image.
            candidates_meta[str(display_label)] = {
                "angle_relative_to_ref": item['angle_rel_ref'],
                "true_rank": item['true_order'],
                "image_key": f"image_{display_label}"
            }
        # 2. Build the option texts (A/B/C/D).
        true_to_display = {v: k for k, v in display_mapping.items()}
        correct_sequence = [true_to_display[i] for i in range(1, 5)]
        correct_str = "-".join(map(str, correct_sequence))
        all_permutations = list(itertools.permutations([1, 2, 3, 4]))
        distractor_pool = [
            "-".join(map(str, p))
            for p in all_permutations
            if list(p) != correct_sequence
        ]
        selected_distractors = random.sample(distractor_pool, 3)
        options_data = [{"seq": correct_str, "is_correct": True}]
        for d in selected_distractors:
            options_data.append({"seq": d, "is_correct": False})
        random.shuffle(options_data)
        option_labels = ['A', 'B', 'C', 'D']
        gt_option_label = ""
        sequence_options_text = {}
        for label, opt in zip(option_labels, options_data):
            sequence_options_text[f"sequence_{label}"] = opt["seq"]
            if opt["is_correct"]:
                gt_option_label = label
        # ------------------------------------------------------------------
        # 3. Build the oracle meta and reasoning chain (replaces the original text generation).
        # ------------------------------------------------------------------
        is_clockwise = angles[-1] < 0
        direction_str = "clockwise" if is_clockwise else "counter-clockwise"
        chain = []
        images_dict_update = {} # Holds the intermediate images used during reasoning.
        current_simulated_angle = 0.0
        prev_target_angle = 0.0
        reasoning_img_counter = 0
        # Walk the 4 target phases (Ref -> C1, C1 -> C2, ...).
        # frame_indices[0] is Ref; frame_indices[1:] are C1, C2, C3, C4.
        for i in range(1, 5):
            target_angle = angles[i]
            true_delta = abs(target_angle - prev_target_angle)
            # Decompose the phase delta into [30, 15] degree steps.
            steps = decompose_angle(true_delta, [30, 15])
            matched_display_label = true_to_display[i] # Display label (1-4) the user sees for the true i-th image.
            for step_idx, step_deg in enumerate(steps):
                reasoning_img_counter += 1
                # 1. Advance the simulated camera angle.
                if is_clockwise:
                    current_simulated_angle -= step_deg
                else:
                    current_simulated_angle += step_deg
                # 2. Find the real frame whose angle is closest to the simulated one.
                closest_frame_data = min(all_rel_data, key=lambda x: abs(x['angle'] - current_simulated_angle))
                closest_frame_idx = closest_frame_data['idx']
                closest_frame_info = self.loader.get_frame_info(seq_name, closest_frame_idx)
                # 3. [Key change] Always emit a reasoning_image:
                #    the current view is stored whether or not this is the last step.
                result_key = f"reasoning_image_{reasoning_img_counter}"
                images_dict_update[result_key] = format_image_path(
                    closest_frame_info['path'], self.loader.root_path, self.image_prefix
                )
                # 4. Is this the matching point (last step of the phase)?
                is_last_step_of_phase = (step_idx == len(steps) - 1)
                # 5. Build the chain item.
                chain_item = {
                    "step_index": len(chain) + 1,
                    "action": {
                        "type": "rotate",
                        "degrees": step_deg,
                        "direction": direction_str,
                        "total_angle_so_far": current_simulated_angle
                    },
                    "result_image_key": result_key,
                    "is_key_frame": is_last_step_of_phase,
                    "matched_display_label": matched_display_label if is_last_step_of_phase else None
                }
                chain.append(chain_item)
            prev_target_angle = target_angle
        # Merge the reasoning images into the main image dict.
        images_dict.update(images_dict_update)
        # ------------------------------------------------------------------
        # 4. Generate the question prompt (unchanged).
        # ------------------------------------------------------------------
        template_id = random.choice([1, 2])
        obj_name = self.cat_name
        if template_id == 1:
            question = f"""The {obj_name} in the initial view <image_start>[image_ref]<image_end> remains **static**. Imagine a camera rotating around this {obj_name} along a continuous path. The direction of rotation is defined from a **top-down bird's-eye view**. The total rotation from the start to the end is less than 180 degrees.
Below are four images captured during this rotation, labeled 1, 2, 3 and 4. They're currently shuffled.
1. <image_start>[image_1]<image_end>
2. <image_start>[image_2]<image_end>
3. <image_start>[image_3]<image_end>
4. <image_start>[image_4]<image_end>
Please analyze the change in perspective and determine the correct chronological order of these four images following the initial view. Select the correct sequence.
A. {sequence_options_text['sequence_A']}
B. {sequence_options_text['sequence_B']}
C. {sequence_options_text['sequence_C']}
D. {sequence_options_text['sequence_D']}"""
        else:
            question = f"""Given the initial view of the **static** {obj_name}: <image_start>[image_ref]<image_end>.
Imagine a camera rotating around this {obj_name} to capture a video sequence. The rotation covers an angle of less than 180 degrees. We have extracted four frames from the sequence, labeled 1 to 4, but their order is jumbled.
1. <image_start>[image_1]<image_end>
2. <image_start>[image_2]<image_end>
3. <image_start>[image_3]<image_end>
4. <image_start>[image_4]<image_end>
Which of the following four options correctly sorts these images into a coherent spatio-temporal sequence starting after the initial view?
A. {sequence_options_text['sequence_A']}
B. {sequence_options_text['sequence_B']}
C. {sequence_options_text['sequence_C']}
D. {sequence_options_text['sequence_D']}"""
        return {
            "id": f"task3_{seq_name}_{ref_idx}",
            "task": "camera_view_ordering",
            "sequence": seq_name,
            "category": self.loader.category,
            "question": question,
            "images": images_dict,
            "oracle_meta": {
                "direction": direction_str,
                "correct_sequence_str": correct_str, # e.g. "2-1-4-3"
                "correct_label": gt_option_label, # e.g. "B"
                "chain": chain,
                "candidates_meta": candidates_meta
            },
            "gt_answer": f"<answer>{gt_option_label}</answer>" # Placeholder; overwritten later by the Generator.
        }
def _discover_categories(root_path, category):
    """Return the list of categories to process, or None on failure.

    When *category* is given, only that single category is used; otherwise
    every sub-directory of <root_path>/data/original becomes a category.
    Prints an error and returns None when the dataset directory is missing.
    """
    if category:
        return [category]
    data_dir = os.path.join(root_path, 'data', 'original')
    if not os.path.exists(data_dir):
        print(f"Error: {data_dir} not found.")
        return None
    return sorted([d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))])
def _apply_keep_filter(sequences, filter_path, cat):
    """Restrict *sequences* to the whitelist in <filter_path>/<cat>/keep.json.

    Returns the filtered list. Returns [] (so the caller skips the whole
    category) when the whitelist file is missing or unreadable.
    """
    keep_file = os.path.join(filter_path, cat, "keep.json")
    if not os.path.exists(keep_file):
        print(f"[{cat}] Warning: No keep.json found at {keep_file}. Skipping category.")
        return []
    # Keep the try narrow: only the file read/parse can reasonably fail here.
    try:
        with open(keep_file, 'r') as f:
            keep_list = set(json.load(f))  # set for O(1) membership tests
    except Exception as e:
        print(f"[{cat}] Error reading keep.json: {e}. Skipping category.")
        return []
    original_count = len(sequences)
    filtered = [s for s in sequences if s in keep_list]
    print(f"[{cat}] Filter applied: {original_count} -> {len(filtered)} sequences retained.")
    return filtered
def main():
    """CLI entry point: generate Task 3 (camera view ordering) samples for
    one or all categories and save train/val/test JSONL splits."""
    parser = argparse.ArgumentParser(description="Generate Task 3: Camera View Ordering")
    # Path configuration
    parser.add_argument("--root_path", type=str, required=True, help="CO3D dataset root")
    parser.add_argument("--output_dir", type=str, default="output_task3", help="Output directory")
    parser.add_argument("--image_prefix", type=str, default="data/", help="Prefix for image paths")
    # [Added] Path to the whitelist files
    parser.add_argument("--filter_path", type=str, default=None, help="Root directory for filter logs (containing category/keep.json)")
    # Sampling configuration
    parser.add_argument("--category", type=str, default=None, help="Specific category or None for all")
    parser.add_argument("--num_samples", type=int, default=1, help="Samples per sequence")
    parser.add_argument("--seed", type=int, default=42)
    # Geometric constraints
    # min_interval: minimum angular gap (degrees) between adjacent frames
    parser.add_argument("--min_interval", type=float, default=15.0)
    # max_interval: maximum angular gap between adjacent frames (avoids overly large jumps)
    parser.add_argument("--max_interval", type=float, default=45.0)
    # max_angle: maximum span of the whole sequence (Ref -> last); must stay < 180
    parser.add_argument("--max_angle", type=float, default=175.0)
    # Split configuration
    parser.add_argument("--train_ratio", type=float, default=0.8)
    parser.add_argument("--val_ratio", type=float, default=0.1)
    parser.add_argument("--test_ratio", type=float, default=0.1)
    parser.add_argument("--max_items", type=int, default=10000)
    args = parser.parse_args()
    # Seed both RNGs so sampling is reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)
    categories = _discover_categories(args.root_path, args.category)
    if categories is None:
        return
    config = {
        'min_interval': args.min_interval,
        'max_interval': args.max_interval,
        'max_angle': args.max_angle
    }
    all_results = []
    for cat in categories:
        loader = CO3DDataLoader(args.root_path, cat)
        if not loader.seq_data:
            continue
        generator = Task3Generator(loader, args.image_prefix)
        sequences = loader.get_sequences()
        # [Added] Optional whitelist filtering per category.
        if args.filter_path:
            sequences = _apply_keep_filter(sequences, args.filter_path, cat)
        if not sequences:
            continue
        for seq in tqdm(sequences, desc=f"Task3 - {cat}", leave=False):
            for _ in range(args.num_samples):
                sample = generator.generate_sample(seq, config)
                if sample:
                    all_results.append(sample)
    print(f"Total generated: {len(all_results)}")
    save_jsonl_splits(
        all_results,
        args.output_dir,
        ratios=(args.train_ratio, args.val_ratio, args.test_ratio),
        max_items=args.max_items,
        seed=args.seed
    )
    print(f"Done. Output saved to {args.output_dir}")
# Standard script guard: only run generation when executed directly.
if __name__ == "__main__":
    main()