# Provenance note: uploaded via the upload-large-folder tool (commit 60fde3b, verified).
import os
import json
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
# Assumes your utility helpers all live in action_state.utils
from action_state.utils import (
CO3DDataLoader,
get_camera_center,
get_view_direction,
get_sequence_geometry
)
# ==========================================
# Configuration
# ==========================================
ROOT_PATH = "/run/determined/NAS1/public/lixinyuan/interleaved-co3d"
# Set to None to process ALL categories; otherwise only the listed ones, e.g. ["bench"]
CATEGORY = None
# CATEGORY = ["bench", "hydrant"]
# Output directory configuration
IMAGE_OUTPUT_DIR = "./debug/traj/"   # trajectory plots for kept sequences
JSON_OUTPUT_DIR = "./data/filter_log/"  # keep lists + statistics
# ==========================================
# 1. Core filtering algorithm (V3: Sinuosity + Strict Mono)
# ==========================================
def get_pca_axis_ratio(coords):
    """Return the major/minor PCA axis ratio of a set of 2D points.

    A value near 1 means an isotropic (round) spread; large values mean
    the points are nearly collinear. Returns the sentinel 999.0 for
    degenerate inputs (< 3 points, or a vanishing minor axis).
    """
    if len(coords) < 3:
        return 999.0
    centered = coords - np.mean(coords, axis=0)
    cov = np.cov(centered.T)
    # Fix: the covariance matrix is symmetric, so use eigvalsh instead of
    # eig — eig may return complex eigenvalues (tiny imaginary noise) in
    # arbitrary order, making the sqrt/comparison below unreliable.
    # eigvalsh always returns real eigenvalues in ascending order.
    eigenvalues = np.linalg.eigvalsh(cov)[::-1]  # descending
    if eigenvalues[1] < 1e-6:
        return 999.0
    return np.sqrt(eigenvalues[0]) / np.sqrt(eigenvalues[1])


def analyze_trajectory_robust(seq_data, mean_center):
    """V3 trajectory metrics: sinuosity plus strict monotonicity checks.

    Args:
        seq_data: dict mapping frame id -> {'R': 3x3 rotation, 'T': translation}
            (world-to-camera convention; camera center is C = -R^T T).
        mean_center: 3-vector scene center; only its X and Z components are used.

    Returns:
        dict with 'valid' and, when valid, the metrics consumed by
        check_if_sequence_is_good: sweep_deg, monotonicity, axis_ratio,
        radius_ratio, jump_factor, sinuosity, num_frames.
    """
    frame_indices = sorted(seq_data.keys())
    if len(frame_indices) < 10:
        return {'valid': False, 'reason': 'too_few_frames'}

    # Camera centers projected onto the XZ plane, relative to the scene center.
    coords_xz = []
    for fid in frame_indices:
        R = seq_data[fid]['R']
        T = seq_data[fid]['T']
        C = -R.T @ T
        coords_xz.append([C[0] - mean_center[0], C[2] - mean_center[2]])
    coords_xz = np.array(coords_xz)

    # Angular sweep around the center (unwrapped to avoid the +/-pi jump).
    azimuths_unwrapped = np.unwrap(np.arctan2(coords_xz[:, 1], coords_xz[:, 0]))
    sweep_rad = np.max(azimuths_unwrapped) - np.min(azimuths_unwrapped)
    sweep_deg = np.degrees(sweep_rad)

    # Monotonicity: fraction of significant (> 0.5 deg) angular steps going
    # in the dominant rotation direction.
    diffs = np.diff(azimuths_unwrapped)
    valid_diffs = diffs[np.abs(diffs) > np.radians(0.5)]
    if len(valid_diffs) == 0:
        monotonicity = 0.0
    else:
        monotonicity = max(np.sum(valid_diffs > 0), np.sum(valid_diffs < 0)) / len(valid_diffs)

    # Radius stability: robust min/max via 5th/95th percentiles.
    radii = np.linalg.norm(coords_xz, axis=1)
    mean_radius = np.mean(radii)
    radius_ratio = np.percentile(radii, 5) / (np.percentile(radii, 95) + 1e-6)

    # Step jumps and sinuosity (actual path length vs. the ideal arc length).
    steps = np.linalg.norm(np.diff(coords_xz, axis=0), axis=1)
    jump_factor = np.max(steps) / (np.median(steps) + 1e-6)
    ideal_path_length = sweep_rad * mean_radius
    if ideal_path_length > 1e-3:
        sinuosity = np.sum(steps) / (ideal_path_length + 1e-3)
    else:
        sinuosity = 999.0

    return {
        'valid': True,
        'sweep_deg': sweep_deg,
        'monotonicity': monotonicity,
        'axis_ratio': get_pca_axis_ratio(coords_xz),
        'radius_ratio': radius_ratio,
        'jump_factor': jump_factor,
        'sinuosity': sinuosity,
        'num_frames': len(frame_indices)
    }
def check_if_sequence_is_good(metrics):
    """Apply the V3 filtering thresholds to one sequence's metrics.

    Returns (keep, reason): hard vetoes are applied first, then tiered
    acceptance by angular sweep (full loop / semi loop / too small).
    """
    if not metrics['valid']:
        return False, metrics['reason']

    # Threshold configuration
    min_sweep = 120.0
    min_mono = 0.70       # strict monotonicity
    max_axis = 6.0
    min_radius = 0.3
    max_jump = 5.0
    max_sinuosity = 2.0   # strict sinuosity

    sweep = metrics['sweep_deg']
    mono = metrics['monotonicity']

    # Hard vetoes: any one of these rejects the sequence outright.
    if metrics['jump_factor'] > max_jump:
        return False, f"Jump ({metrics['jump_factor']:.1f})"
    if metrics['radius_ratio'] < min_radius:
        return False, f"Unstable Radius ({metrics['radius_ratio']:.2f})"
    if sweep > 60.0 and metrics['sinuosity'] > max_sinuosity:
        return False, f"Jittery/Wavy ({metrics['sinuosity']:.2f})"

    # Tiered acceptance by sweep angle.
    if sweep > 270.0:
        if mono < min_mono:
            return False, f"Messy Loop ({mono:.2f})"
        return True, "Full Loop"
    if sweep > min_sweep:
        if metrics['axis_ratio'] > max_axis:
            return False, f"Linear ({metrics['axis_ratio']:.1f})"
        if mono < min_mono:
            return False, f"Messy Semi ({mono:.2f})"
        return True, "Semi Loop"
    return False, f"Small Angle ({sweep:.1f})"
# ==========================================
# 2. Plotting function
# ==========================================
def plot_sequence_trajectory(loader, sequence_name, output_path, metrics):
    """Render the XZ camera trajectory of one sequence and save it as an image.

    Best-effort: any plotting failure is reported and swallowed so the
    filtering pipeline keeps running.
    """
    try:
        frame_ids = sorted(loader.get_frames(sequence_name))
        seq_data = loader.seq_data[sequence_name]
        mean_center, _, aligned_seq_data = get_sequence_geometry(seq_data, align_to_standard=True)

        centers = np.array([
            get_camera_center(aligned_seq_data[fid]['R'], aligned_seq_data[fid]['T'])
            for fid in frame_ids
        ])
        xs = centers[:, 0] - mean_center[0]
        zs = centers[:, 2] - mean_center[2]

        fig, ax = plt.subplots(1, 1, figsize=(10, 8))
        # Dashed gray line shows visiting order; dot color encodes frame index.
        ax.plot(xs, zs, c='lightgray', alpha=0.5, linestyle='--')
        ax.scatter(xs, zs, c=frame_ids, cmap='viridis', s=30, zorder=5)
        ax.scatter(0, 0, c='black', marker='X', s=200, label='Center', zorder=10)
        ax.set_title(
            f"Seq: {sequence_name}\n"
            f"Sweep={metrics['sweep_deg']:.0f}°, Mono={metrics['monotonicity']:.2f}, "
            f"Sinuosity={metrics['sinuosity']:.2f}",
            fontsize=12,
        )
        ax.axis('equal')

        # Make sure the target directory exists before writing.
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        plt.savefig(output_path, dpi=100, bbox_inches='tight')
        plt.close(fig)
    except Exception as e:
        print(f"Error plotting {sequence_name}: {e}")
# ==========================================
# 3. Pipeline control
# ==========================================
def process_category(category_name, global_stats):
    """Filter every sequence of one category and persist the keep list.

    Writes <JSON_OUTPUT_DIR>/<category>/keep.json with accepted sequence
    names, saves a trajectory plot for each kept sequence, and records
    per-category statistics into ``global_stats`` (mutated in place).
    Returns None; on loader failure the category is skipped entirely.
    """
    print(f"\nProcessing Category: {category_name}")
    # 1. Load the category data
    try:
        loader = CO3DDataLoader(ROOT_PATH, category_name)
        sequences = loader.get_sequences()
    except Exception as e:
        print(f"Failed to load category {category_name}: {e}")
        return
    # 2. Prepare output paths
    cat_img_dir = os.path.join(IMAGE_OUTPUT_DIR, category_name)
    cat_json_dir = os.path.join(JSON_OUTPUT_DIR, category_name)
    os.makedirs(cat_json_dir, exist_ok=True)
    # 3. Initialize statistics
    keep_list = []
    stats = {
        'total': len(sequences),
        'kept': 0,
        'rejected': 0,
        'reasons': {}
    }
    # 4. Iterate over sequences
    for seq_name in tqdm(sequences, desc=f"Filtering {category_name}", leave=False):
        try:
            # Compute trajectory metrics on the aligned poses
            seq_data = loader.seq_data[seq_name]
            mean_center, _, aligned_seq_data = get_sequence_geometry(seq_data, align_to_standard=True)
            metrics = analyze_trajectory_robust(aligned_seq_data, mean_center)
            # Keep/reject decision
            is_good, reason = check_if_sequence_is_good(metrics)
            if is_good:
                keep_list.append(seq_name)
                stats['kept'] += 1
                # Save a visualization only for kept sequences
                img_path = os.path.join(cat_img_dir, f"{seq_name}.png")
                plot_sequence_trajectory(loader, seq_name, img_path, metrics)
            else:
                stats['rejected'] += 1
                # Tally rejections by base reason, stripping the numeric
                # suffix: e.g. "Jump (9.1)" -> "Jump"
                base_reason = reason.split('(')[0].strip()
                stats['reasons'][base_reason] = stats['reasons'].get(base_reason, 0) + 1
        except Exception as e:
            print(f"Error processing {seq_name}: {e}")
            stats['rejected'] += 1
            stats['reasons']['Error'] = stats['reasons'].get('Error', 0) + 1
    # 5. Save keep.json
    keep_json_path = os.path.join(cat_json_dir, "keep.json")
    with open(keep_json_path, 'w') as f:
        json.dump(keep_list, f, indent=2)
    # 6. Update global statistics
    global_stats[category_name] = stats
    # Fix: guard against an empty category — the original divided by
    # stats['total'] unconditionally, raising ZeroDivisionError.
    pass_rate = stats['kept'] / stats['total'] * 100 if stats['total'] else 0.0
    print(f" -> Kept: {stats['kept']}/{stats['total']} ({pass_rate:.1f}%)")
    print(f" -> Saved keep list to: {keep_json_path}")
def main():
    """Entry point: filter every requested CO3D category and write reports.

    Side effects: creates JSON_OUTPUT_DIR, writes per-category keep lists
    (via process_category) plus a global statistics.json, and prints a
    summary banner with the overall pass rate.
    """
    start_time = time.time()
    print(f"{'='*60}")
    print(f"CO3D Trajectory Filtering Pipeline (V3)")
    print(f"Root Path: {ROOT_PATH}")
    print(f"Output Images: {IMAGE_OUTPUT_DIR}")
    print(f"Output JSONs: {JSON_OUTPUT_DIR}")
    print(f"{'='*60}")
    # 1. Determine the list of categories to process
    if CATEGORY:
        categories_to_process = CATEGORY if isinstance(CATEGORY, list) else [CATEGORY]
    else:
        # Auto-scan all category directories under <root>/data/original
        data_root = os.path.join(ROOT_PATH, 'data', 'original')
        if os.path.exists(data_root):
            categories_to_process = sorted([d for d in os.listdir(data_root)
                                            if os.path.isdir(os.path.join(data_root, d))])
        else:
            print(f"Error: Data root {data_root} not found.")
            return
    print(f"Found {len(categories_to_process)} categories to process.")
    # 2. Global statistics container (filled in by process_category)
    global_stats = {}
    # 3. Process each category in turn
    for cat in tqdm(categories_to_process, desc="Total Progress"):
        process_category(cat, global_stats)
    # 4. Save the global statistics
    os.makedirs(JSON_OUTPUT_DIR, exist_ok=True)
    stats_path = os.path.join(JSON_OUTPUT_DIR, "statistics.json")
    # Build the summary section (guarded against zero sequences)
    total_seqs = sum(s['total'] for s in global_stats.values())
    total_kept = sum(s['kept'] for s in global_stats.values())
    final_report = {
        'summary': {
            'total_categories': len(global_stats),
            'total_sequences': total_seqs,
            'total_kept': total_kept,
            'overall_pass_rate': total_kept / total_seqs if total_seqs > 0 else 0
        },
        'details': global_stats
    }
    with open(stats_path, 'w') as f:
        json.dump(final_report, f, indent=2)
    print(f"\n{'='*60}")
    print(f"Pipeline Completed in {time.time()-start_time:.1f}s")
    print(f"Global Statistics saved to: {stats_path}")
    print(f"Overall Pass Rate: {final_report['summary']['overall_pass_rate']*100:.1f}%")
    print(f"{'='*60}")
if __name__ == "__main__":
    main()