""" build_sequences.py ================== 从族谱(genealogy.pkl)和多尺度 3DGS 量化索引构建 split 序列。 序列结构(固定长度 MAX_SEQ_LEN = 38): [parent(1)] [uncle×≤4] [child×≤32] [EOS(1)] [PAD×...] 每个 token 字段: dx, dy, dz float32 坐标偏移(parent=0,0,0;其他=相对parent) scale_idx int32 scale codebook 索引 rot_idx int32 rotation codebook 索引 dc_idx int32 DC codebook 索引 sh_idx int32 SH codebook 索引 opacity float32 不透明度原值(不量化) role uint8 身份标识: 0 = parent(父节点,坐标原点) 1 = uncle (叔伯节点,相对坐标) 2 = child (子节点,相对坐标) 3 = EOS (序列结束符) 4 = PAD (补齐,不参与计算) 层级方向(粗→细): quant_paths 按 L3, L2, L1, L0 顺序传入 L3 最粗(点最少)作为 parent,逐级向细展开 """ import os import argparse import pickle import numpy as np # ───────────────────────────────────────────── # 常量 # ───────────────────────────────────────────── ROLE_PARENT = np.uint8(0) ROLE_UNCLE = np.uint8(1) ROLE_CHILD = np.uint8(2) ROLE_EOS = np.uint8(3) ROLE_PAD = np.uint8(4) MAX_CHILDREN = 32 MAX_UNCLES = 4 # 固定序列长度:parent(1) + uncle(4) + child(32) + EOS(1) MAX_SEQ_LEN = 1 + MAX_UNCLES + MAX_CHILDREN + 1 # = 38 TOKEN_DTYPE = np.dtype([ ('dx', np.float32), ('dy', np.float32), ('dz', np.float32), ('scale_idx', np.int32), ('rot_idx', np.int32), ('dc_idx', np.int32), ('sh_idx', np.int32), ('opacity', np.float32), ('role', np.uint8), ]) # ───────────────────────────────────────────── # 1. 加载量化索引 # ───────────────────────────────────────────── def load_quantized(npz_path: str) -> dict: npz = np.load(npz_path) return { 'scale_indices': npz['scale_indices'], 'rotation_indices': npz['rotation_indices'], 'dc_indices': npz['dc_indices'], 'sh_indices': npz['sh_indices'], 'positions': npz['positions'], 'opacities': npz['opacities'].squeeze(), } # ───────────────────────────────────────────── # 2. 加载族谱 # ───────────────────────────────────────────── def load_genealogy(genealogy_path: str) -> dict: with open(genealogy_path, 'rb') as f: return pickle.load(f) # ───────────────────────────────────────────── # 3. 
def make_token(gauss_idx: int, quant: dict,
               parent_pos: np.ndarray, role: np.uint8) -> np.ndarray:
    """Build one feature token; coordinates are offsets from parent_pos."""
    offset = quant['positions'][gauss_idx] - parent_pos
    rec = np.zeros(1, dtype=TOKEN_DTYPE)
    rec['dx'], rec['dy'], rec['dz'] = offset[0], offset[1], offset[2]
    rec['scale_idx'] = quant['scale_indices'][gauss_idx]
    rec['rot_idx'] = quant['rotation_indices'][gauss_idx]
    rec['dc_idx'] = quant['dc_indices'][gauss_idx]
    rec['sh_idx'] = quant['sh_indices'][gauss_idx]
    rec['opacity'] = quant['opacities'][gauss_idx]
    rec['role'] = role
    return rec[0]


def _marker_token(role: np.uint8) -> np.ndarray:
    """All-zero token carrying only a role tag (used for EOS / PAD)."""
    rec = np.zeros(1, dtype=TOKEN_DTYPE)
    rec['role'] = role
    return rec[0]


def make_eos_token() -> np.ndarray:
    """End-of-sequence marker: zero features, role=3."""
    return _marker_token(ROLE_EOS)


def make_pad_token() -> np.ndarray:
    """Padding token: zero features, role=4, excluded from loss/attention."""
    return _marker_token(ROLE_PAD)


# ─────────────────────────────────────────────
# 4. Build all split sequences for one level
# ─────────────────────────────────────────────
def build_level_sequences(
    parent_quant: dict,
    child_quant: dict,
    children_ids: np.ndarray,  # (N_coarse, MAX_CHILDREN), -1 marks an empty slot
    max_uncles: int = MAX_UNCLES,
    min_children: int = 1,
    fixed_len: int = MAX_SEQ_LEN,
) -> list:
    """
    Build one fixed-length split sequence per coarse node.

    Token order within a sequence:
        [parent(1)] [uncle x <=max_uncles] [child x N_valid] [EOS(1)] [PAD x ...]

    role codes:
        0 = parent  coordinates pinned to (0,0,0), features from itself
        1 = uncle   coordinates relative to parent, features from itself
        2 = child   coordinates relative to parent, features from the fine scale
        3 = EOS     terminator: all real children have been emitted
        4 = PAD     filler, masked out in attention

    Returns: list of np.ndarray, each of shape (fixed_len,) with TOKEN_DTYPE.
    """
    n_parents = children_ids.shape[0]
    # Up to `half` spatial neighbours on each side of the parent index.
    half = max_uncles // 2
    neighbor_offsets = [o for o in range(-half, half + 1) if o != 0]

    sequences = []
    for p_idx, row in enumerate(children_ids):
        kids = row[row >= 0]
        if kids.size < min_children:
            continue

        origin = parent_quant['positions'][p_idx]

        # ── parent: features from itself, coordinates forced to zero ──
        head = make_token(p_idx, parent_quant, origin, ROLE_PARENT)
        head['dx'] = head['dy'] = head['dz'] = 0.0
        seq = [head]

        # ── uncles: in-range neighbours, capped at max_uncles ──────────
        taken = 0
        for off in neighbor_offsets:
            u_idx = p_idx + off
            if 0 <= u_idx < n_parents and taken < max_uncles:
                seq.append(make_token(u_idx, parent_quant, origin, ROLE_UNCLE))
                taken += 1

        # ── children: every valid child from the finer scale ──────────
        seq.extend(
            make_token(int(c), child_quant, origin, ROLE_CHILD) for c in kids
        )

        # ── EOS ────────────────────────────────────────────────────────
        seq.append(make_eos_token())

        # ── pad up to fixed_len ────────────────────────────────────────
        seq.extend(make_pad_token() for _ in range(fixed_len - len(seq)))

        # Truncate in the extreme case where uncle+child exceed fixed_len-2,
        # re-appending EOS so the sequence still terminates explicitly.
        if len(seq) > fixed_len:
            seq = seq[:fixed_len - 1] + [make_eos_token()]

        sequences.append(np.array(seq, dtype=TOKEN_DTYPE))

    return sequences


# ─────────────────────────────────────────────
# 5. Multi-level sequence construction
# ─────────────────────────────────────────────
def build_all_sequences(
    quant_paths: list,   # coarse -> fine order: [L3.npz, L2.npz, L1.npz, L0.npz]
    genealogy_path: str,
    save_dir: str,
    max_uncles: int = MAX_UNCLES,
    min_children: int = 1,
) -> None:
    """
    Build and save split sequences for every adjacent pair of scales.

    quant_paths are given coarse -> fine, e.g.:
        [L3_quantized.npz, L2_quantized.npz, L1_quantized.npz, L0_quantized.npz]

    genealogy.pkl layout:
        genealogy[3]['children_ids']  shape (N_L3, MAX_CHILDREN)  L3 coarse -> L2 fine indices
        genealogy[2]['children_ids']  shape (N_L2, MAX_CHILDREN)  L2 coarse -> L1 fine indices
        genealogy[1]['children_ids']  shape (N_L1, MAX_CHILDREN)  L1 coarse -> L0 fine indices

    Outputs (named coarse -> fine):
        save_dir/sequences_L3_to_L2.pkl
        save_dir/sequences_L2_to_L1.pkl
        save_dir/sequences_L1_to_L0.pkl
    """
    os.makedirs(save_dir, exist_ok=True)

    print(f"[build] 加载族谱:{genealogy_path}")
    genealogy = load_genealogy(genealogy_path)

    print(f"[build] 加载量化数据(共 {len(quant_paths)} 个尺度,粗→细顺序)...")
    quants = []
    for npz_path in quant_paths:
        print(f" {os.path.basename(npz_path)}")
        quants.append(load_quantized(npz_path))

    # quants[0] = coarsest (L3), ..., quants[-1] = finest (L0)
    n_levels = len(quants)

    # The genealogy key equals the coarse level number:
    #   quants[0] -> quants[1] uses key n_levels-1 (i.e. 3),
    #   quants[1] -> quants[2] uses key n_levels-2 (i.e. 2), ...
    for step, coarse_level in enumerate(range(n_levels - 1, 0, -1)):
        fine_level = coarse_level - 1
        coarse_name = f"L{coarse_level}"
        fine_name = f"L{fine_level}"

        if coarse_level not in genealogy:
            print(f"[build] 警告:族谱中无 key={coarse_level},跳过 {coarse_name}→{fine_name}")
            continue

        children_ids = genealogy[coarse_level]['children_ids']  # (N_coarse, 32)

        print(f"\n[build] 构建 {coarse_name}→{fine_name} 序列")
        print(f" 父节点数={children_ids.shape[0]}, "
              f"children_ids.shape={children_ids.shape}")

        sequences = build_level_sequences(
            quants[step],
            quants[step + 1],
            children_ids,
            max_uncles=max_uncles,
            min_children=min_children,
            fixed_len=MAX_SEQ_LEN,
        )
        if not sequences:
            print(" [警告] 没有有效序列,请检查族谱与量化数据")
            continue

        # Distribution of real children per sequence.
        child_counts = np.array(
            [int((seq['role'] == ROLE_CHILD).sum()) for seq in sequences]
        )
        print(f" 生成序列数:{len(sequences)}")
        print(f" 子节点数:min={child_counts.min()}, "
              f"max={child_counts.max()}, mean={child_counts.mean():.2f}")

        # Role histogram over every token of every sequence.
        all_roles = np.concatenate([seq['role'] for seq in sequences])
        for role_id, role_name in [(0, 'parent'), (1, 'uncle'),
                                   (2, 'child'), (3, 'EOS'), (4, 'PAD')]:
            cnt = (all_roles == role_id).sum()
            print(f" role={role_id}({role_name:6s}):{cnt:,} tokens")

        out_path = os.path.join(
            save_dir, f"sequences_{coarse_name}_to_{fine_name}.pkl")
        with open(out_path, 'wb') as f:
            pickle.dump(sequences, f, protocol=4)
        size_mb = os.path.getsize(out_path) / 1024 / 1024
        print(f" 保存 → {out_path} ({size_mb:.2f} MB)")

    print(f"\n[build] 全部序列构建完成!")
    print(f" 固定序列长度 MAX_SEQ_LEN={MAX_SEQ_LEN} "
          f"(parent=1, uncle≤{MAX_UNCLES}, child≤{MAX_CHILDREN}, EOS=1)")


# ─────────────────────────────────────────────
# 6. CLI
# ─────────────────────────────────────────────
def parse_args():
    """Parse the command-line arguments for split-sequence building."""
    ap = argparse.ArgumentParser(description="构建 3DGS split 序列")
    ap.add_argument('--quant_paths', nargs='+', required=True,
                    help='量化 .npz 路径,按粗→细顺序:L3 L2 L1 L0')
    ap.add_argument('--genealogy', required=True,
                    help='族谱 genealogy.pkl 路径')
    ap.add_argument('--save_dir', default='./sequences',
                    help='序列输出目录(默认 ./sequences)')
    ap.add_argument('--max_uncles', type=int, default=MAX_UNCLES)
    ap.add_argument('--min_children', type=int, default=1)
    return ap.parse_args()


if __name__ == '__main__':
    cli = parse_args()
    build_all_sequences(
        quant_paths=cli.quant_paths,
        genealogy_path=cli.genealogy,
        save_dir=cli.save_dir,
        max_uncles=cli.max_uncles,
        min_children=cli.min_children,
    )