""" infer_upsample.py ================= 使用训练好的 Transformer,从粗尺度(Ln)自回归生成细尺度(L(n-1))。 流程: 1. 读取粗尺度量化数据(.npz) 2. 为每个粗节点构造前缀序列(parent + uncles) 3. 自回归生成子节点(遇到 role=EOS 或超过 MAX_CHILDREN 则停止) 4. 将子节点量化索引解码为真实属性(查 codebook) 5. 写出新的 .ply 文件 role 编码(与训练一致): 0 = parent 1 = uncle 2 = child 3 = EOS 4 = PAD """ import os import argparse import pickle import numpy as np import torch import torch.nn.functional as F from plyfile import PlyData, PlyElement # ───────────────────────────────────────────── # 常量(与 build_sequences / train_transformer 一致) # ───────────────────────────────────────────── ROLE_PARENT = 0 ROLE_UNCLE = 1 ROLE_CHILD = 2 ROLE_EOS = 3 ROLE_PAD = 4 MAX_CHILDREN = 32 MAX_UNCLES = 4 MAX_SEQ_LEN = 1 + MAX_UNCLES + MAX_CHILDREN + 1 # = 38 N_SCALE = 16384 N_ROT = 16384 N_DC = 4096 N_SH = 4096 N_ROLE = 4 TOKEN_DTYPE = np.dtype([ ('dx', np.float32), ('dy', np.float32), ('dz', np.float32), ('scale_idx', np.int32), ('rot_idx', np.int32), ('dc_idx', np.int32), ('sh_idx', np.int32), ('opacity', np.float32), ('role', np.uint8), ]) # ───────────────────────────────────────────── # 1. 加载模型 # ───────────────────────────────────────────── def load_model(ckpt_path: str, device: str = 'cpu'): from train_transformer import SplitTransformer ckpt = torch.load(ckpt_path, map_location=device) config = ckpt.get('config', {}) model = SplitTransformer(**config).to(device) state = ckpt.get('model_state', ckpt) model.load_state_dict(state) model.eval() print(f"[load] {os.path.basename(ckpt_path)} " f"d_model={config.get('d_model')}, " f"n_layers={config.get('n_layers')}") return model # ───────────────────────────────────────────── # 2. 
加载 codebook # ───────────────────────────────────────────── def load_codebooks(codebook_dir: str) -> dict: cbs = {} for name in ['scale', 'rotation', 'dc', 'sh']: path = os.path.join(codebook_dir, f"{name}_codebook.npz") cbs[name] = np.load(path)['codebook'].astype(np.float32) print(f"[load] {name}_codebook: {cbs[name].shape}") return cbs # ───────────────────────────────────────────── # 3. 加载量化数据 # ───────────────────────────────────────────── def load_quantized(npz_path: str) -> dict: npz = np.load(npz_path) return { 'scale_indices': npz['scale_indices'], 'rotation_indices': npz['rotation_indices'], 'dc_indices': npz['dc_indices'], 'sh_indices': npz['sh_indices'], 'positions': npz['positions'], 'opacities': npz['opacities'].squeeze(), } # ───────────────────────────────────────────── # 4. 构造前缀 batch(parent + uncles) # ───────────────────────────────────────────── def make_prefix_batch( p_idx: int, quant: dict, max_uncles: int = MAX_UNCLES, device: str = 'cpu', ) -> tuple: """ 构造粗节点 p_idx 的前缀 batch(parent + uncles), 返回 (batch_dict, parent_pos)。 batch_dict 中每个张量 shape (1, prefix_len)。 """ N = quant['positions'].shape[0] parent_pos = quant['positions'][p_idx] tokens = [] # ── parent(坐标置零)───────────────────── t = _make_np_token(p_idx, quant, parent_pos, ROLE_PARENT) t['dx'] = t['dy'] = t['dz'] = 0.0 tokens.append(t) # ── uncle ────────────────────────────────── half = max_uncles // 2 added_uncles = 0 for offset in list(range(-half, 0)) + list(range(1, half + 1)): u_idx = p_idx + offset if 0 <= u_idx < N and added_uncles < max_uncles: tokens.append(_make_np_token(u_idx, quant, parent_pos, ROLE_UNCLE)) added_uncles += 1 seq = np.array(tokens, dtype=TOKEN_DTYPE) return _seq_to_batch(seq, device), parent_pos def _make_np_token(gauss_idx: int, quant: dict, parent_pos: np.ndarray, role: int) -> np.ndarray: pos = quant['positions'][gauss_idx] delta = pos - parent_pos token = np.zeros(1, dtype=TOKEN_DTYPE) token['dx'] = delta[0] token['dy'] = delta[1] token['dz'] = delta[2] 
token['scale_idx'] = quant['scale_indices'][gauss_idx] token['rot_idx'] = quant['rotation_indices'][gauss_idx] token['dc_idx'] = quant['dc_indices'][gauss_idx] token['sh_idx'] = quant['sh_indices'][gauss_idx] token['opacity'] = quant['opacities'][gauss_idx] token['role'] = role return token[0] def _seq_to_batch(seq: np.ndarray, device: str) -> dict: """将 numpy 序列转为模型输入 dict,batch_size=1。""" L = len(seq) xyz = np.stack([seq['dx'], seq['dy'], seq['dz']], axis=1) # (L, 3) return { 'xyz': torch.tensor(xyz, device=device).float().unsqueeze(0), 'scale': torch.tensor(seq['scale_idx'].astype(np.int64), device=device).unsqueeze(0), 'rot': torch.tensor(seq['rot_idx'].astype(np.int64), device=device).unsqueeze(0), 'dc': torch.tensor(seq['dc_idx'].astype(np.int64), device=device).unsqueeze(0), 'sh': torch.tensor(seq['sh_idx'].astype(np.int64), device=device).unsqueeze(0), 'opacity': torch.tensor(seq['opacity'].astype(np.float32), device=device).unsqueeze(0), 'role': torch.tensor(seq['role'].astype(np.int64), device=device).unsqueeze(0), 'attn_mask': torch.ones(1, L, dtype=torch.bool, device=device), # Dataset 里的两个 loss_mask 推断时不需要,但 forward 不用它们,可省略 } def _append_token(batch: dict, token_np: np.ndarray, device: str) -> dict: """将新预测的 token 拼接到 batch 末尾,用于下一步自回归。""" new_xyz = torch.tensor( [[[token_np['dx'], token_np['dy'], token_np['dz']]]], dtype=torch.float32, device=device ) def cat(key, val, dtype): new = torch.tensor([[val]], dtype=dtype, device=device) return torch.cat([batch[key], new], dim=1) return { 'xyz': torch.cat([batch['xyz'], new_xyz], dim=1), 'scale': cat('scale', int(token_np['scale_idx']), torch.int64), 'rot': cat('rot', int(token_np['rot_idx']), torch.int64), 'dc': cat('dc', int(token_np['dc_idx']), torch.int64), 'sh': cat('sh', int(token_np['sh_idx']), torch.int64), 'opacity': cat('opacity', float(token_np['opacity']), torch.float32), 'role': cat('role', int(token_np['role']), torch.int64), 'attn_mask': torch.cat([ batch['attn_mask'], torch.ones(1, 1, 
dtype=torch.bool, device=device) ], dim=1), } # ───────────────────────────────────────────── # 5. 自回归生成子节点 # ───────────────────────────────────────────── def generate_children( model: object, prefix_batch: dict, parent_pos: np.ndarray, max_children: int = MAX_CHILDREN, temperature: float = 0.8, top_k: int = 50, device: str = 'cpu', ) -> list: """ 给定前缀 batch(parent + uncles),自回归采样子节点。 每步先预测 role: role=2(child) → 继续预测特征,加入序列 role=3(EOS) → 提前终止 其他 → 异常,强制终止 返回 list of dict,每个 dict 包含子节点所有字段 + world_pos。 """ current_batch = prefix_batch children = [] def _sample_cls(logits: torch.Tensor, n_classes: int) -> int: logits = logits / temperature if top_k > 0: k = min(top_k, n_classes) topk_vals, _ = torch.topk(logits, k) threshold = topk_vals[-1] logits = logits.masked_fill(logits < threshold, float('-inf')) probs = F.softmax(logits, dim=-1) return int(torch.multinomial(probs, 1).item()) for _ in range(max_children): with torch.no_grad(): pred = model(current_batch) # ── 先预测 role ──────────────────────── role_logits = pred['role'][0, -1, :] # (4,) pred_role = _sample_cls(role_logits, N_ROLE) if pred_role == ROLE_EOS: break # 模型预测到结束符,停止 if pred_role != ROLE_CHILD: # 预测出了 parent/uncle,模型异常,强制终止 break # ── role=child,预测其他特征 ──────────── pred_scale = _sample_cls(pred['scale'][0, -1, :], N_SCALE) pred_rot = _sample_cls(pred['rot'][0, -1, :], N_ROT) pred_dc = _sample_cls(pred['dc'][0, -1, :], N_DC) pred_sh = _sample_cls(pred['sh'][0, -1, :], N_SH) pred_xyz = pred['xyz'][0, -1, :].cpu().numpy() # (3,) 相对偏移 pred_opa = float(pred['opacity'][0, -1, 0].cpu()) # 记录子节点信息 child = { 'dx': float(pred_xyz[0]), 'dy': float(pred_xyz[1]), 'dz': float(pred_xyz[2]), 'scale_idx': pred_scale, 'rot_idx': pred_rot, 'dc_idx': pred_dc, 'sh_idx': pred_sh, 'opacity': float(np.clip(pred_opa, -10, 10)), 'role': ROLE_CHILD, 'world_pos': parent_pos + pred_xyz, # 世界坐标 } children.append(child) # 将新 token 加入序列(供下一步生成) np_token = np.zeros(1, dtype=TOKEN_DTYPE) np_token['dx'] = child['dx'] np_token['dy'] = 
child['dy'] np_token['dz'] = child['dz'] np_token['scale_idx'] = pred_scale np_token['rot_idx'] = pred_rot np_token['dc_idx'] = pred_dc np_token['sh_idx'] = pred_sh np_token['opacity'] = child['opacity'] np_token['role'] = ROLE_CHILD current_batch = _append_token(current_batch, np_token[0], device) return children # ───────────────────────────────────────────── # 6. 写出 .ply # ───────────────────────────────────────────── def children_to_ply( all_children: list, codebooks: dict, save_path: str, n_sh_rest: int = 45, ) -> None: N = len(all_children) if N == 0: print("[write_ply] 警告:没有任何子节点,跳过写出") return print(f"[write_ply] 共 {N} 个子节点,解码并写出 {save_path} ...") positions = np.array([c['world_pos'] for c in all_children], dtype=np.float32) opacities = np.array([c['opacity'] for c in all_children], dtype=np.float32) scale_idx = np.array([c['scale_idx'] for c in all_children], dtype=np.int32) rot_idx = np.array([c['rot_idx'] for c in all_children], dtype=np.int32) dc_idx = np.array([c['dc_idx'] for c in all_children], dtype=np.int32) sh_idx = np.array([c['sh_idx'] for c in all_children], dtype=np.int32) # 量化索引 → 真实属性(codebook 查表) scales = codebooks['scale'][scale_idx] # (N, 3) rotations = codebooks['rotation'][rot_idx] # (N, 4) dc = codebooks['dc'][dc_idx] # (N, 3) sh_rest = codebooks['sh'][sh_idx] # (N, 45) # 构造 PLY vertex 结构 fields = ( [('x','f4'), ('y','f4'), ('z','f4'), ('opacity','f4'), ('scale_0','f4'), ('scale_1','f4'), ('scale_2','f4'), ('rot_0','f4'), ('rot_1','f4'), ('rot_2','f4'), ('rot_3','f4'), ('f_dc_0','f4'), ('f_dc_1','f4'), ('f_dc_2','f4')] + [(f'f_rest_{i}', 'f4') for i in range(n_sh_rest)] ) vd = np.zeros(N, dtype=np.dtype(fields)) vd['x'] = positions[:, 0] vd['y'] = positions[:, 1] vd['z'] = positions[:, 2] vd['opacity'] = opacities vd['scale_0'] = scales[:, 0] vd['scale_1'] = scales[:, 1] vd['scale_2'] = scales[:, 2] vd['rot_0'] = rotations[:, 0] vd['rot_1'] = rotations[:, 1] vd['rot_2'] = rotations[:, 2] vd['rot_3'] = rotations[:, 3] vd['f_dc_0'] = 
dc[:, 0] vd['f_dc_1'] = dc[:, 1] vd['f_dc_2'] = dc[:, 2] for i in range(n_sh_rest): vd[f'f_rest_{i}'] = sh_rest[:, i] os.makedirs(os.path.dirname(os.path.abspath(save_path)), exist_ok=True) PlyData([PlyElement.describe(vd, 'vertex')]).write(save_path) size_mb = os.path.getsize(save_path) / 1024 / 1024 print(f"[write_ply] 完成 {size_mb:.2f} MB") # ───────────────────────────────────────────── # 7. 主推断流程 # ───────────────────────────────────────────── def infer_upsample( ckpt_path: str, quant_npz: str, codebook_dir: str, save_path: str, max_uncles: int = MAX_UNCLES, max_children: int = MAX_CHILDREN, temperature: float = 0.8, top_k: int = 50, device: str = 'auto', max_gaussians: int = -1, ) -> None: if device == 'auto': device = 'cuda' if torch.cuda.is_available() else 'cpu' print(f"[infer] device={device}") model = load_model(ckpt_path, device) codebooks = load_codebooks(codebook_dir) quant = load_quantized(quant_npz) N = quant['positions'].shape[0] if max_gaussians > 0: N = min(N, max_gaussians) print(f"[infer] 处理 {N} 个粗节点,最多生成 {N * max_children} 个子节点") all_children = [] total_generated = 0 early_stop_count = 0 for p_idx in range(N): if p_idx % 5000 == 0: print(f" 进度:{p_idx}/{N} 已生成子节点:{total_generated}") prefix_batch, parent_pos = make_prefix_batch( p_idx, quant, max_uncles=max_uncles, device=device ) children = generate_children( model, prefix_batch, parent_pos, max_children=max_children, temperature=temperature, top_k=top_k, device=device, ) if len(children) < max_children: early_stop_count += 1 all_children.extend(children) total_generated += len(children) print(f"\n[infer] 生成完成") print(f" 总子节点数:{total_generated}") print(f" 平均每粗节点子节点数:{total_generated / max(N, 1):.2f}") print(f" EOS 提前终止次数:{early_stop_count} / {N} " f"({100 * early_stop_count / max(N, 1):.1f}%)") children_to_ply(all_children, codebooks, save_path) print(f"\n[infer] 完成!输出 → {save_path}") # ───────────────────────────────────────────── # 8. 
# ─────────────────────────────────────────────
# 8. CLI
# ─────────────────────────────────────────────
def parse_args():
    """Define and parse the command-line interface."""
    parser = argparse.ArgumentParser(
        description="用 Transformer 从粗尺度生成细尺度 3DGS")
    parser.add_argument('--ckpt', required=True,
                        help='模型 checkpoint 路径')
    parser.add_argument('--quant_npz', required=True,
                        help='粗尺度量化数据 .npz')
    parser.add_argument('--codebook_dir', required=True,
                        help='codebook 目录')
    parser.add_argument('--save_path', required=True,
                        help='输出 .ply 路径')
    parser.add_argument('--max_uncles', type=int, default=MAX_UNCLES)
    parser.add_argument('--max_children', type=int, default=MAX_CHILDREN)
    parser.add_argument('--temperature', type=float, default=0.8)
    parser.add_argument('--top_k', type=int, default=50)
    parser.add_argument('--device', default='auto')
    parser.add_argument('--max_gaussians', type=int, default=-1,
                        help='调试用:只处理前 N 个粗节点')
    return parser.parse_args()


if __name__ == '__main__':
    cli = parse_args()
    infer_upsample(
        ckpt_path=cli.ckpt,
        quant_npz=cli.quant_npz,
        codebook_dir=cli.codebook_dir,
        save_path=cli.save_path,
        max_uncles=cli.max_uncles,
        max_children=cli.max_children,
        temperature=cli.temperature,
        top_k=cli.top_k,
        device=cli.device,
        max_gaussians=cli.max_gaussians,
    )