| """
|
| infer_upsample.py
|
| =================
|
| ไฝฟ็จ่ฎญ็ปๅฅฝ็ Transformer๏ผไป็ฒๅฐบๅบฆ๏ผLn๏ผ่ชๅๅฝ็ๆ็ปๅฐบๅบฆ๏ผL(n-1)๏ผใ
|
|
|
| ๆต็จ๏ผ
|
| 1. ่ฏปๅ็ฒๅฐบๅบฆ้ๅๆฐๆฎ๏ผ.npz๏ผ
|
| 2. ไธบๆฏไธช็ฒ่็นๆ้ ๅ็ผๅบๅ๏ผparent + uncles๏ผ
|
| 3. ่ชๅๅฝ็ๆๅญ่็น๏ผ้ๅฐ role=EOS ๆ่ถ
่ฟ MAX_CHILDREN ๅๅๆญข๏ผ
|
| 4. ๅฐๅญ่็น้ๅ็ดขๅผ่งฃ็ ไธบ็ๅฎๅฑๆง๏ผๆฅ codebook๏ผ
|
| 5. ๅๅบๆฐ็ .ply ๆไปถ
|
|
|
| role ็ผ็ ๏ผไธ่ฎญ็ปไธ่ด๏ผ๏ผ
|
| 0 = parent 1 = uncle 2 = child 3 = EOS 4 = PAD
|
| """
|
|
|
| import os
|
| import argparse
|
| import pickle
|
| import numpy as np
|
|
|
| import torch
|
| import torch.nn.functional as F
|
| from plyfile import PlyData, PlyElement
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Role codes — must match the encoding used during training:
#   0 = parent, 1 = uncle, 2 = child, 3 = EOS, 4 = PAD
# ---------------------------------------------------------------------------
ROLE_PARENT = 0  # the coarse parent node itself (first token of the prefix)
ROLE_UNCLE = 1   # index-neighbour of the parent, provided as context
ROLE_CHILD = 2   # a generated fine-scale node
ROLE_EOS = 3     # end-of-sequence marker: generation stops
ROLE_PAD = 4     # padding (training-time only)

# Generation limits.
MAX_CHILDREN = 32  # hard cap on children generated per parent
MAX_UNCLES = 4     # max neighbour (uncle) tokens in the prefix
# parent + uncles + children + EOS.
# NOTE(review): not referenced anywhere in this script; presumably kept in
# sync with the training-side sequence length — confirm against training code.
MAX_SEQ_LEN = 1 + MAX_UNCLES + MAX_CHILDREN + 1

# Class counts of the quantized attribute heads (codebook sizes).
N_SCALE = 16384
N_ROT = 16384
N_DC = 4096
N_SH = 4096
# Number of role classes considered when sampling. PAD (=4) is not among
# them — presumably the model's role head emits exactly 4 logits; confirm
# against the training definition.
N_ROLE = 4

# Structured dtype for one sequence token: position delta relative to the
# parent, quantized attribute indices, raw opacity value, and the role code.
TOKEN_DTYPE = np.dtype([
    ('dx', np.float32),
    ('dy', np.float32),
    ('dz', np.float32),
    ('scale_idx', np.int32),
    ('rot_idx', np.int32),
    ('dc_idx', np.int32),
    ('sh_idx', np.int32),
    ('opacity', np.float32),
    ('role', np.uint8),
])
|
|
|
|
|
|
|
|
|
|
|
|
|
def load_model(ckpt_path: str, device: str = 'cpu'):
    """Load a trained SplitTransformer checkpoint and return it in eval mode."""
    from train_transformer import SplitTransformer

    # NOTE(review): torch.load unpickles arbitrary objects — only feed it
    # checkpoints from trusted sources.
    checkpoint = torch.load(ckpt_path, map_location=device)
    config = checkpoint.get('config', {})

    model = SplitTransformer(**config).to(device)
    # Fall back to treating the whole checkpoint as the state_dict when no
    # 'model_state' key is present.
    model.load_state_dict(checkpoint.get('model_state', checkpoint))
    model.eval()

    print(f"[load] {os.path.basename(ckpt_path)} "
          f"d_model={config.get('d_model')}, "
          f"n_layers={config.get('n_layers')}")
    return model
|
|
|
|
|
|
|
|
|
|
|
|
|
def load_codebooks(codebook_dir: str) -> dict:
    """
    Load the four attribute codebooks from *codebook_dir*.

    Expects files named ``{scale,rotation,dc,sh}_codebook.npz``, each
    containing a 'codebook' array.

    Returns:
        dict mapping name -> float32 ndarray of codebook entries.

    Raises:
        FileNotFoundError: if any of the four files is missing.
        KeyError: if a file lacks the 'codebook' array.
    """
    cbs = {}
    for name in ['scale', 'rotation', 'dc', 'sh']:
        path = os.path.join(codebook_dir, f"{name}_codebook.npz")
        # Use a context manager: np.load on a .npz keeps the zip archive
        # open for lazy access, which leaks a file handle otherwise.
        # astype() copies, so the array stays valid after the file closes.
        with np.load(path) as npz:
            cbs[name] = npz['codebook'].astype(np.float32)
        print(f"[load] {name}_codebook: {cbs[name].shape}")
    return cbs
|
|
|
|
|
|
|
|
|
|
|
|
|
def load_quantized(npz_path: str) -> dict:
    """
    Load coarse-scale quantized Gaussian data from a .npz file.

    Returns:
        dict with per-Gaussian arrays:
            scale_indices / rotation_indices / dc_indices / sh_indices:
                integer codebook indices,
            positions: (N, 3) centers,
            opacities: (N,) opacities (trailing singleton axis squeezed).
    """
    # Context manager: np.load keeps the underlying zip archive open for
    # lazy access; closing it promptly avoids leaking the file handle.
    # Arrays are fully materialized on first access, so they remain valid
    # after the archive is closed.
    with np.load(npz_path) as npz:
        return {
            'scale_indices': npz['scale_indices'],
            'rotation_indices': npz['rotation_indices'],
            'dc_indices': npz['dc_indices'],
            'sh_indices': npz['sh_indices'],
            'positions': npz['positions'],
            'opacities': npz['opacities'].squeeze(),
        }
|
|
|
|
|
|
|
|
|
|
|
|
|
def make_prefix_batch(
    p_idx: int,
    quant: dict,
    max_uncles: int = MAX_UNCLES,
    device: str = 'cpu',
) -> tuple:
    """
    Build the prefix batch (parent + uncles) for parent node *p_idx*.

    Returns (batch_dict, parent_pos); every tensor in batch_dict has
    shape (1, prefix_len).
    """
    total = quant['positions'].shape[0]
    parent_pos = quant['positions'][p_idx]

    # Parent token goes first; its delta to itself is zero by definition.
    parent_tok = _make_np_token(p_idx, quant, parent_pos, ROLE_PARENT)
    parent_tok['dx'] = parent_tok['dy'] = parent_tok['dz'] = 0.0
    tokens = [parent_tok]

    # Uncles are index-neighbours on either side of the parent, closest
    # offsets first on the negative side, then the positive side.
    half = max_uncles // 2
    uncle_count = 0
    for offset in (o for o in range(-half, half + 1) if o != 0):
        neighbour = p_idx + offset
        if 0 <= neighbour < total and uncle_count < max_uncles:
            tokens.append(_make_np_token(neighbour, quant, parent_pos, ROLE_UNCLE))
            uncle_count += 1

    seq = np.array(tokens, dtype=TOKEN_DTYPE)
    return _seq_to_batch(seq, device), parent_pos
|
|
|
|
|
def _make_np_token(gauss_idx: int, quant: dict,
                   parent_pos: np.ndarray, role: int) -> np.ndarray:
    """Build one structured token for Gaussian *gauss_idx*, with its position
    expressed as a delta relative to *parent_pos*."""
    offset = quant['positions'][gauss_idx] - parent_pos
    rec = np.zeros(1, dtype=TOKEN_DTYPE)
    for field, value in (
        ('dx', offset[0]),
        ('dy', offset[1]),
        ('dz', offset[2]),
        ('scale_idx', quant['scale_indices'][gauss_idx]),
        ('rot_idx', quant['rotation_indices'][gauss_idx]),
        ('dc_idx', quant['dc_indices'][gauss_idx]),
        ('sh_idx', quant['sh_indices'][gauss_idx]),
        ('opacity', quant['opacities'][gauss_idx]),
        ('role', role),
    ):
        rec[field] = value
    return rec[0]
|
|
|
|
|
| def _seq_to_batch(seq: np.ndarray, device: str) -> dict:
|
| """ๅฐ numpy ๅบๅ่ฝฌไธบๆจกๅ่พๅ
ฅ dict๏ผbatch_size=1ใ"""
|
| L = len(seq)
|
| xyz = np.stack([seq['dx'], seq['dy'], seq['dz']], axis=1)
|
| return {
|
| 'xyz': torch.tensor(xyz, device=device).float().unsqueeze(0),
|
| 'scale': torch.tensor(seq['scale_idx'].astype(np.int64), device=device).unsqueeze(0),
|
| 'rot': torch.tensor(seq['rot_idx'].astype(np.int64), device=device).unsqueeze(0),
|
| 'dc': torch.tensor(seq['dc_idx'].astype(np.int64), device=device).unsqueeze(0),
|
| 'sh': torch.tensor(seq['sh_idx'].astype(np.int64), device=device).unsqueeze(0),
|
| 'opacity': torch.tensor(seq['opacity'].astype(np.float32), device=device).unsqueeze(0),
|
| 'role': torch.tensor(seq['role'].astype(np.int64), device=device).unsqueeze(0),
|
| 'attn_mask': torch.ones(1, L, dtype=torch.bool, device=device),
|
|
|
| }
|
|
|
|
|
| def _append_token(batch: dict, token_np: np.ndarray, device: str) -> dict:
|
| """ๅฐๆฐ้ขๆต็ token ๆผๆฅๅฐ batch ๆซๅฐพ๏ผ็จไบไธไธๆญฅ่ชๅๅฝใ"""
|
| new_xyz = torch.tensor(
|
| [[[token_np['dx'], token_np['dy'], token_np['dz']]]],
|
| dtype=torch.float32, device=device
|
| )
|
| def cat(key, val, dtype):
|
| new = torch.tensor([[val]], dtype=dtype, device=device)
|
| return torch.cat([batch[key], new], dim=1)
|
|
|
| return {
|
| 'xyz': torch.cat([batch['xyz'], new_xyz], dim=1),
|
| 'scale': cat('scale', int(token_np['scale_idx']), torch.int64),
|
| 'rot': cat('rot', int(token_np['rot_idx']), torch.int64),
|
| 'dc': cat('dc', int(token_np['dc_idx']), torch.int64),
|
| 'sh': cat('sh', int(token_np['sh_idx']), torch.int64),
|
| 'opacity': cat('opacity', float(token_np['opacity']), torch.float32),
|
| 'role': cat('role', int(token_np['role']), torch.int64),
|
| 'attn_mask': torch.cat([
|
| batch['attn_mask'],
|
| torch.ones(1, 1, dtype=torch.bool, device=device)
|
| ], dim=1),
|
| }
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_children(
    model: object,
    prefix_batch: dict,
    parent_pos: np.ndarray,
    max_children: int = MAX_CHILDREN,
    temperature: float = 0.8,
    top_k: int = 50,
    device: str = 'cpu',
) -> list:
    """
    Autoregressively sample child nodes given a prefix batch (parent + uncles).

    At every step the role is sampled first:
        role=2 (child) -> keep predicting attributes, append token to sequence
        role=3 (EOS)   -> normal termination
        anything else  -> unexpected role, terminate early

    Args:
        model: trained transformer, called as ``model(batch)``; expected to
            return a dict with keys 'role', 'scale', 'rot', 'dc', 'sh',
            'xyz', 'opacity' shaped (1, seq_len, ...).
        prefix_batch: batch dict from make_prefix_batch().
        parent_pos: world-space position of the parent, shape (3,).
        max_children: hard cap on the number of generated children.
        temperature: softmax temperature for sampling (must be > 0).
        top_k: keep only the k largest logits before sampling (<= 0 disables).
        device: torch device for tokens appended to the sequence.

    Returns:
        List of dicts, one per child, containing every token field
        plus 'world_pos' (parent_pos + predicted delta).
    """
    current_batch = prefix_batch
    children = []

    def _sample_cls(logits: torch.Tensor, n_classes: int) -> int:
        # Temperature-scaled, top-k-filtered categorical sampling.
        logits = logits / temperature
        if top_k > 0:
            k = min(top_k, n_classes)
            topk_vals, _ = torch.topk(logits, k)
            # Mask out everything strictly below the k-th largest logit.
            threshold = topk_vals[-1]
            logits = logits.masked_fill(logits < threshold, float('-inf'))
        probs = F.softmax(logits, dim=-1)
        return int(torch.multinomial(probs, 1).item())

    for _ in range(max_children):
        with torch.no_grad():
            pred = model(current_batch)

        # Sample the next token's role from the last sequence position.
        role_logits = pred['role'][0, -1, :]
        pred_role = _sample_cls(role_logits, N_ROLE)

        if pred_role == ROLE_EOS:
            break

        if pred_role != ROLE_CHILD:
            # Unexpected role (parent/uncle) — force termination.
            break

        # Sample quantized attribute indices for the new child.
        pred_scale = _sample_cls(pred['scale'][0, -1, :], N_SCALE)
        pred_rot = _sample_cls(pred['rot'][0, -1, :], N_ROT)
        pred_dc = _sample_cls(pred['dc'][0, -1, :], N_DC)
        pred_sh = _sample_cls(pred['sh'][0, -1, :], N_SH)

        # Continuous heads: position delta relative to the parent, opacity.
        pred_xyz = pred['xyz'][0, -1, :].cpu().numpy()
        pred_opa = float(pred['opacity'][0, -1, 0].cpu())

        child = {
            'dx': float(pred_xyz[0]),
            'dy': float(pred_xyz[1]),
            'dz': float(pred_xyz[2]),
            'scale_idx': pred_scale,
            'rot_idx': pred_rot,
            'dc_idx': pred_dc,
            'sh_idx': pred_sh,
            # Clamp to [-10, 10]; presumably opacity is a pre-sigmoid
            # logit, as in standard 3DGS .ply files — confirm.
            'opacity': float(np.clip(pred_opa, -10, 10)),
            'role': ROLE_CHILD,
            'world_pos': parent_pos + pred_xyz,
        }
        children.append(child)

        # Feed the sampled child back into the sequence for the next step.
        np_token = np.zeros(1, dtype=TOKEN_DTYPE)
        np_token['dx'] = child['dx']
        np_token['dy'] = child['dy']
        np_token['dz'] = child['dz']
        np_token['scale_idx'] = pred_scale
        np_token['rot_idx'] = pred_rot
        np_token['dc_idx'] = pred_dc
        np_token['sh_idx'] = pred_sh
        np_token['opacity'] = child['opacity']
        np_token['role'] = ROLE_CHILD
        current_batch = _append_token(current_batch, np_token[0], device)

    return children
|
|
|
|
|
|
|
|
|
|
|
|
|
def children_to_ply(
    all_children: list,
    codebooks: dict,
    save_path: str,
    n_sh_rest: int = 45,
) -> None:
    """Decode generated children through the codebooks and write a .ply file."""
    count = len(all_children)
    if count == 0:
        print("[write_ply] ่ญฆๅ๏ผๆฒกๆไปปไฝๅญ่็น๏ผ่ทณ่ฟๅๅบ")
        return

    print(f"[write_ply] ๅฑ {count} ไธชๅญ่็น๏ผ่งฃ็ ๅนถๅๅบ {save_path} ...")

    def column(key, dtype):
        return np.array([c[key] for c in all_children], dtype=dtype)

    positions = column('world_pos', np.float32)
    opacities = column('opacity', np.float32)

    # Look up the real attribute vectors from the codebooks.
    scales = codebooks['scale'][column('scale_idx', np.int32)]
    rotations = codebooks['rotation'][column('rot_idx', np.int32)]
    dc = codebooks['dc'][column('dc_idx', np.int32)]
    sh_rest = codebooks['sh'][column('sh_idx', np.int32)]

    # Vertex layout: position, opacity, 3 scales, 4 rotation components,
    # 3 DC color coefficients, then n_sh_rest higher-order SH coefficients.
    fields = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('opacity', 'f4')]
    fields += [(f'scale_{i}', 'f4') for i in range(3)]
    fields += [(f'rot_{i}', 'f4') for i in range(4)]
    fields += [(f'f_dc_{i}', 'f4') for i in range(3)]
    fields += [(f'f_rest_{i}', 'f4') for i in range(n_sh_rest)]
    vd = np.zeros(count, dtype=np.dtype(fields))

    for axis, name in enumerate(('x', 'y', 'z')):
        vd[name] = positions[:, axis]
    vd['opacity'] = opacities
    for i in range(3):
        vd[f'scale_{i}'] = scales[:, i]
    for i in range(4):
        vd[f'rot_{i}'] = rotations[:, i]
    for i in range(3):
        vd[f'f_dc_{i}'] = dc[:, i]
    for i in range(n_sh_rest):
        vd[f'f_rest_{i}'] = sh_rest[:, i]

    os.makedirs(os.path.dirname(os.path.abspath(save_path)), exist_ok=True)
    PlyData([PlyElement.describe(vd, 'vertex')]).write(save_path)
    size_mb = os.path.getsize(save_path) / 1024 / 1024
    print(f"[write_ply] ๅฎๆ {size_mb:.2f} MB")
|
|
|
|
|
|
|
|
|
|
|
|
|
def infer_upsample(
    ckpt_path: str,
    quant_npz: str,
    codebook_dir: str,
    save_path: str,
    max_uncles: int = MAX_UNCLES,
    max_children: int = MAX_CHILDREN,
    temperature: float = 0.8,
    top_k: int = 50,
    device: str = 'auto',
    max_gaussians: int = -1,
) -> None:
    """
    End-to-end upsampling: load model/codebooks/coarse data, autoregressively
    generate children for every parent Gaussian, then write them to a .ply.

    ``max_gaussians > 0`` limits processing to the first parents (debugging).
    ``device='auto'`` picks CUDA when available, CPU otherwise.
    """
    if device == 'auto':
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f"[infer] device={device}")

    model = load_model(ckpt_path, device)
    codebooks = load_codebooks(codebook_dir)
    quant = load_quantized(quant_npz)

    n_parents = quant['positions'].shape[0]
    if max_gaussians > 0:
        n_parents = min(n_parents, max_gaussians)
    print(f"[infer] ๅค็ {n_parents} ไธช็ฒ่็น๏ผๆๅค็ๆ {n_parents * max_children} ไธชๅญ่็น")

    collected = []
    n_generated = 0
    n_early_stop = 0  # parents whose generation ended before max_children

    for parent_idx in range(n_parents):
        if parent_idx % 5000 == 0:
            print(f"  ่ฟๅบฆ๏ผ{parent_idx}/{n_parents} ๅทฒ็ๆๅญ่็น๏ผ{n_generated}")

        prefix_batch, parent_pos = make_prefix_batch(
            parent_idx, quant, max_uncles=max_uncles, device=device
        )
        children = generate_children(
            model, prefix_batch, parent_pos,
            max_children=max_children,
            temperature=temperature,
            top_k=top_k,
            device=device,
        )

        if len(children) < max_children:
            n_early_stop += 1

        collected.extend(children)
        n_generated += len(children)

    print(f"\n[infer] ็ๆๅฎๆ")
    print(f"  ๆปๅญ่็นๆฐ๏ผ{n_generated}")
    print(f"  ๅนณๅๆฏ็ฒ่็นๅญ่็นๆฐ๏ผ{n_generated / max(n_parents, 1):.2f}")
    print(f"  EOS ๆๅ็ปๆญขๆฌกๆฐ๏ผ{n_early_stop} / {n_parents} "
          f"({100 * n_early_stop / max(n_parents, 1):.1f}%)")

    children_to_ply(collected, codebooks, save_path)
    print(f"\n[infer] ๅฎๆ๏ผ่พๅบ โ {save_path}")
|
|
|
|
|
|
|
|
|
|
|
|
|
def parse_args():
    """Parse command-line arguments for the upsampling inference script."""
    parser = argparse.ArgumentParser(
        description="็จ Transformer ไป็ฒๅฐบๅบฆ็ๆ็ปๅฐบๅบฆ 3DGS")
    # Required inputs / output.
    parser.add_argument('--ckpt', required=True, help='ๆจกๅ checkpoint ่ทฏๅพ')
    parser.add_argument('--quant_npz', required=True, help='็ฒๅฐบๅบฆ้ๅๆฐๆฎ .npz')
    parser.add_argument('--codebook_dir', required=True, help='codebook ็ฎๅฝ')
    parser.add_argument('--save_path', required=True, help='่พๅบ .ply ่ทฏๅพ')
    # Generation knobs (defaults mirror the module-level constants).
    parser.add_argument('--max_uncles', type=int, default=MAX_UNCLES)
    parser.add_argument('--max_children', type=int, default=MAX_CHILDREN)
    parser.add_argument('--temperature', type=float, default=0.8)
    parser.add_argument('--top_k', type=int, default=50)
    parser.add_argument('--device', default='auto')
    parser.add_argument('--max_gaussians', type=int, default=-1,
                        help='่ฐ่ฏ็จ๏ผๅชๅค็ๅ N ไธช็ฒ่็น')
    return parser.parse_args()
|
|
|
|
|
if __name__ == '__main__':
    # CLI entry point: parse arguments and run the full upsampling pipeline.
    args = parse_args()
    infer_upsample(
        ckpt_path=args.ckpt,
        quant_npz=args.quant_npz,
        codebook_dir=args.codebook_dir,
        save_path=args.save_path,
        max_uncles=args.max_uncles,
        max_children=args.max_children,
        temperature=args.temperature,
        top_k=args.top_k,
        device=args.device,
        max_gaussians=args.max_gaussians,
    )