# backup/POMA_BENCH/eval_scene_retrieval.py
# Uploaded by MatchLab via huggingface_hub (commit c94c8c9, verified).
import os
import glob
import json
import argparse
from typing import Dict, List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from safetensors.torch import load_file
from transformers import AutoImageProcessor, AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import hf_hub_download
from peft import LoraConfig, get_peft_model
# -----------------------------
# Utils
# -----------------------------
def load_json(path: str) -> dict:
    """Read the JSON file at *path* and return its parsed contents."""
    with open(path, "r") as handle:
        parsed = json.load(handle)
    return parsed
def find_scan_safetensor(scan_root: str, scan_id: str) -> str:
    """Locate the `.safetensors` file for *scan_id* under *scan_root*.

    Tries `scan_root/<scan_id>.safetensors` first, then searches
    recursively and returns the shortest matching path.

    Raises:
        FileNotFoundError: if no matching file exists.
    """
    candidate = os.path.join(scan_root, f"{scan_id}.safetensors")
    if os.path.exists(candidate):
        return candidate
    hits = glob.glob(
        os.path.join(scan_root, "**", f"{scan_id}.safetensors"), recursive=True
    )
    if not hits:
        raise FileNotFoundError(f"Cannot find safetensor for scan_id={scan_id} under {scan_root}")
    # Shortest path wins, matching the original sorted-by-length choice.
    return min(hits, key=len)
def to_vchw(point_map: torch.Tensor) -> torch.Tensor:
    """Return *point_map* as a float tensor of shape (V, 3, H, W).

    Accepts channel-first (V, 3, H, W) or channel-last (V, H, W, 3)
    input; channel-first is checked first when both could apply.

    Raises:
        ValueError: if the tensor is not 4D or neither axis 1 nor
            axis 3 has size 3.
    """
    if point_map.dim() != 4:
        raise ValueError(f"Expected 4D point_map, got shape={tuple(point_map.shape)}")
    _, dim1, _, dim3 = point_map.shape
    if dim1 == 3:
        result = point_map
    elif dim3 == 3:
        result = point_map.permute(0, 3, 1, 2).contiguous()
    else:
        raise ValueError(f"Unrecognized point_map layout: shape={tuple(point_map.shape)}")
    return result.float()
def load_safetensor_from_hf(repo_id, filename, repo_type="dataset"):
    """Download *filename* from the Hub repo (cache-aware) and load its tensors."""
    local_path = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        repo_type=repo_type,
        local_files_only=False,
    )
    return load_file(local_path)
def load_pretrain(model, pretrain_ckpt_path: str):
    """Load every model*.safetensors shard from *pretrain_ckpt_path* into *model*.

    Loading uses strict=False so partial checkpoints are tolerated;
    a summary of loaded / missing / unexpected keys is printed.

    Raises:
        FileNotFoundError: if no model*.safetensors shard exists.
    """
    print(f"📂 Loading pretrained weights from: {str(pretrain_ckpt_path)}")
    shard_paths = glob.glob(os.path.join(pretrain_ckpt_path, "model*.safetensors"))
    if not shard_paths:
        raise FileNotFoundError(f"❌ Cannot find any model*.safetensors in {str(pretrain_ckpt_path)}")
    state = {}
    for shard in shard_paths:
        print(f"📥 Loading weights from: {shard}")
        state.update(load_file(shard, device="cpu"))
    load_result = model.load_state_dict(state, strict=False)
    model_keys = set(model.state_dict().keys())
    matched = model_keys.intersection(state.keys())
    print(f"✅ Loaded keys: {len(matched)} / {len(model_keys)}")
    print(f"❌ Missing keys: {len(load_result.missing_keys)}")
    print(f"⚠️ Unexpected keys: {len(load_result.unexpected_keys)}")
# -----------------------------
# Model wrapper
# -----------------------------
class RepModel(nn.Module):
    """Wraps the fg-clip point-map encoder plus its tokenizer and image processor."""

    def __init__(self, model_root: str = "fg-clip-base"):
        super().__init__()
        # Model assets are expected one directory above the working dir.
        self.pm_encoder = AutoModelForCausalLM.from_pretrained(f'../{model_root}', trust_remote_code=True)
        self.tokenizer = AutoTokenizer.from_pretrained(f'../{model_root}', trust_remote_code=True, use_fast=True)
        self.image_processor = AutoImageProcessor.from_pretrained(f'../{model_root}')
        # Optional: print trainable params (only present on PEFT-wrapped models).
        try:
            self.pm_encoder.print_trainable_parameters()
        except Exception:
            pass

    def encode_views_batched(self, pm_vchw: torch.Tensor, batch_views: int = 32) -> torch.Tensor:
        """Encode (V,3,H,W) views in chunks of *batch_views*.

        Returns:
            (V,D) L2-normalized float features.
        """
        chunks = []
        num_views = pm_vchw.shape[0]
        start = 0
        while start < num_views:
            batch = pm_vchw[start : start + batch_views]  # (b,3,H,W)
            _, embeds = self.pm_encoder.get_image_features(batch)
            chunks.append(F.normalize(embeds.float(), dim=-1))
            start += batch_views
        return torch.cat(chunks, dim=0)

    @torch.no_grad()
    def encode_text(self, texts: List[str]) -> torch.Tensor:
        """Tokenize *texts* (padded/truncated to 248 tokens) and embed them.

        Returns:
            (B,D) L2-normalized float features.
        """
        target_device = next(self.parameters()).device
        encoded = self.tokenizer(
            texts,
            padding="max_length",
            truncation=True,
            max_length=248,
            return_tensors="pt",
        ).to(target_device)
        embeds = self.pm_encoder.get_text_features(encoded["input_ids"], walk_short_pos=False)
        return F.normalize(embeds.float(), dim=-1)
# -----------------------------
# Scene retrieval
# -----------------------------
def build_queries_from_caption_json(caption_json: dict) -> List[dict]:
    """Flatten a caption mapping into one query dict per caption.

    Converts:
        { scene_id: { "captions": [c1, c2, ...] }, ... }
    into:
        [ { "scene_id": scene_id, "caption": c }, ... ]

    Blank or missing captions are dropped; caption text is stripped.
    """
    return [
        {"scene_id": scene_id, "caption": text.strip()}
        for scene_id, payload in caption_json.items()
        for text in payload.get("captions", [])
        if text and text.strip()
    ]
@torch.no_grad()
def eval_scene_retrieval(
    model: RepModel,
    caption_json: dict,
    scan_root: str,
    device: str = "cuda",
    batch_views: int = 32,
    recall_ks: Tuple[int, ...] = (1, 5, 10),
) -> Dict[str, float]:
    """
    For each caption, retrieve the correct scene among all scenes in caption_json.
    Scene embedding = mean pooling over view embeddings.

    Args:
        model: RepModel providing the view and text encoders.
        caption_json: mapping scene_id -> {"captions": [str, ...]}.
        scan_root: NOTE(review): currently unused — point maps are downloaded
            from the 'MatchLab/ScenePoint' HF dataset below rather than read
            from scan_root; confirm whether local loading was intended.
        device: torch device string used for encoding and similarity.
        batch_views: number of views encoded per forward pass.
        recall_ks: cutoffs for the recall@k metrics.

    Returns:
        {"n": total, "top1_acc": ..., "recall@k": ...}, or {"n": 0} when
        there are no scenes or no valid queries.
    """
    model.eval().to(device)
    scene_ids = sorted(list(caption_json.keys()))
    if len(scene_ids) == 0:
        return {"n": 0}
    # Cache: scene_id -> pooled scene feature (D,) on CPU
    scene_feat_cache: Dict[str, torch.Tensor] = {}
    # Precompute all scene pooled features once (so retrieval is fast)
    for sid in scene_ids:
        filename = f'light_scannet/{sid}.safetensors'
        data = load_safetensor_from_hf('MatchLab/ScenePoint', filename, repo_type="dataset")
        pm = to_vchw(data["point_map"])  # (V,3,H,W) on CPU
        pm = pm.to(device, non_blocking=True)
        view_feats = model.encode_views_batched(pm, batch_views=batch_views)  # (V,D) on GPU
        scene_feat = view_feats.mean(dim=0)  # (D,)
        # Re-normalize after mean pooling so similarity is a plain dot product.
        scene_feat = F.normalize(scene_feat, dim=-1)
        # Store on CPU to keep GPU memory bounded across many scenes.
        scene_feat_cache[sid] = scene_feat.detach().cpu()
    # Stack gallery: (N,D)
    gallery = torch.stack([scene_feat_cache[sid] for sid in scene_ids], dim=0)  # CPU
    gallery = gallery.to(device)
    # Build queries
    queries = build_queries_from_caption_json(caption_json)
    total = 0
    top1_correct = 0
    recall_correct = {k: 0 for k in recall_ks}
    for q in queries:
        gt_sid = q["scene_id"]
        caption = q["caption"]
        # Skip queries whose ground-truth scene has no cached feature.
        if gt_sid not in scene_feat_cache:
            continue
        text_feat = model.encode_text([caption])[0]  # (D,) on GPU
        # similarity over all scenes: (N,)
        sims = gallery @ text_feat.unsqueeze(-1)  # (N,1)
        sims = sims.squeeze(-1)
        ranked = torch.argsort(sims, descending=True)  # indices into scene_ids
        pred_sid = scene_ids[int(ranked[0].item())]
        total += 1
        if pred_sid == gt_sid:
            top1_correct += 1
        for k in recall_ks:
            # Clamp k to the gallery size so recall@k stays well-defined
            # when there are fewer scenes than k.
            k_eff = min(k, len(scene_ids))
            topk_idx = ranked[:k_eff].tolist()
            topk_sids = [scene_ids[i] for i in topk_idx]
            if gt_sid in topk_sids:
                recall_correct[k] += 1
        # optional debug print
        print(f"[Q] GT={gt_sid} | Pred={pred_sid} | caption={caption[:80]}...")
    if total == 0:
        return {"n": 0}
    out = {"n": total, "top1_acc": top1_correct / total}
    for k in recall_ks:
        out[f"recall@{k}"] = recall_correct[k] / total
    return out
def main():
    """CLI entry point: parse args, build the model, run retrieval, print metrics."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--caption_json", type=str, required=True, help="JSON mapping scene_id -> {captions:[...]}")
    parser.add_argument("--scan_root", type=str, required=True, help="Root dir containing scene safetensors")
    parser.add_argument("--ckpt", type=str, default="", help="Optional: dir with model*.safetensors")
    parser.add_argument("--model_root", type=str, default="fg-clip-base")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--batch_views", type=int, default=32)
    args = parser.parse_args()

    captions = load_json(args.caption_json)
    model = RepModel(model_root=args.model_root)
    if args.ckpt:
        load_pretrain(model, args.ckpt)

    metrics = eval_scene_retrieval(
        model=model,
        caption_json=captions,
        scan_root=args.scan_root,
        device=args.device,
        batch_views=args.batch_views,
        recall_ks=(1, 5, 10),
    )

    print("\n=== Scene Retrieval Results ===")
    for key, value in metrics.items():
        rendered = f"{value:.4f}" if isinstance(value, float) else f"{value}"
        print(f"{key:>10}: {rendered}")


if __name__ == "__main__":
    main()