Delete metrics.py
Browse files- metrics.py +0 -139
metrics.py
DELETED
|
@@ -1,139 +0,0 @@
|
|
| 1 |
-
import os
|
| 2 |
-
import json
|
| 3 |
-
import torch
|
| 4 |
-
import numpy as np
|
| 5 |
-
import torchvision
|
| 6 |
-
from tqdm import tqdm
|
| 7 |
-
|
| 8 |
-
from gaussian_renderer import render, GaussianModel
|
| 9 |
-
from scene.cameras import Camera
|
| 10 |
-
|
| 11 |
-
import lpips
|
| 12 |
-
import piq
|
| 13 |
-
from pytorch_fid import fid_score
|
| 14 |
-
from skimage.metrics import structural_similarity as ssim_fn
|
| 15 |
-
|
| 16 |
-
# --------------------------------------------------
|
| 17 |
-
# Camera loading (same cameras for both ply)
|
| 18 |
-
# --------------------------------------------------
|
| 19 |
-
def load_cameras(camera_json):
    """Build the shared list of CUDA-resident Camera objects from cameras.json.

    Each JSON entry is expected to carry the fields ``id``, ``R``, ``T``,
    ``FoVx``, ``FoVy``, ``height``, ``width`` and ``image_name`` — TODO
    confirm against the file the 3DGS exporter actually writes. The image
    slot is filled with zeros: only the camera geometry matters here,
    since both .ply files are re-rendered under these same cameras.
    """
    with open(camera_json, 'r') as f:
        cam_data = json.load(f)

    # One Camera per JSON record, moved to the GPU immediately.
    return [
        Camera(
            colmap_id=entry["id"],
            R=np.array(entry["R"]),
            T=np.array(entry["T"]),
            FoVx=entry["FoVx"],
            FoVy=entry["FoVy"],
            image=torch.zeros(3, entry["height"], entry["width"]),
            image_name=entry["image_name"],
            uid=entry["id"],
        ).cuda()
        for entry in cam_data
    ]
| 38 |
-
|
| 39 |
-
|
| 40 |
-
# --------------------------------------------------
|
| 41 |
-
# Render a ply under fixed cameras
|
| 42 |
-
# --------------------------------------------------
|
| 43 |
-
@torch.no_grad()
def render_ply(ply_path, cameras, out_dir, sh_degree=3):
    """Render a Gaussian-splat .ply under every camera and save the PNGs.

    Args:
        ply_path: path to the Gaussian model checkpoint (.ply).
        cameras: list of CUDA Camera objects (same set for every model).
        out_dir: directory to write one PNG per camera (created if absent).
        sh_degree: spherical-harmonics degree used to load the model.

    Returns:
        dict mapping ``image_name`` -> rendered (3, H, W) tensor clamped
        to [0, 1].

    NOTE(review): ``render`` is invoked with ``pipeline=None`` — confirm
    the renderer tolerates a missing pipeline config object.
    """
    os.makedirs(out_dir, exist_ok=True)

    model = GaussianModel(sh_degree)
    model.load_ply(ply_path)
    model = model.cuda()

    # Black background, matching the zero image tensors in the cameras.
    background = torch.zeros(3, device="cuda")

    results = {}
    progress_label = f"Rendering {os.path.basename(ply_path)}"
    for camera in tqdm(cameras, desc=progress_label):
        frame = render(
            camera,
            model,
            pipeline=None,
            background=background,
        )["render"]
        frame = frame.clamp(0, 1)

        torchvision.utils.save_image(
            frame, os.path.join(out_dir, camera.image_name + ".png")
        )
        results[camera.image_name] = frame

    return results
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
# --------------------------------------------------
|
| 72 |
-
# Metrics: A = GT, B = Pred
|
| 73 |
-
# --------------------------------------------------
|
| 74 |
-
def compute_metrics(gt_imgs, pred_imgs):
    """Compute mean PSNR / SSIM / LPIPS / NIQE over paired renders.

    Args:
        gt_imgs: dict ``image_name`` -> (3, H, W) float tensor in [0, 1]
            (reference renders).
        pred_imgs: dict with the same keys, holding the compared model's
            renders. Must contain every key of ``gt_imgs``.

    Returns:
        dict with the per-image means under keys "PSNR", "SSIM",
        "LPIPS", "NIQE".
    """
    psnr_vals, ssim_vals, lpips_vals, niqe_vals = [], [], [], []

    # Build the LPIPS network once and reuse it for every pair.
    lpips_fn = lpips.LPIPS(net='alex').cuda()

    for name in gt_imgs.keys():
        gt = gt_imgs[name]
        pred = pred_imgs[name]

        # piq (and lpips) expect batched (N, C, H, W) input. The original
        # code passed the unbatched (C, H, W) tensors straight to
        # piq.psnr — which fails piq's input validation — while batching
        # for lpips and niqe. Batch once here and reuse.
        gt_b = gt.unsqueeze(0)
        pred_b = pred.unsqueeze(0)

        psnr_vals.append(piq.psnr(pred_b, gt_b).item())

        # skimage SSIM works on HWC numpy arrays; images are in [0, 1].
        ssim_vals.append(
            ssim_fn(
                gt.permute(1, 2, 0).cpu().numpy(),
                pred.permute(1, 2, 0).cpu().numpy(),
                channel_axis=2,
                data_range=1.0
            )
        )

        lpips_vals.append(lpips_fn(pred_b, gt_b).item())

        # NIQE is no-reference: computed on the prediction only.
        niqe_vals.append(piq.niqe(pred_b).item())

    return {
        "PSNR": np.mean(psnr_vals),
        "SSIM": np.mean(ssim_vals),
        "LPIPS": np.mean(lpips_vals),
        "NIQE": np.mean(niqe_vals)
    }
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
def compute_fid(dir_a, dir_b):
    """Fréchet Inception Distance between two directories of rendered PNGs.

    Uses the standard Inception-v3 pool3 features (dims=2048) on CUDA.
    """
    image_dirs = [dir_a, dir_b]
    return fid_score.calculate_fid_given_paths(
        image_dirs,
        batch_size=8,
        device="cuda",
        dims=2048,
    )
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
# --------------------------------------------------
|
| 120 |
-
# Main
|
| 121 |
-
# --------------------------------------------------
|
| 122 |
-
if __name__ == "__main__":
    # A is the reference model, B the model under evaluation; both are
    # rendered under the exact same camera set from cameras.json.
    ref_ply = "A.ply"       # reference
    cmp_ply = "B.ply"       # compared model

    cams = load_cameras("cameras.json")

    ref_renders = render_ply(ref_ply, cams, "render_A")
    cmp_renders = render_ply(cmp_ply, cams, "render_B")

    # Paired metrics first, then distribution-level FID on the saved PNGs.
    scores = compute_metrics(ref_renders, cmp_renders)
    scores["FID"] = compute_fid("render_A", "render_B")

    print("\n===== A (GT) vs B (Pred) =====")
    for metric_name, value in scores.items():
        print(f"{metric_name}: {value:.4f}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|