import os
import sys
import json
from pathlib import Path

import numpy as np
import torch
import torchvision
from PIL import Image
from tqdm import tqdm

# Project-local rendering modules.
from gaussian_renderer import render, GaussianModel
from utils.graphics_utils import getWorld2View2, getProjectionMatrix, focal2fov
from scene.cameras import Camera

# Evaluation metrics.
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage.metrics import structural_similarity as ssim
import lpips
from scipy import linalg


class MetricsCalculator:
    """Helper computing image-quality metrics (PSNR / SSIM / LPIPS / NIQE / FID)."""

    def __init__(self, device='cuda'):
        self.device = device
        # LPIPS network (AlexNet backbone), created once and reused for all frames.
        self.lpips_fn = lpips.LPIPS(net='alex').to(device)
        # Remember a failed pyiqa import so the warning is printed only once
        # instead of twice per rendered frame.
        self._niqe_unavailable = False

    def calculate_psnr(self, img1, img2):
        """PSNR between two HxWxC float images in [0, 1]."""
        return psnr(img1, img2, data_range=1.0)

    def calculate_ssim(self, img1, img2):
        """SSIM between two HxWxC float images in [0, 1].

        The deprecated ``multichannel=True`` flag was dropped: it has been
        removed from scikit-image and ``channel_axis=2`` is its replacement.
        """
        return ssim(img1, img2, data_range=1.0, channel_axis=2)

    def calculate_lpips(self, img1, img2):
        """LPIPS distance between two HxWxC float images in [0, 1]."""
        img1_tensor = torch.from_numpy(img1).permute(2, 0, 1).unsqueeze(0).float().to(self.device)
        img2_tensor = torch.from_numpy(img2).permute(2, 0, 1).unsqueeze(0).float().to(self.device)
        # LPIPS expects inputs normalized to [-1, 1].
        img1_tensor = img1_tensor * 2 - 1
        img2_tensor = img2_tensor * 2 - 1
        with torch.no_grad():
            lpips_value = self.lpips_fn(img1_tensor, img2_tensor)
        return lpips_value.item()

    def calculate_niqe(self, img):
        """NIQE (no-reference image quality) for an HxWxC float image.

        Returns None when the optional ``pyiqa`` dependency is not installed.
        """
        if self._niqe_unavailable:
            return None
        try:
            import pyiqa
        except ImportError:
            self._niqe_unavailable = True
            print("警告: pyiqa未安装,无法计算NIQE。请运行: pip install pyiqa")
            return None
        if not hasattr(self, 'niqe_metric'):
            # The metric object is cached after the first call.
            self.niqe_metric = pyiqa.create_metric('niqe', device=self.device)
        img_tensor = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0).float().to(self.device)
        return self.niqe_metric(img_tensor).item()

    def calculate_fid_features(self, img):
        """Extract an Inception-v3 feature vector (for FID) from one image."""
        from torchvision.models import inception_v3
        if not hasattr(self, 'inception_model'):
            # NOTE(review): `pretrained=` is deprecated in newer torchvision in
            # favour of `weights=`; kept for compatibility with older versions.
            self.inception_model = inception_v3(pretrained=True, transform_input=False).to(self.device)
            self.inception_model.eval()
            # Drop the classification head so pooled features are returned.
            self.inception_model.fc = torch.nn.Identity()
        # Inception-v3 expects 299x299 inputs.
        img_pil = Image.fromarray((img * 255).astype(np.uint8))
        img_pil = img_pil.resize((299, 299), Image.BILINEAR)
        img_array = np.array(img_pil) / 255.0
        img_tensor = torch.from_numpy(img_array).permute(2, 0, 1).unsqueeze(0).float().to(self.device)
        img_tensor = (img_tensor - 0.5) / 0.5  # normalize to [-1, 1]
        with torch.no_grad():
            features = self.inception_model(img_tensor)
        return features.cpu().numpy().flatten()

    @staticmethod
    def calculate_fid(features1, features2):
        """Frechet Inception Distance between two (N, D) feature matrices.

        NOTE(review): with only a handful of images the covariance estimate is
        degenerate and the resulting FID is unreliable — interpret with care.
        """
        mu1, sigma1 = features1.mean(axis=0), np.cov(features1, rowvar=False)
        mu2, sigma2 = features2.mean(axis=0), np.cov(features2, rowvar=False)
        diff = mu1 - mu2
        covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
        if np.iscomplexobj(covmean):
            # sqrtm can produce tiny imaginary parts from numerical noise.
            covmean = covmean.real
        fid = diff.dot(diff) + np.trace(sigma1 + sigma2 - 2 * covmean)
        return fid


def load_cameras_from_json(camera_json_path, device='cuda'):
    """Build Camera objects from a 3DGS-style ``cameras.json`` file.

    Args:
        camera_json_path: path to cameras.json.
        device: device for the camera transform tensors.

    Returns:
        list of Camera objects with view/projection matrices pre-computed.
    """
    with open(camera_json_path, 'r') as f:
        camera_data = json.load(f)

    cameras = []
    for cam_info in camera_data:
        uid = cam_info['id']
        img_name = cam_info['img_name']
        width = cam_info['width']
        height = cam_info['height']
        # Focal lengths in pixels.
        fx = cam_info['fx']
        fy = cam_info['fy']

        # cameras.json stores the camera-to-world pose.
        position = np.array(cam_info['position'])
        rotation = np.array(cam_info['rotation'])

        # Convert to world-to-camera.
        R_w2c = rotation.T
        T_w2c = -R_w2c @ position

        trans = np.array([0.0, 0.0, 0.0])
        scale = 1.0
        # NOTE(review): the reference 3DGS getWorld2View2 transposes R
        # internally (it expects the camera-to-world rotation). Passing the
        # already-transposed R_w2c here may double-transpose — verify against
        # this project's utils.graphics_utils implementation.
        world_view_transform = torch.tensor(
            getWorld2View2(R_w2c, T_w2c, trans, scale)
        ).transpose(0, 1).to(device)

        # Projection matrix from the field of view.
        znear = 0.01
        zfar = 100.0
        FovX = focal2fov(fx, width)
        FovY = focal2fov(fy, height)
        projection_matrix = getProjectionMatrix(
            znear=znear, zfar=zfar, fovX=FovX, fovY=FovY
        ).transpose(0, 1).to(device)
        full_proj_transform = (
            world_view_transform.unsqueeze(0).bmm(projection_matrix.unsqueeze(0))
        ).squeeze(0)
        camera_center = world_view_transform.inverse()[3, :3]

        # A dummy image is supplied; only the camera geometry is used here.
        camera = Camera(
            colmap_id=uid,
            R=R_w2c,
            T=T_w2c,
            FoVx=FovX,
            FoVy=FovY,
            image=torch.zeros((3, height, width)),
            gt_alpha_mask=None,
            image_name=img_name,
            uid=uid
        )
        # Override the transforms computed by Camera.__init__ with ours.
        camera.world_view_transform = world_view_transform
        camera.projection_matrix = projection_matrix
        camera.full_proj_transform = full_proj_transform
        camera.camera_center = camera_center
        camera.image_width = width
        camera.image_height = height
        cameras.append(camera)

    return cameras


def render_and_evaluate(original_ply, compressed_ply, cameras_json, output_dir,
                        sh_degree=3, kernel_size=0.1, ground_truth_dir=None):
    """Render every camera view with both models and compute quality metrics.

    Args:
        original_ply: path to the original .ply file.
        compressed_ply: path to the compressed .ply file.
        cameras_json: path to cameras.json.
        output_dir: directory for rendered images and metric reports.
        sh_degree: spherical-harmonics degree of the Gaussian models.
        kernel_size: rendering kernel size forwarded to ``render``.
        ground_truth_dir: optional directory of ground-truth images; when
            given, both models are additionally compared against GT.
    """
    device = 'cuda'
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Per-model output directories for the rendered frames.
    original_render_dir = output_dir / "original"
    compressed_render_dir = output_dir / "compressed"
    original_render_dir.mkdir(exist_ok=True)
    compressed_render_dir.mkdir(exist_ok=True)

    # White background.
    bg_color = torch.tensor([1, 1, 1], dtype=torch.float32, device=device)

    # Minimal stand-in for the training PipelineParams.
    class PipelineParams:
        def __init__(self):
            self.convert_SHs_python = False
            self.compute_cov3D_python = False
            self.debug = False

    pipeline = PipelineParams()

    # Load the original model.
    print("加载原始模型...")
    gaussians_original = GaussianModel(sh_degree)
    gaussians_original.load_ply(original_ply)
    print(f"  - 原始高斯点数: {len(gaussians_original.get_xyz)}")

    # Load the compressed model.
    print("加载压缩模型...")
    gaussians_compressed = GaussianModel(sh_degree)
    gaussians_compressed.load_ply(compressed_ply)
    print(f"  - 压缩后高斯点数: {len(gaussians_compressed.get_xyz)}")
    print(f"  - 压缩率: {len(gaussians_compressed.get_xyz)/len(gaussians_original.get_xyz)*100:.2f}%")

    # Load the cameras.
    print("加载相机参数...")
    cameras = load_cameras_from_json(cameras_json, device=device)
    print(f"加载了 {len(cameras)} 个相机视角")

    metrics_calc = MetricsCalculator(device=device)

    # Per-frame metric accumulators.
    results = {
        'psnr': [],
        'ssim': [],
        'lpips': [],
        'niqe_original': [],
        'niqe_compressed': []
    }
    if ground_truth_dir:
        results['psnr_vs_gt_original'] = []
        results['psnr_vs_gt_compressed'] = []
        results['ssim_vs_gt_original'] = []
        results['ssim_vs_gt_compressed'] = []
        results['lpips_vs_gt_original'] = []
        results['lpips_vs_gt_compressed'] = []

    # Inception features collected across all frames for FID.
    original_features = []
    compressed_features = []

    print("\n开始渲染和评估...")
    with torch.no_grad():
        for i, camera in enumerate(tqdm(cameras, desc="渲染进度")):
            # Render the original model.
            rendering_original = render(camera, gaussians_original, pipeline, bg_color,
                                        kernel_size=kernel_size)
            img_original = rendering_original["render"]

            # Render the compressed model.
            rendering_compressed = render(camera, gaussians_compressed, pipeline, bg_color,
                                          kernel_size=kernel_size)
            img_compressed = rendering_compressed["render"]

            # Save both renders.
            torchvision.utils.save_image(
                img_original, original_render_dir / f"{camera.image_name}.png"
            )
            torchvision.utils.save_image(
                img_compressed, compressed_render_dir / f"{camera.image_name}.png"
            )

            # Convert to numpy for evaluation (CHW -> HWC), clipped to [0, 1].
            img_original_np = img_original.permute(1, 2, 0).cpu().numpy()
            img_compressed_np = img_compressed.permute(1, 2, 0).cpu().numpy()
            img_original_np = np.clip(img_original_np, 0, 1)
            img_compressed_np = np.clip(img_compressed_np, 0, 1)

            # Compressed-vs-original full-reference metrics.
            results['psnr'].append(metrics_calc.calculate_psnr(img_original_np, img_compressed_np))
            results['ssim'].append(metrics_calc.calculate_ssim(img_original_np, img_compressed_np))
            results['lpips'].append(metrics_calc.calculate_lpips(img_original_np, img_compressed_np))

            # NIQE (no-reference); None when pyiqa is unavailable.
            niqe_orig = metrics_calc.calculate_niqe(img_original_np)
            niqe_comp = metrics_calc.calculate_niqe(img_compressed_np)
            if niqe_orig is not None:
                results['niqe_original'].append(niqe_orig)
                results['niqe_compressed'].append(niqe_comp)

            # Collect FID features.
            original_features.append(metrics_calc.calculate_fid_features(img_original_np))
            compressed_features.append(metrics_calc.calculate_fid_features(img_compressed_np))

            # Optional comparison against ground-truth images.
            if ground_truth_dir:
                possible_names = [
                    f"{camera.image_name}.png",
                    f"{camera.image_name}.jpg",
                    f"{camera.image_name}.PNG",
                    f"{camera.image_name}.JPG"
                ]
                gt_img = None
                for name in possible_names:
                    gt_path = Path(ground_truth_dir) / name
                    if gt_path.exists():
                        gt_img = np.array(Image.open(gt_path).convert('RGB')) / 255.0
                        break

                if gt_img is not None:
                    results['psnr_vs_gt_original'].append(
                        metrics_calc.calculate_psnr(gt_img, img_original_np)
                    )
                    results['psnr_vs_gt_compressed'].append(
                        metrics_calc.calculate_psnr(gt_img, img_compressed_np)
                    )
                    results['ssim_vs_gt_original'].append(
                        metrics_calc.calculate_ssim(gt_img, img_original_np)
                    )
                    results['ssim_vs_gt_compressed'].append(
                        metrics_calc.calculate_ssim(gt_img, img_compressed_np)
                    )
                    results['lpips_vs_gt_original'].append(
                        metrics_calc.calculate_lpips(gt_img, img_original_np)
                    )
                    results['lpips_vs_gt_compressed'].append(
                        metrics_calc.calculate_lpips(gt_img, img_compressed_np)
                    )

    # FID over all collected features.
    print("\n计算FID...")
    original_features = np.array(original_features)
    compressed_features = np.array(compressed_features)
    fid_score = MetricsCalculator.calculate_fid(original_features, compressed_features)

    # Report compressed-vs-original results.
    print("\n" + "="*60)
    print("评估结果 (压缩后 vs 原始)")
    print("="*60)
    print(f"PSNR:  {np.mean(results['psnr']):.2f} ± {np.std(results['psnr']):.2f} dB")
    print(f"SSIM:  {np.mean(results['ssim']):.4f} ± {np.std(results['ssim']):.4f}")
    print(f"LPIPS: {np.mean(results['lpips']):.4f} ± {np.std(results['lpips']):.4f}")
    if results['niqe_original']:
        print(f"NIQE (原始): {np.mean(results['niqe_original']):.4f} ± {np.std(results['niqe_original']):.4f}")
        print(f"NIQE (压缩): {np.mean(results['niqe_compressed']):.4f} ± {np.std(results['niqe_compressed']):.4f}")
    print(f"FID:   {fid_score:.4f}")

    # Report ground-truth comparison when available.
    if ground_truth_dir and results['psnr_vs_gt_original']:
        print("\n" + "="*60)
        print("与Ground Truth对比")
        print("="*60)
        print("原始模型 vs GT:")
        print(f"  PSNR:  {np.mean(results['psnr_vs_gt_original']):.2f} ± {np.std(results['psnr_vs_gt_original']):.2f} dB")
        print(f"  SSIM:  {np.mean(results['ssim_vs_gt_original']):.4f} ± {np.std(results['ssim_vs_gt_original']):.4f}")
        print(f"  LPIPS: {np.mean(results['lpips_vs_gt_original']):.4f} ± {np.std(results['lpips_vs_gt_original']):.4f}")
        print("\n压缩模型 vs GT:")
        print(f"  PSNR:  {np.mean(results['psnr_vs_gt_compressed']):.2f} ± {np.std(results['psnr_vs_gt_compressed']):.2f} dB")
        print(f"  SSIM:  {np.mean(results['ssim_vs_gt_compressed']):.4f} ± {np.std(results['ssim_vs_gt_compressed']):.4f}")
        print(f"  LPIPS: {np.mean(results['lpips_vs_gt_compressed']):.4f} ± {np.std(results['lpips_vs_gt_compressed']):.4f}")

    # Summary statistics written to metrics.json.
    results_summary = {
        'compression_comparison': {
            'psnr_mean': float(np.mean(results['psnr'])),
            'psnr_std': float(np.std(results['psnr'])),
            'ssim_mean': float(np.mean(results['ssim'])),
            'ssim_std': float(np.std(results['ssim'])),
            'lpips_mean': float(np.mean(results['lpips'])),
            'lpips_std': float(np.std(results['lpips'])),
            'fid': float(fid_score),
            'num_gaussians_original': len(gaussians_original.get_xyz),
            'num_gaussians_compressed': len(gaussians_compressed.get_xyz),
            'compression_ratio': float(len(gaussians_compressed.get_xyz) / len(gaussians_original.get_xyz))
        }
    }
    if results['niqe_original']:
        results_summary['compression_comparison']['niqe_original_mean'] = float(np.mean(results['niqe_original']))
        results_summary['compression_comparison']['niqe_original_std'] = float(np.std(results['niqe_original']))
        results_summary['compression_comparison']['niqe_compressed_mean'] = float(np.mean(results['niqe_compressed']))
        results_summary['compression_comparison']['niqe_compressed_std'] = float(np.std(results['niqe_compressed']))
    if ground_truth_dir and results['psnr_vs_gt_original']:
        results_summary['vs_ground_truth'] = {
            'original': {
                'psnr_mean': float(np.mean(results['psnr_vs_gt_original'])),
                'psnr_std': float(np.std(results['psnr_vs_gt_original'])),
                'ssim_mean': float(np.mean(results['ssim_vs_gt_original'])),
                'ssim_std': float(np.std(results['ssim_vs_gt_original'])),
                'lpips_mean': float(np.mean(results['lpips_vs_gt_original'])),
                'lpips_std': float(np.std(results['lpips_vs_gt_original']))
            },
            'compressed': {
                'psnr_mean': float(np.mean(results['psnr_vs_gt_compressed'])),
                'psnr_std': float(np.std(results['psnr_vs_gt_compressed'])),
                'ssim_mean': float(np.mean(results['ssim_vs_gt_compressed'])),
                'ssim_std': float(np.std(results['ssim_vs_gt_compressed'])),
                'lpips_mean': float(np.mean(results['lpips_vs_gt_compressed'])),
                'lpips_std': float(np.std(results['lpips_vs_gt_compressed']))
            }
        }

    with open(output_dir / "metrics.json", 'w') as f:
        json.dump(results_summary, f, indent=2)

    # Full per-frame metric lists written to detailed_metrics.json.
    results_for_json = {}
    for key, value in results.items():
        if isinstance(value, list) and len(value) > 0:
            results_for_json[key] = [float(v) for v in value]
    with open(output_dir / "detailed_metrics.json", 'w') as f:
        json.dump(results_for_json, f, indent=2)

    print(f"\n结果已保存到: {output_dir}")
    print(f"  - 原始渲染图像: {original_render_dir}")
    print(f"  - 压缩渲染图像: {compressed_render_dir}")
    print(f"  - 评估指标摘要: {output_dir / 'metrics.json'}")
    print(f"  - 详细指标数据: {output_dir / 'detailed_metrics.json'}")


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="评估3DGS压缩前后的渲染质量")
    parser.add_argument("--original_ply", type=str, required=True,
                        help="原始.ply文件路径")
    parser.add_argument("--compressed_ply", type=str, required=True,
                        help="压缩后.ply文件路径")
    parser.add_argument("--cameras_json", type=str, required=True,
                        help="cameras.json文件路径")
    parser.add_argument("--output_dir", type=str, default="evaluation_results",
                        help="输出目录")
    parser.add_argument("--ground_truth_dir", type=str, default=None,
                        help="真实图像目录(可选)")
    parser.add_argument("--sh_degree", type=int, default=3,
                        help="球谐函数阶数")
    parser.add_argument("--kernel_size", type=float, default=0.1,
                        help="渲染kernel大小")
    args = parser.parse_args()

    # Validate input paths before doing any heavy work.
    if not os.path.exists(args.original_ply):
        print(f"错误: 找不到原始PLY文件: {args.original_ply}")
        sys.exit(1)
    if not os.path.exists(args.compressed_ply):
        print(f"错误: 找不到压缩PLY文件: {args.compressed_ply}")
        sys.exit(1)
    if not os.path.exists(args.cameras_json):
        print(f"错误: 找不到相机参数文件: {args.cameras_json}")
        sys.exit(1)

    render_and_evaluate(
        original_ply=args.original_ply,
        compressed_ply=args.compressed_ply,
        cameras_json=args.cameras_json,
        output_dir=args.output_dir,
        sh_degree=args.sh_degree,
        kernel_size=args.kernel_size,
        ground_truth_dir=args.ground_truth_dir
    )