""" Ablation Evaluation Script for GliomaSAM3-MoE Implements: - Table 4: ET-absent subset evaluation - Table 7: Boundary-band Dice (3-voxel band) Usage: cd /root/githubs/gliomasam3_moe PYTHONPATH=/root/githubs/sam3:$PYTHONPATH python eval_ablation.py \ --config configs/train.yaml \ --checkpoint logs/segmamba/model/ckpt_step3000.pt \ --eval table7 # or table4, or both Author: GliomaSAM3-MoE Team """ import argparse import os import sys import json from typing import Dict, List, Tuple, Optional from collections import defaultdict import numpy as np import torch import yaml from tqdm import tqdm # Add project paths ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) SRC_DIR = os.path.join(ROOT_DIR, "src") if SRC_DIR not in sys.path: sys.path.insert(0, SRC_DIR) from scipy import ndimage as ndi from scipy.ndimage import binary_dilation, binary_erosion from sklearn.metrics import roc_auc_score, accuracy_score, roc_curve # ============================================================================ # Configuration # ============================================================================ DEFAULT_CONFIG = { "data_dir": "/data/yty/brats23_segmamba_processed", "seed": 20251225, # Fixed seed as per spec "train_rate": 0.7, "val_rate": 0.1, "test_rate": 0.2, "threshold": 0.5, "et_cc_min_size": 50, "boundary_band_radius": 3, "hd95_empty_value": 50.0, } # ============================================================================ # Utility Functions # ============================================================================ def load_yaml(path: str) -> Dict: with open(path, "r") as f: return yaml.safe_load(f) def split_npz_paths(data_dir: str, train_rate: float, val_rate: float, test_rate: float, seed: int) -> Tuple[List[str], List[str], List[str]]: """Split data paths into train/val/test sets with fixed seed.""" import glob import random all_paths = sorted(glob.glob(os.path.join(data_dir, "*.npz"))) random.seed(seed) random.shuffle(all_paths) n = len(all_paths) n_train = int(n * train_rate) n_val = int(n * val_rate) train_paths = all_paths[:n_train] val_paths = all_paths[n_train:n_train + n_val] test_paths = all_paths[n_train + n_val:] return train_paths, val_paths, test_paths def load_case(npz_path: str) -> Dict: """Load a single case from npz/npy files.""" npy_path = npz_path[:-4] + ".npy" seg_path = npz_path[:-4] + "_seg.npy" # Load image if os.path.isfile(npy_path): image = np.load(npy_path, mmap_mode="r") else: data = np.load(npz_path) image = data["data"] image = np.asarray(image, dtype=np.float32) if image.ndim == 5 and image.shape[0] == 1: image = image[0] if image.ndim == 4 and image.shape[0] != 4 and image.shape[-1] == 4: image = image.transpose(3, 0, 1, 2) # Load label if os.path.isfile(seg_path): label = np.load(seg_path, mmap_mode="r") else: data = np.load(npz_path) label = data["seg"] if "seg" in data else None if label is not None: label = np.asarray(label, dtype=np.int16) if label.ndim == 4 and label.shape[0] == 1: label = label[0] # Map ET label 3 -> 4 if needed (BraTS convention) if label.max() == 3 and (label == 4).sum() == 0: label = label.copy() label[label == 3] = 4 case_id = os.path.basename(npz_path)[:-4] return {"image": image, "label": label, "case_id": case_id} def label_to_regions(label: np.ndarray) -> np.ndarray: """Convert BraTS label to [WT, TC, ET] regions.""" label = np.asarray(label) wt = label > 0 tc = (label == 1) | (label == 4) et = label == 4 return np.stack([wt, tc, et], axis=0).astype(np.uint8) def remove_small_components(mask: np.ndarray, min_size: int, 


def remove_small_components(mask: np.ndarray, min_size: int,
                            connectivity: int = 3) -> np.ndarray:
    """Remove connected components smaller than min_size.

    Args:
        mask: Binary mask [D, H, W]
        min_size: Minimum voxel count to keep
        connectivity: 1 for 6-connectivity, 2 for 18, 3 for 26
    """
    struct = ndi.generate_binary_structure(3, connectivity)
    labeled, num = ndi.label(mask.astype(np.uint8), structure=struct)
    if num == 0:
        return mask.astype(np.uint8)

    sizes = ndi.sum(mask.astype(np.uint8), labeled, index=np.arange(1, num + 1))
    keep = np.zeros_like(mask, dtype=np.uint8)
    for i, s in enumerate(sizes, start=1):
        if s >= min_size:
            keep[labeled == i] = 1
    return keep


def count_connected_components(mask: np.ndarray, connectivity: int = 3) -> int:
    """Count the number of connected components."""
    struct = ndi.generate_binary_structure(3, connectivity)
    _, num = ndi.label(mask.astype(np.uint8), structure=struct)
    return num


# ============================================================================
# Model Inference
# ============================================================================

class ModelPredictor:
    """Predictor for the GliomaSAM3-MoE model."""

    def __init__(self, config_path: str, checkpoint_path: str, device: str = "cuda"):
        self.device = torch.device(device if torch.cuda.is_available() else "cpu")
        self.cfg = load_yaml(config_path)

        from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE
        self.model = GliomaSAM3_MoE(**self.cfg["model"]).to(self.device)

        ckpt = torch.load(checkpoint_path, map_location="cpu")
        # Filter out cached freqs_cis buffers before loading
        state_dict = {k: v for k, v in ckpt["model"].items() if "freqs_cis" not in k}
        self.model.load_state_dict(state_dict, strict=False)
        self.model.eval()
        print(f"Loaded checkpoint: {checkpoint_path}")

    def predict(self, image: np.ndarray) -> Dict:
        """Run inference and return predictions with aux outputs.

        Returns:
            {
                "probs":       np.ndarray [3, D, H, W] - raw probabilities before gating
                "probs_gated": np.ndarray [3, D, H, W] - probabilities after ET gating
                "pi_et":       float - ET presence probability
                "regions_bin": np.ndarray [3, D, H, W] - binary predictions
                               (after gating + threshold)
            }
        """
        if image.ndim == 4:
            x = torch.from_numpy(image.copy()).float().unsqueeze(0)
        else:
            raise ValueError(f"Invalid image shape: {image.shape}")
        x = x.to(self.device)

        with torch.no_grad():
            logits, aux = self.model(x)
            probs = torch.sigmoid(logits)

            # Get pi_et
            pi_et = aux["pi_et"]
            pi_et_value = float(pi_et.item())

            # Probs before gating
            probs_np = probs[0].cpu().numpy()

            # Apply ET gating: scale the ET channel by pi_et
            probs_gated = probs.clone()
            probs_gated[:, 2:3] = probs[:, 2:3] * pi_et.view(-1, 1, 1, 1, 1)
            probs_gated_np = probs_gated[0].cpu().numpy()

            # Binary prediction with threshold
            threshold = self.cfg.get("infer", {}).get("threshold", 0.5)
            regions_bin = (probs_gated_np > threshold).astype(np.uint8)

        return {
            "probs": probs_np,
            "probs_gated": probs_gated_np,
            "pi_et": pi_et_value,
            "regions_bin": regions_bin,
        }
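

# Minimal usage sketch for ModelPredictor (illustrative; the paths are the
# same placeholders as in the module docstring, and `image` is assumed to be
# a preprocessed [4, D, H, W] float32 array, e.g. load_case(path)["image"]):
#
#     predictor = ModelPredictor("configs/train.yaml",
#                                "logs/segmamba/model/ckpt_step3000.pt")
#     out = predictor.predict(image)
#     out["probs"].shape        # (3, D, H, W) probabilities before ET gating
#     out["probs_gated"].shape  # (3, D, H, W) probabilities after ET gating
#     out["pi_et"]              # scalar ET-presence probability in [0, 1]
#     out["regions_bin"].shape  # (3, D, H, W) binary [WT, TC, ET] masks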


# ============================================================================
# Table 7: Boundary-band Dice
# ============================================================================

def compute_boundary_band(mask: np.ndarray, radius: int = 3) -> np.ndarray:
    """Compute a 3D boundary band using morphological operations.

    Args:
        mask: Binary mask [D, H, W]
        radius: Dilation/erosion radius in voxels

    Returns:
        band: Binary mask of the boundary band
    """
    struct = ndi.generate_binary_structure(3, 3)  # 26-connectivity
    mask_bool = mask.astype(bool)
    if mask_bool.sum() == 0:
        return np.zeros_like(mask, dtype=np.uint8)

    # Dilate and erode
    dilated = binary_dilation(mask_bool, structure=struct, iterations=radius)
    eroded = binary_erosion(mask_bool, structure=struct, iterations=radius)

    # Band = dilated XOR eroded
    band = np.logical_xor(dilated, eroded).astype(np.uint8)
    return band


def compute_boundary_band_dice(pred: np.ndarray, gt: np.ndarray,
                               radius: int = 3) -> float:
    """Compute the boundary-band Dice score.

    Args:
        pred: Binary prediction [D, H, W]
        gt: Binary ground truth [D, H, W]
        radius: Band radius in voxels

    Returns:
        Dice score restricted to the boundary band of the ground truth
    """
    eps = 1e-7
    pred = pred.astype(bool)
    gt = gt.astype(bool)

    # Handle empty cases
    if gt.sum() == 0 and pred.sum() == 0:
        return 1.0
    if gt.sum() == 0 and pred.sum() > 0:
        return 0.0

    # Compute the boundary band from GT
    band = compute_boundary_band(gt, radius=radius).astype(bool)

    # Restrict pred and gt to the band
    pred_band = pred & band
    gt_band = gt & band

    # Dice on the band
    intersection = (pred_band & gt_band).sum()
    dice = 2 * intersection / (pred_band.sum() + gt_band.sum() + eps)
    return float(dice)


def eval_table7(predictor: ModelPredictor, val_paths: List[str],
                config: Dict, output_dir: str) -> Dict:
    """Evaluate Table 7: Boundary-band Dice (3-voxel band).

    Returns:
        Results dict with per-region and mean boundary Dice
    """
    print("\n" + "=" * 60)
    print("Table 7: Boundary-band Dice Evaluation")
    print("=" * 60)

    radius = config.get("boundary_band_radius", 3)
    min_size = config.get("et_cc_min_size", 50)

    results = {
        "WT": [], "TC": [], "ET": [],
        "config": {"radius": radius, "min_size": min_size},
    }

    for npz_path in tqdm(val_paths, desc="Evaluating"):
        case = load_case(npz_path)
        if case["label"] is None:
            continue

        # Get predictions
        pred_out = predictor.predict(case["image"])
        pred_regions = pred_out["regions_bin"].copy()

        # Post-process ET: remove small components
        pred_regions[2] = remove_small_components(pred_regions[2], min_size,
                                                  connectivity=3)

        # Get GT regions
        gt_regions = label_to_regions(case["label"])

        # Compute boundary-band Dice for each region
        for i, region in enumerate(["WT", "TC", "ET"]):
            dice = compute_boundary_band_dice(pred_regions[i], gt_regions[i],
                                              radius=radius)
            results[region].append(dice)

    # Compute statistics
    stats = {}
    for region in ["WT", "TC", "ET"]:
        scores = results[region]
        stats[region] = {
            "mean": float(np.mean(scores)),
            "std": float(np.std(scores)),
            "n": len(scores),
        }
    stats["Mean"] = {
        "mean": float(np.mean([stats[r]["mean"] for r in ["WT", "TC", "ET"]])),
    }

    # Print results
    print(f"\nBoundary-band Dice (radius={radius} voxels):")
    print("-" * 40)
    print(f"{'Region':<10} {'Mean':>10} {'Std':>10} {'N':>8}")
    print("-" * 40)
    for region in ["WT", "TC", "ET"]:
        s = stats[region]
        print(f"{region:<10} {s['mean']:>10.4f} {s['std']:>10.4f} {s['n']:>8}")
    print("-" * 40)
    print(f"{'Mean':<10} {stats['Mean']['mean']:>10.4f}")

    # Save results
    output_path = os.path.join(output_dir, "table7_boundary_dice.json")
    with open(output_path, "w") as f:
        json.dump({"stats": stats, "config": results["config"]}, f, indent=2)
    print(f"\nResults saved to: {output_path}")

    return stats
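

# Illustrative sanity check for the boundary-band Dice (added for clarity;
# not called anywhere in this script, safe to delete). A prediction identical
# to the GT scores ~1.0 on the band; a 1-voxel shift lowers the band Dice.
def _example_boundary_band_dice() -> None:
    gt = np.zeros((32, 32, 32), dtype=np.uint8)
    gt[8:24, 8:24, 8:24] = 1  # a 16^3 cube
    d_same = compute_boundary_band_dice(gt, gt, radius=3)
    d_shift = compute_boundary_band_dice(np.roll(gt, 1, axis=0), gt, radius=3)
    assert d_same > 0.999    # only eps keeps it below exactly 1.0
    assert d_shift < d_same  # boundary errors are penalized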


# ============================================================================
# Table 4: ET-absent Subset Evaluation
# ============================================================================

def eval_table4(predictor: ModelPredictor, val_paths: List[str],
                config: Dict, output_dir: str) -> Dict:
    """Evaluate Table 4: ET-absent subset evaluation.

    Metrics:
    - ET-absent subset (cases where GT ET voxels = 0):
        - FP volume (mm^3)
        - FP components (count)
    - Full validation set:
        - ET presence classification: AUROC, Acc, Sens, Spec
    """
    print("\n" + "=" * 60)
    print("Table 4: ET-absent Subset Evaluation")
    print("=" * 60)

    min_size = config.get("et_cc_min_size", 50)
    threshold = config.get("threshold", 0.5)

    # Collect results
    et_absent_results = []       # For the ET-absent subset
    classification_results = []  # For the full validation set

    for npz_path in tqdm(val_paths, desc="Evaluating"):
        case = load_case(npz_path)
        if case["label"] is None:
            continue

        # Get predictions
        pred_out = predictor.predict(case["image"])

        # Get GT regions
        gt_regions = label_to_regions(case["label"])
        gt_et = gt_regions[2]
        gt_et_voxels = int(gt_et.sum())

        # Classification labels: y=1 if ET present, 0 otherwise
        y_true = 1 if gt_et_voxels > 0 else 0

        # Score for classification: pi_et
        s_score = pred_out["pi_et"]

        # Binary ET prediction (after gating + threshold + post-processing)
        pred_et_prob = pred_out["probs_gated"][2]
        pred_et_bin = (pred_et_prob > threshold).astype(np.uint8)
        pred_et_bin = remove_small_components(pred_et_bin, min_size,
                                              connectivity=3)

        # Store classification data (y_pred uses the default 0.5 threshold;
        # the reported metrics use the Youden threshold computed below)
        classification_results.append({
            "case_id": case["case_id"],
            "y_true": y_true,
            "s_score": s_score,
            "y_pred": 1 if s_score > 0.5 else 0,
        })

        # For ET-absent cases, compute FP metrics
        if gt_et_voxels == 0:
            fp_voxels = int(pred_et_bin.sum())
            fp_components = count_connected_components(pred_et_bin, connectivity=3)
            et_absent_results.append({
                "case_id": case["case_id"],
                "fp_volume_mm3": fp_voxels,  # assumes 1 mm isotropic spacing
                "fp_components": fp_components,
            })

    # -------------------------
    # ET-absent subset metrics
    # -------------------------
    n_et_absent = len(et_absent_results)
    if n_et_absent > 0:
        fp_volumes = [r["fp_volume_mm3"] for r in et_absent_results]
        fp_components = [r["fp_components"] for r in et_absent_results]
        et_absent_stats = {
            "n": n_et_absent,
            "fp_volume_mm3": {
                "mean": float(np.mean(fp_volumes)),
                "std": float(np.std(fp_volumes)),
                "min": float(np.min(fp_volumes)),
                "max": float(np.max(fp_volumes)),
            },
            "fp_components": {
                "mean": float(np.mean(fp_components)),
                "std": float(np.std(fp_components)),
                "min": int(np.min(fp_components)),
                "max": int(np.max(fp_components)),
            },
        }
    else:
        et_absent_stats = {"n": 0, "fp_volume_mm3": None, "fp_components": None}

    # ------------------------------------------------
    # Classification metrics (full validation set)
    # ------------------------------------------------
    y_true = np.array([r["y_true"] for r in classification_results])
    s_score = np.array([r["s_score"] for r in classification_results])

    # AUROC and optimal threshold via Youden's J statistic
    if len(np.unique(y_true)) > 1:
        auroc = roc_auc_score(y_true, s_score)
        # Youden's J = Sens + Spec - 1 = TPR - FPR
        fpr, tpr, thresholds = roc_curve(y_true, s_score)
        j_scores = tpr - fpr
        best_idx = np.argmax(j_scores)
        optimal_threshold = thresholds[best_idx]
        # Use the optimal threshold for predictions
        y_pred_optimal = (s_score >= optimal_threshold).astype(int)
    else:
        auroc = float("nan")
        optimal_threshold = 0.5
        y_pred_optimal = (s_score >= 0.5).astype(int)

    # Compute metrics at the optimal threshold
    tp = int(((y_true == 1) & (y_pred_optimal == 1)).sum())
    tn = int(((y_true == 0) & (y_pred_optimal == 0)).sum())
    fp = int(((y_true == 0) & (y_pred_optimal == 1)).sum())
    fn = int(((y_true == 1) & (y_pred_optimal == 0)).sum())

    acc_optimal = (tp + tn) / len(y_true) if len(y_true) > 0 else float("nan")
    sens_optimal = tp / (tp + fn) if (tp + fn) > 0 else float("nan")
    spec_optimal = tn / (tn + fp) if (tn + fp) > 0 else float("nan")

    classification_stats = {
        "n": len(classification_results),
        "n_et_present": int(y_true.sum()),
        "n_et_absent": int((1 - y_true).sum()),
        "auroc": float(auroc),
        "optimal_threshold": float(optimal_threshold),
        "accuracy_optimal": float(acc_optimal),
        "sensitivity_optimal": float(sens_optimal),
        "specificity_optimal": float(spec_optimal),
    }

    # Print results
    print(f"\nET Presence Classification (n={classification_stats['n']}):")
    print("-" * 50)
    print(f"ET-present: {classification_stats['n_et_present']}, "
          f"ET-absent: {classification_stats['n_et_absent']}")
    print(f"AUROC: {classification_stats['auroc']:.4f}")
    print(f"Optimal Threshold: {classification_stats['optimal_threshold']:.4f}")
    print(f"Accuracy: {classification_stats['accuracy_optimal']:.4f}")
    print(f"Sensitivity: {classification_stats['sensitivity_optimal']:.4f}")
    print(f"Specificity: {classification_stats['specificity_optimal']:.4f}")

    # Save results
    results = {
        "et_absent_subset": et_absent_stats,
        "et_absent_cases": et_absent_results,
        "classification": classification_stats,
        "config": {"min_size": min_size, "threshold": threshold},
    }
    output_path = os.path.join(output_dir, "table4_et_absent.json")
    with open(output_path, "w") as f:
        json.dump(results, f, indent=2)
    print(f"\nResults saved to: {output_path}")

    return results
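

# Illustrative sketch of the Youden's-J threshold selection used above
# (toy labels and scores, not real model outputs; safe to delete):
def _example_youden_threshold() -> None:
    y_true = np.array([0, 0, 0, 1, 1, 1])
    s_score = np.array([0.1, 0.2, 0.4, 0.35, 0.8, 0.9])
    fpr, tpr, thresholds = roc_curve(y_true, s_score)
    best_idx = np.argmax(tpr - fpr)  # J = Sens + Spec - 1 = TPR - FPR
    y_pred = (s_score >= thresholds[best_idx]).astype(int)
    assert (y_pred == y_true).mean() >= 5 / 6  # separates most cases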


# ============================================================================
# Main
# ============================================================================

def main():
    parser = argparse.ArgumentParser(description="Ablation Evaluation for GliomaSAM3-MoE")
    parser.add_argument("--config", type=str, default="configs/train.yaml",
                        help="Model config path")
    parser.add_argument("--checkpoint", type=str, required=True,
                        help="Model checkpoint path")
    parser.add_argument("--eval", type=str, default="both",
                        choices=["table4", "table7", "both"],
                        help="Which evaluation to run")
    parser.add_argument("--seed", type=int, default=20251225,
                        help="Random seed for data split")
    parser.add_argument("--output_dir", type=str, default="./eval_results",
                        help="Output directory for results")
    parser.add_argument("--device", type=str, default="cuda",
                        help="Device to use")
    parser.add_argument("--data_dir", type=str, default=None,
                        help="Override data directory")
    parser.add_argument("--use_all", action="store_true",
                        help="Use all data instead of the validation split only")
    args = parser.parse_args()

    # Setup
    os.makedirs(args.output_dir, exist_ok=True)

    # Load model config
    model_cfg = load_yaml(args.config)

    # Setup evaluation config
    config = DEFAULT_CONFIG.copy()
    config["seed"] = args.seed
    if args.data_dir:
        config["data_dir"] = args.data_dir
    else:
        config["data_dir"] = model_cfg.get("data", {}).get("root_dir", config["data_dir"])

    print("=" * 60)
    print("GliomaSAM3-MoE Ablation Evaluation")
    print("=" * 60)
    print(f"Config: {args.config}")
    print(f"Checkpoint: {args.checkpoint}")
    print(f"Data dir: {config['data_dir']}")
    print(f"Seed: {config['seed']}")
    print(f"Evaluation: {args.eval}")
    print(f"Use all data: {args.use_all}")

    # Get data paths
    if args.use_all:
        # Use all data for evaluation
        val_paths = sorted(glob.glob(os.path.join(config["data_dir"], "*.npz")))
        print(f"\nUsing all data: {len(val_paths)} cases")
    else:
        # Split data
        print("\nSplitting data...")
        train_paths, val_paths, test_paths = split_npz_paths(
            config["data_dir"],
            train_rate=config["train_rate"],
            val_rate=config["val_rate"],
            test_rate=config["test_rate"],
            seed=config["seed"],
        )
        print(f"Train: {len(train_paths)}, Val: {len(val_paths)}, Test: {len(test_paths)}")

    # Initialize predictor
    print("\nLoading model...")
    predictor = ModelPredictor(args.config, args.checkpoint, args.device)

    # Run evaluations
    results = {}
    if args.eval in ["table7", "both"]:
        results["table7"] = eval_table7(predictor, val_paths, config, args.output_dir)
    if args.eval in ["table4", "both"]:
        results["table4"] = eval_table4(predictor, val_paths, config, args.output_dir)

    print("\n" + "=" * 60)
    print("Evaluation Complete!")
    print("=" * 60)
    return results


if __name__ == "__main__":
    main()
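
# To inspect saved results afterwards (file names match the defaults above):
#
#     import json
#     with open("eval_results/table7_boundary_dice.json") as f:
#         print(json.load(f)["stats"])
#     with open("eval_results/table4_et_absent.json") as f:
#         print(json.load(f)["classification"])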