|
|
""" |
|
|
Method comparison visualization for GliomaSAM3-MoE vs SegMamba. |
|
|
Generates separate images for: |
|
|
- Original input (4 modalities) |
|
|
- Ground truth |
|
|
- Predictions from different checkpoints |
|
|
|
|
|
Usage: |
|
|
cd /root/githubs/gliomasam3_moe |
|
|
PYTHONPATH=/root/githubs/sam3:$PYTHONPATH python visualizations/vis_method_comparison.py |
|
|
""" |
|
|
|
|
|
import argparse |
|
|
import os |
|
|
import sys |
|
|
from typing import Dict, List, Optional, Tuple |
|
|
|
|
|
import numpy as np |
|
|
import torch |
|
|
import yaml |
|
|
|
|
|
import matplotlib |
|
|
matplotlib.use("Agg") |
|
|
import matplotlib.pyplot as plt |
|
|
from matplotlib.colors import ListedColormap |
|
|
from scipy.ndimage import zoom |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Global plot styling shared by every figure this script produces.
STYLE = {
    "dpi": 300,             # output resolution for saved PNGs
    "font_size": 12,        # title font size
    "color_WT": "#00BBD4",  # whole-tumor overlay color
    "color_TC": "#D81B60",  # tumor-core overlay color
    "color_ET": "#FBC02D",  # enhancing-tumor overlay color
    "alpha_mask": 0.45,     # alpha used when blending region fills onto the slice
}
|
|
|
|
|
def hex_to_rgb(hex_color: str) -> Tuple[float, float, float]:
    """Convert a hex color string to an (r, g, b) tuple of floats in [0, 1].

    Accepts both 6-digit ("#RRGGBB") and 3-digit shorthand ("#RGB") forms,
    with or without the leading "#".

    Args:
        hex_color: Color string such as "#D81B60" or "#F00".

    Returns:
        (r, g, b) with each channel scaled to [0, 1].

    Raises:
        ValueError: If the string is not 3 or 6 hex digits long (previously a
            malformed string silently produced a wrong-length tuple).
    """
    h = hex_color.lstrip("#")
    if len(h) == 3:
        # Expand shorthand: "F0A" -> "FF00AA".
        h = "".join(c * 2 for c in h)
    if len(h) != 6:
        raise ValueError(f"Invalid hex color: {hex_color!r}")
    return tuple(int(h[i:i + 2], 16) / 255.0 for i in (0, 2, 4))
|
|
|
|
|
# Pre-computed RGB tuples (floats in [0, 1]) for each tumor-region overlay,
# keyed by the region names used throughout this script.
COLORS = {
    "WT": hex_to_rgb(STYLE["color_WT"]),
    "TC": hex_to_rgb(STYLE["color_TC"]),
    "ET": hex_to_rgb(STYLE["color_ET"]),
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Resolve repository paths and extend sys.path so both the local `src`
# package and the external SegMamba checkout are importable.
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
SRC_DIR = os.path.join(ROOT_DIR, "src")
SEGMAMBA_DIR = "/root/githubs/SegMamba"  # hard-coded external checkout location

if SRC_DIR not in sys.path:
    sys.path.insert(0, SRC_DIR)
if SEGMAMBA_DIR not in sys.path:
    sys.path.insert(0, SEGMAMBA_DIR)

from scipy import ndimage as ndi
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Static run configuration: data locations, case list, checkpoints, outputs.
# All paths are machine-specific absolutes; edit here (or pass --cases on the
# CLI) rather than changing the code below.
CONFIG = {
    # Directory of preprocessed BraTS'23 volumes (.npz / .npy per case).
    "data_dir": "/data/yty/brats23_segmamba_processed",
    # Stored channel order of the 4 MRI modalities.
    "modalities": ["t1n", "t1c", "t2f", "t2w"],
    # Display names, index-aligned with "modalities".
    "modality_names": ["T1", "T1ce", "FLAIR", "T2"],

    # Cases to visualize (overridable via --cases).
    "cases": [
        "BraTS-GLI-00005-000",
        "BraTS-GLI-00006-000",
        "BraTS-GLI-00012-000",
        "BraTS-GLI-00017-000",
        "BraTS-GLI-00018-000",
        "BraTS-GLI-00020-000",
    ],

    # GliomaSAM3-MoE checkpoints run live during visualization; display
    # names are index-aligned with "gliomasam_ckpts".
    "gliomasam_ckpts": [
        "/root/githubs/gliomasam3_moe/logs/segmamba/model/ckpt_step2000.pt",
        "/root/githubs/gliomasam3_moe/logs/segmamba/model/ckpt_step2600.pt",
        "/root/githubs/gliomasam3_moe/logs/segmamba/model/ckpt_step3000.pt",
    ],
    "gliomasam_names": ["step2000", "step2600", "step3000"],

    # Directories of precomputed SegMamba predictions (read from disk, not
    # re-run); index-aligned with "segmamba_names".
    "segmamba_pred_dirs": [
        "/root/githubs/SegMamba/prediction_results/segmamba_brats23",
        "/root/githubs/SegMamba/prediction_results/segmamba_brats23_ep799",
    ],
    "segmamba_names": ["segmamba_default", "segmamba_ep799"],

    # SegMamba checkpoints; in this script they are only referenced by the
    # summary printout in main(), not used for inference.
    "segmamba_ckpts": [
        "/root/githubs/SegMamba/logs/segmamba_brats23/model/tmp_model_ep299_0.8274.pt",
        "/root/githubs/SegMamba/logs/segmamba_brats23/model/tmp_model_ep599_0.8295.pt",
        "/root/githubs/SegMamba/logs/segmamba_brats23/model/tmp_model_ep799_0.8498.pt",
    ],

    # YAML config used to construct the GliomaSAM3-MoE model.
    "gliomasam_config": "/root/githubs/gliomasam3_moe/configs/train.yaml",

    # Root directory for all generated images.
    "output_dir": "/root/githubs/gliomasam3_moe/vis_res/method_comparison",
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def ensure_dir(path: str) -> None:
    """Create *path* (including parents) if it does not already exist."""
    os.makedirs(path, exist_ok=True)
|
|
|
|
|
def load_yaml(path: str) -> Dict:
    """Parse the YAML file at *path* and return its contents."""
    with open(path, "r") as fh:
        return yaml.safe_load(fh)
|
|
|
|
|
def normalize_volume(vol: np.ndarray, eps: float = 1e-6) -> np.ndarray:
    """Rescale *vol* into [0, 1] after clipping to the 1st/99th percentiles.

    NaN/inf voxels are zeroed first.  Empty and (near-)constant volumes map
    to an all-zero float32 array of the same shape.
    """
    data = np.nan_to_num(
        np.asarray(vol, dtype=np.float32), nan=0.0, posinf=0.0, neginf=0.0
    )
    if data.size == 0:
        return np.zeros_like(data, dtype=np.float32)
    lo, hi = np.percentile(data.reshape(-1), [1, 99])
    if hi - lo < eps:
        return np.zeros_like(data, dtype=np.float32)
    clipped = np.clip(data, lo, hi)
    return (clipped - lo) / (hi - lo + eps)
|
|
|
|
|
def label_to_regions(label: np.ndarray) -> np.ndarray:
    """Expand a BraTS label map into stacked binary [WT, TC, ET] masks.

    WT = any tumor label (> 0), TC = labels {1, 4}, ET = label 4.
    """
    lbl = np.asarray(label)
    regions = [lbl > 0, (lbl == 1) | (lbl == 4), lbl == 4]
    return np.stack(regions, axis=0).astype(np.uint8)
|
|
|
|
|
def regions_to_label(regions: np.ndarray) -> np.ndarray:
    """Collapse [WT, TC, ET] region masks into a single BraTS label map.

    Painting order matters: WT voxels get label 2 first, then TC overwrites
    with 1, then ET overwrites with 4.  A 3-D input is treated as an
    already-collapsed label map and passed through as int16.
    """
    if regions.ndim == 3:
        return regions.astype(np.int16)
    if regions.ndim != 4 or regions.shape[0] != 3:
        raise ValueError(f"Invalid regions shape: {regions.shape}")

    wt, tc, et = (regions[i] > 0.5 for i in range(3))
    label = np.zeros_like(wt, dtype=np.int16)
    label[wt] = 2
    label[tc] = 1
    label[et] = 4
    return label
|
|
|
|
|
def extract_slice(vol: np.ndarray, plane: str, idx: int) -> np.ndarray:
    """Take a 2-D slice of a 3-D volume along *plane*, rotated for display."""
    slicers = {
        "axial": (idx, slice(None), slice(None)),
        "coronal": (slice(None), idx, slice(None)),
        "sagittal": (slice(None), slice(None), idx),
    }
    if plane not in slicers:
        raise ValueError(f"Unknown plane: {plane}")
    return np.rot90(vol[slicers[plane]])
|
|
|
|
|
def select_best_slice(mask: np.ndarray) -> Dict[str, int]:
    """Pick the axial index with the largest in-mask area.

    Falls back to the volume midpoint for an empty mask, or to index 64
    when no mask is given at all.
    """
    if mask is None:
        return {"axial": 64}
    if mask.sum() == 0:
        return {"axial": mask.shape[0] // 2}
    per_slice = mask.astype(np.uint8).sum(axis=(1, 2))
    return {"axial": int(np.argmax(per_slice))}
|
|
|
|
|
def mask_boundary(mask2d: np.ndarray, iterations: int = 1) -> np.ndarray:
    """Return the boundary pixels of a binary mask (mask XOR its erosion)."""
    binary = mask2d.astype(bool)
    if not binary.any():
        return binary
    interior = ndi.binary_erosion(binary, iterations=iterations)
    return np.logical_xor(binary, interior)
|
|
|
|
|
def overlay_masks_publication(
    base2d: np.ndarray,
    masks: Dict[str, np.ndarray],
    alpha: float = STYLE["alpha_mask"],
    draw_boundary: bool = True,
    boundary_width: int = 2,
) -> np.ndarray:
    """Blend colored region masks onto a grayscale slice.

    Regions are painted in WT -> TC -> ET order so the smaller regions end
    up on top.  Each mask is alpha-blended with its region color and,
    optionally, outlined with a solid boundary.  Masks whose shape differs
    from the base image are nearest-neighbor resampled to match.
    """
    gray = np.clip(base2d, 0.0, 1.0)
    rgb = np.stack([gray] * 3, axis=-1).astype(np.float32)

    for region in ("WT", "TC", "ET"):
        if region not in masks:
            continue
        mask = masks[region].astype(bool)
        if mask.shape != gray.shape:
            factors = (gray.shape[0] / mask.shape[0], gray.shape[1] / mask.shape[1])
            mask = zoom(mask.astype(float), factors, order=0) > 0.5
        if not mask.any():
            continue
        tint = np.array(COLORS.get(region, (1.0, 0.0, 0.0)), dtype=np.float32)
        rgb[mask] = (1.0 - alpha) * rgb[mask] + alpha * tint

        if draw_boundary:
            edge = mask_boundary(mask, iterations=boundary_width)
            rgb[edge] = tint

    return np.clip(rgb, 0, 1)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def load_case(data_dir: str, case_id: str) -> Dict:
    """Load one preprocessed case (image plus optional segmentation).

    Prefers the loose `<case>.npy` / `<case>_seg.npy` files (memory-mapped)
    and falls back to the bundled `<case>.npz` archive.  The image is
    normalized to channel-first float32; labels become int16 with BraTS
    label 3 remapped to 4 when 4 is absent.
    """
    npz_path = os.path.join(data_dir, case_id + ".npz")
    npy_path = os.path.join(data_dir, case_id + ".npy")
    seg_path = os.path.join(data_dir, case_id + "_seg.npy")

    # ---- image ----
    if os.path.isfile(npy_path):
        image = np.load(npy_path, mmap_mode="r")
    else:
        image = np.load(npz_path)["data"]

    image = np.asarray(image, dtype=np.float32)
    if image.ndim == 5 and image.shape[0] == 1:
        image = image[0]  # drop singleton batch axis
    if image.ndim == 4 and image.shape[0] != 4 and image.shape[-1] == 4:
        image = image.transpose(3, 0, 1, 2)  # channel-last -> channel-first

    # ---- label ----
    if os.path.isfile(seg_path):
        label = np.load(seg_path, mmap_mode="r")
    else:
        archive = np.load(npz_path)
        label = archive["seg"] if "seg" in archive else None

    if label is not None:
        label = np.asarray(label, dtype=np.int16)
        if label.ndim == 4 and label.shape[0] == 1:
            label = label[0]

        # Some exports encode enhancing tumor as 3; remap to the BraTS 4.
        if label.max() == 3 and (label == 4).sum() == 0:
            label = label.copy()
            label[label == 3] = 4

    return {"image": image, "label": label}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class GliomaSAMPredictor:
    """Predictor for GliomaSAM3-MoE model.

    The model is built lazily on the first checkpoint load and reused when
    switching checkpoints, so one instance can serve several checkpoints.
    """

    def __init__(self, config_path: str, device: str = "cuda"):
        has_cuda = torch.cuda.is_available()
        self.device = torch.device(device if has_cuda else "cpu")
        self.cfg = load_yaml(config_path)
        self.model = None          # constructed on first load_checkpoint()
        self.current_ckpt = None   # path of the checkpoint currently loaded

    def load_checkpoint(self, ckpt_path: str):
        """Load model checkpoint (no-op when it is already active)."""
        if self.current_ckpt == ckpt_path:
            return

        # Imported lazily so the project package is only required once
        # inference is actually requested.
        from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE

        if self.model is None:
            self.model = GliomaSAM3_MoE(**self.cfg["model"]).to(self.device)

        checkpoint = torch.load(ckpt_path, map_location="cpu")
        # Drop "freqs_cis" buffers from the checkpoint; strict=False lets
        # the model keep its own versions of any missing entries.
        filtered = {
            name: tensor
            for name, tensor in checkpoint["model"].items()
            if "freqs_cis" not in name
        }
        self.model.load_state_dict(filtered, strict=False)
        self.model.eval()
        self.current_ckpt = ckpt_path
        print(f" Loaded GliomaSAM checkpoint: {os.path.basename(ckpt_path)}")

    def predict(self, image: np.ndarray) -> np.ndarray:
        """Run inference on a single case.

        Args:
            image: Channel-first (4, D, H, W) volume; any other rank raises.

        Returns:
            Binary float32 region masks of shape (3, D, H, W); channel 2 is
            additionally gated by the model's "pi_et" auxiliary output.
        """
        if image.ndim != 4:
            raise ValueError(f"Invalid image shape: {image.shape}")

        batch = torch.from_numpy(image).float().unsqueeze(0).to(self.device)

        with torch.no_grad():
            logits, aux = self.model(batch)
            probs = torch.sigmoid(logits)

            # Gate the ET channel by the predicted ET-presence probability.
            pi_et = aux["pi_et"].view(probs.shape[0], 1, 1, 1, 1)
            probs[:, 2:3] = probs[:, 2:3] * pi_et

        binary = (probs > 0.5).float()
        return binary[0].cpu().numpy()
|
|
|
|
|
|
|
|
class SegMambaPredictor:
    """Predictor for SegMamba model.

    Mirrors GliomaSAMPredictor: the network is built once and checkpoints
    are swapped in place.
    """

    def __init__(self, device: str = "cuda"):
        use_cuda = torch.cuda.is_available()
        self.device = torch.device(device if use_cuda else "cpu")
        self.model = None
        self.current_ckpt = None

    def load_checkpoint(self, ckpt_path: str):
        """Load model checkpoint (no-op when it is already active)."""
        if self.current_ckpt == ckpt_path:
            return

        # Lazy import: requires the SegMamba checkout on sys.path.
        from model_segmamba.segmamba import SegMamba

        if self.model is None:
            network = SegMamba(
                in_chans=4,
                out_chans=4,
                depths=[2, 2, 2, 2],
                feat_size=[48, 96, 192, 384],
            )
            self.model = network.to(self.device)

        ckpt = torch.load(ckpt_path, map_location="cpu")

        # Checkpoints come in several wrappings; unwrap to the raw weights.
        for key in ("model", "state_dict"):
            if key in ckpt:
                state_dict = ckpt[key]
                break
        else:
            state_dict = ckpt

        self.model.load_state_dict(state_dict, strict=True)
        self.model.eval()
        self.current_ckpt = ckpt_path
        print(f" Loaded SegMamba checkpoint: {os.path.basename(ckpt_path)}")

    def predict(self, image: np.ndarray) -> np.ndarray:
        """Run inference on a single case.

        Args:
            image: Channel-first (4, D, H, W) volume; any other rank raises.

        Returns:
            uint8 region masks of shape (3, D, H, W) in [WT, TC, ET] order,
            derived from the argmax over the model's 4 output classes.
        """
        if image.ndim != 4:
            raise ValueError(f"Invalid image shape: {image.shape}")

        batch = torch.from_numpy(image).float().unsqueeze(0).to(self.device)

        with torch.no_grad():
            logits = self.model(batch)
            pred_lbl = logits.argmax(dim=1)

        labels = pred_lbl[0].cpu().numpy()
        # Convert argmax class labels {0..3} into the nested region masks:
        # WT = {1,2,3}, TC = {1,3}, ET = {3}.
        wt = (labels == 1) | (labels == 2) | (labels == 3)
        tc = (labels == 1) | (labels == 3)
        et = labels == 3

        return np.stack([wt, tc, et], axis=0).astype(np.uint8)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def save_single_image(
    arr2d: np.ndarray,
    out_path: str,
    cmap: str = "gray",
    title: str = None,
    is_overlay: bool = False,
):
    """Render one 2-D array to an image file with no axes.

    RGB overlays are drawn as-is; grayscale slices use *cmap*.  An optional
    bold title is placed above the image.
    """
    fig, ax = plt.subplots(figsize=(5, 5))

    imshow_kwargs = {"aspect": "equal"}
    if not is_overlay:
        imshow_kwargs["cmap"] = cmap
    ax.imshow(arr2d, **imshow_kwargs)

    ax.axis("off")
    if title:
        ax.set_title(title, fontsize=STYLE["font_size"], fontweight="bold")

    fig.tight_layout(pad=0.1)
    fig.savefig(out_path, dpi=STYLE["dpi"], bbox_inches="tight", facecolor="white")
    plt.close(fig)
|
|
|
|
|
def visualize_case(
    case_id: str,
    case_data: Dict,
    gliomasam_predictor: GliomaSAMPredictor,
    segmamba_predictor: SegMambaPredictor,
    output_dir: str,
):
    """Generate all visualizations for a single case.

    Writes, under ``<output_dir>/<case_id>/``:
      * ``input_<modality>.png``                - the four raw input slices
      * ``gt_overlay.png`` / ``gt_<region>.png`` - ground-truth overlays
      * ``pred_gliomasam_<ckpt>_*.png``          - per-checkpoint GliomaSAM outputs
      * ``pred_segmamba_<name>_*.png``           - SegMamba outputs read from disk

    NOTE(review): ``segmamba_predictor`` is accepted for interface symmetry
    but never used here — SegMamba results are loaded from precomputed
    NIfTI files rather than run live.
    """
    print(f"\nProcessing case: {case_id}")

    image = case_data["image"]
    label = case_data["label"]

    # Pick the display slice: the axial index with the largest ET area
    # (gt_regions[2]); fall back to the middle slice when no label exists.
    if label is not None:
        gt_regions = label_to_regions(label)
        slice_info = select_best_slice(gt_regions[2])
    else:
        slice_info = {"axial": image.shape[1] // 2}

    slice_idx = slice_info["axial"]
    plane = "axial"

    case_dir = os.path.join(output_dir, case_id)
    ensure_dir(case_dir)

    # ---- raw input modalities -------------------------------------------
    print(" Saving original modalities...")
    for i, (mod, mod_name) in enumerate(zip(CONFIG["modalities"], CONFIG["modality_names"])):
        vol = normalize_volume(image[i])
        slice_2d = extract_slice(vol, plane, slice_idx)
        out_path = os.path.join(case_dir, f"input_{mod_name}.png")
        save_single_image(slice_2d, out_path, cmap="gray", title=mod_name)

    # ---- ground truth ----------------------------------------------------
    print(" Saving ground truth...")
    # All overlays are drawn on modality index 1 (named "T1ce" in CONFIG).
    base_vol = normalize_volume(image[1])
    base_2d = extract_slice(base_vol, plane, slice_idx)

    if label is not None:
        gt_regions = label_to_regions(label)
        gt_masks = {
            "WT": extract_slice(gt_regions[0], plane, slice_idx) > 0,
            "TC": extract_slice(gt_regions[1], plane, slice_idx) > 0,
            "ET": extract_slice(gt_regions[2], plane, slice_idx) > 0,
        }
        gt_overlay = overlay_masks_publication(base_2d, gt_masks)
        out_path = os.path.join(case_dir, "gt_overlay.png")
        save_single_image(gt_overlay, out_path, is_overlay=True, title="Ground Truth")

        # One extra image per individual region.
        for region_name in ["WT", "TC", "ET"]:
            region_overlay = overlay_masks_publication(base_2d, {region_name: gt_masks[region_name]})
            out_path = os.path.join(case_dir, f"gt_{region_name}.png")
            save_single_image(region_overlay, out_path, is_overlay=True, title=f"GT {region_name}")

    # ---- GliomaSAM3-MoE: live inference, one pass per checkpoint --------
    print(" Running GliomaSAM3-MoE predictions...")
    for ckpt_path, ckpt_name in zip(CONFIG["gliomasam_ckpts"], CONFIG["gliomasam_names"]):
        if not os.path.exists(ckpt_path):
            print(f" Checkpoint not found: {ckpt_path}")
            continue

        try:
            gliomasam_predictor.load_checkpoint(ckpt_path)
            pred_regions = gliomasam_predictor.predict(image)

            pred_masks = {
                "WT": extract_slice(pred_regions[0], plane, slice_idx) > 0,
                "TC": extract_slice(pred_regions[1], plane, slice_idx) > 0,
                "ET": extract_slice(pred_regions[2], plane, slice_idx) > 0,
            }
            pred_overlay = overlay_masks_publication(base_2d, pred_masks)
            out_path = os.path.join(case_dir, f"pred_gliomasam_{ckpt_name}_overlay.png")
            save_single_image(pred_overlay, out_path, is_overlay=True, title=f"GliomaSAM3-MoE ({ckpt_name})")

            for region_name in ["WT", "TC", "ET"]:
                region_overlay = overlay_masks_publication(base_2d, {region_name: pred_masks[region_name]})
                out_path = os.path.join(case_dir, f"pred_gliomasam_{ckpt_name}_{region_name}.png")
                save_single_image(region_overlay, out_path, is_overlay=True, title=f"GliomaSAM {ckpt_name} {region_name}")
        except Exception as e:
            # Best-effort: a failing checkpoint must not abort the case.
            print(f" Error with GliomaSAM {ckpt_name}: {e}")

    # ---- SegMamba: precomputed predictions read from NIfTI files --------
    print(" Loading SegMamba predictions from files...")
    import nibabel as nib  # local import: only this section needs nibabel
    for pred_dir, pred_name in zip(CONFIG["segmamba_pred_dirs"], CONFIG["segmamba_names"]):
        if not os.path.exists(pred_dir):
            print(f" Prediction dir not found: {pred_dir}")
            continue

        try:
            pred_path = os.path.join(pred_dir, f"{case_id}.nii.gz")
            if not os.path.exists(pred_path):
                print(f" Prediction file not found: {pred_path}")
                continue

            pred_nii = nib.load(pred_path)
            pred_arr = np.asarray(pred_nii.get_fdata())

            # Normalize to channel-first (3, D, H, W).
            if pred_arr.ndim == 4 and pred_arr.shape[-1] == 3:
                pred_regions = pred_arr.transpose(3, 0, 1, 2)
            elif pred_arr.ndim == 4 and pred_arr.shape[0] == 3:
                pred_regions = pred_arr
            else:
                print(f" Unexpected prediction shape: {pred_arr.shape}")
                continue

            # Reorder channels [1, 0, 2] -> [WT, TC, ET].  This assumes the
            # saved files store [TC, WT, ET] — TODO confirm against the
            # SegMamba export script.
            pred_regions_reordered = np.stack([
                pred_regions[1],
                pred_regions[0],
                pred_regions[2],
            ], axis=0)

            pred_masks = {
                "WT": extract_slice(pred_regions_reordered[0], plane, slice_idx) > 0,
                "TC": extract_slice(pred_regions_reordered[1], plane, slice_idx) > 0,
                "ET": extract_slice(pred_regions_reordered[2], plane, slice_idx) > 0,
            }
            pred_overlay = overlay_masks_publication(base_2d, pred_masks)
            out_path = os.path.join(case_dir, f"pred_segmamba_{pred_name}_overlay.png")
            save_single_image(pred_overlay, out_path, is_overlay=True, title=f"SegMamba ({pred_name})")

            for region_name in ["WT", "TC", "ET"]:
                region_overlay = overlay_masks_publication(base_2d, {region_name: pred_masks[region_name]})
                out_path = os.path.join(case_dir, f"pred_segmamba_{pred_name}_{region_name}.png")
                save_single_image(region_overlay, out_path, is_overlay=True, title=f"SegMamba {pred_name} {region_name}")
            print(f" Loaded: {pred_name}")
        except Exception as e:
            print(f" Error with SegMamba {pred_name}: {e}")

    print(f" Saved to: {case_dir}")
|
|
|
|
|
|
|
|
def create_comparison_grid(output_dir: str, cases: List[str]):
    """Create a summary comparison grid for all cases.

    Re-reads the per-case overlay PNGs written by visualize_case and tiles
    them into a (cases x methods) figure: column 0 is ground truth, then one
    column per GliomaSAM checkpoint, then one per SegMamba variant.  Cells
    whose source PNG is missing are left blank.
    """
    print("\nCreating comparison summary grid...")

    # Bail out early if visualize_case never produced any output.
    first_case_dir = os.path.join(output_dir, cases[0])
    if not os.path.exists(first_case_dir):
        print(" No case directories found, skipping grid generation.")
        return

    n_cases = len(cases)
    n_gliomasam = len(CONFIG["gliomasam_names"])
    n_segmamba = len(CONFIG["segmamba_names"])
    n_cols = 1 + n_gliomasam + n_segmamba  # +1 for the ground-truth column

    fig, axes = plt.subplots(n_cases, n_cols, figsize=(3 * n_cols, 3 * n_cases))
    if n_cases == 1:
        axes = axes.reshape(1, -1)  # keep 2-D indexing for a single row

    col_titles = ["Ground Truth"]
    col_titles += [f"GliomaSAM3-MoE\n({n})" for n in CONFIG["gliomasam_names"]]
    col_titles += [f"SegMamba\n({n})" for n in CONFIG["segmamba_names"]]

    for row_idx, case_id in enumerate(cases):
        case_dir = os.path.join(output_dir, case_id)

        # Column 0: ground-truth overlay.
        ax = axes[row_idx, 0]
        gt_path = os.path.join(case_dir, "gt_overlay.png")
        if os.path.exists(gt_path):
            img = plt.imread(gt_path)
            ax.imshow(img)
        ax.axis("off")
        if row_idx == 0:
            ax.set_title(col_titles[0], fontsize=10, fontweight="bold")
        # NOTE(review): set_ylabel after axis("off") likely has no visible
        # effect — the per-row case label may not render; confirm if needed.
        ax.set_ylabel(case_id.split("-")[-1], fontsize=10, rotation=0, ha="right", va="center")

        # GliomaSAM checkpoint columns.
        col = 1
        for ckpt_name in CONFIG["gliomasam_names"]:
            ax = axes[row_idx, col]
            pred_path = os.path.join(case_dir, f"pred_gliomasam_{ckpt_name}_overlay.png")
            if os.path.exists(pred_path):
                img = plt.imread(pred_path)
                ax.imshow(img)
            ax.axis("off")
            if row_idx == 0:
                ax.set_title(col_titles[col], fontsize=10, fontweight="bold")
            col += 1

        # SegMamba variant columns.
        for ckpt_name in CONFIG["segmamba_names"]:
            ax = axes[row_idx, col]
            pred_path = os.path.join(case_dir, f"pred_segmamba_{ckpt_name}_overlay.png")
            if os.path.exists(pred_path):
                img = plt.imread(pred_path)
                ax.imshow(img)
            ax.axis("off")
            if row_idx == 0:
                ax.set_title(col_titles[col], fontsize=10, fontweight="bold")
            col += 1

    fig.suptitle("Method Comparison: GliomaSAM3-MoE vs SegMamba\n(Different Checkpoints)",
                 fontsize=14, fontweight="bold", y=0.98)
    fig.tight_layout(rect=[0, 0, 1, 0.95])

    grid_path = os.path.join(output_dir, "comparison_grid.png")
    fig.savefig(grid_path, dpi=200, bbox_inches="tight", facecolor="white")
    plt.close(fig)
    print(f" Saved: {grid_path}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def main():
    """Entry point: visualize every case, then build the summary grid."""
    parser = argparse.ArgumentParser(description="Method comparison visualization")
    parser.add_argument("--device", default="cuda", help="Device to use")
    parser.add_argument("--cases", nargs="+", default=None, help="Override case IDs")
    args = parser.parse_args()

    output_dir = CONFIG["output_dir"]
    ensure_dir(output_dir)

    cases = args.cases or CONFIG["cases"]

    rule = "=" * 60
    print(rule)
    print("Method Comparison Visualization")
    print(rule)
    print(f"Cases: {len(cases)}")
    print(f"GliomaSAM3-MoE checkpoints: {len(CONFIG['gliomasam_ckpts'])}")
    # NOTE(review): reports segmamba_ckpts while visualize_case actually
    # reads segmamba_pred_dirs — counts may disagree.
    print(f"SegMamba checkpoints: {len(CONFIG['segmamba_ckpts'])}")
    print(f"Output directory: {output_dir}")

    print("\nInitializing predictors...")
    gliomasam_predictor = GliomaSAMPredictor(CONFIG["gliomasam_config"], args.device)
    segmamba_predictor = SegMambaPredictor(args.device)

    # Each case is independent; log failures and keep going.
    for case_id in cases:
        try:
            case_data = load_case(CONFIG["data_dir"], case_id)
            visualize_case(
                case_id,
                case_data,
                gliomasam_predictor,
                segmamba_predictor,
                output_dir,
            )
        except Exception as e:
            print(f" Error processing {case_id}: {e}")
            import traceback
            traceback.print_exc()

    create_comparison_grid(output_dir, cases)

    print("\n" + rule)
    print(f"All visualizations saved to: {output_dir}")
    print(rule)
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
main() |
|
|
|