# ChipYTY's picture
# Add files using upload-large-folder tool
# fe8202e verified
"""
Publication-quality visualization suite for GliomaSAM3-MoE.
Follows detailed spec for figures.
"""
import argparse
import os
import sys
from typing import Dict, List, Optional, Tuple, Any
import numpy as np
import yaml
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.font_manager as fm
# ============================================================================
# Global Style Configuration (Spec A)
# ============================================================================
# Single source of truth for figure styling. All plotting code in this module
# reads fonts, palette, line widths and DPI from here so outputs stay consistent.
STYLE = {
    # Canvas
    "aspect_main": (16, 9),
    "aspect_panel": (4, 3),
    "dpi": 300,  # output resolution for saved PNG/PDF figures
    "bg_color": "white",
    # Typography
    "font_family": "sans-serif",
    "font_title": 18,
    "font_subtitle": 12,
    "font_label": 10,
    # Lines
    "linewidth_contour": 1.5,
    "linewidth_boundary": 2.0,
    # Colors (fixed palette)
    "color_WT": "#00BBD4",  # cyan
    "color_TC": "#D81B60",  # magenta
    "color_ET": "#FBC02D",  # yellow
    "alpha_mask": 0.40,  # blending weight for region overlays
    # Error colormap: blue -> white -> red
    "cmap_error": "RdBu_r",
}
def hex_to_rgb(hex_color: str) -> Tuple[float, float, float]:
    """Convert '#RRGGBB' (leading '#' optional) into an (r, g, b) tuple in [0, 1]."""
    digits = hex_color.lstrip("#")
    channels = (digits[0:2], digits[2:4], digits[4:6])
    return tuple(int(pair, 16) / 255.0 for pair in channels)
# Pre-converted (0-1 float) RGB tuples for the three tumor regions, used for
# fast per-pixel blending in the overlay functions.
COLORS = {
    "WT": hex_to_rgb(STYLE["color_WT"]),
    "TC": hex_to_rgb(STYLE["color_TC"]),
    "ET": hex_to_rgb(STYLE["color_ET"]),
}
# ============================================================================
# Setup matplotlib defaults
# ============================================================================
def setup_mpl_style():
    """Install the publication defaults from STYLE into matplotlib's rcParams."""
    plt.rcParams.update({
        "font.family": STYLE["font_family"],
        "font.size": STYLE["font_label"],
        "axes.titlesize": STYLE["font_subtitle"],
        "axes.labelsize": STYLE["font_label"],
        "figure.facecolor": STYLE["bg_color"],
        "axes.facecolor": STYLE["bg_color"],
        "savefig.facecolor": STYLE["bg_color"],
        "savefig.dpi": STYLE["dpi"],
        "figure.dpi": 100,  # on-screen DPI only; saved figures use STYLE["dpi"]
    })
# Applied at import time so every figure built by this module inherits the style.
setup_mpl_style()
# ============================================================================
# Imports from project
# ============================================================================
# Make the project's `src/` directory importable regardless of the CWD this
# script is launched from (the project imports below rely on it).
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
SRC_DIR = os.path.join(ROOT_DIR, "src")
if SRC_DIR not in sys.path:
    sys.path.append(SRC_DIR)
from scipy import ndimage as ndi
from scipy.ndimage import zoom
from gliomasam3_moe.data.brats_dataset import SegMambaNPZDataset
from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE
# ============================================================================
# Utility Functions
# ============================================================================
def ensure_dir(path: str) -> None:
    """Create `path` (including parents); a pre-existing directory is fine."""
    os.makedirs(path, exist_ok=True)
def load_config(path: str) -> Dict:
    """Parse a YAML config file into a dict (safe_load: no arbitrary object construction)."""
    with open(path, "r") as f:
        return yaml.safe_load(f)
def normalize_volume(vol: np.ndarray, eps: float = 1e-6) -> np.ndarray:
    """Robustly rescale a volume to [0, 1] via its 1st/99th percentiles.

    NaN/inf values are zeroed first. Empty or near-constant inputs return an
    all-zero float32 array of the same shape.
    """
    arr = np.nan_to_num(
        np.asarray(vol, dtype=np.float32), nan=0.0, posinf=0.0, neginf=0.0
    )
    if arr.size == 0:
        return np.zeros_like(arr, dtype=np.float32)
    lo, hi = np.percentile(arr.reshape(-1), [1, 99])
    span = hi - lo
    if span < eps:
        return np.zeros_like(arr, dtype=np.float32)
    clipped = np.clip(arr, lo, hi)
    return (clipped - lo) / (span + eps)
def label_to_regions(label: np.ndarray) -> np.ndarray:
    """Convert a BraTS-style label map into stacked binary masks [WT, TC, ET]."""
    lab = np.asarray(label)
    regions = [
        lab > 0,                  # WT: any non-background label
        (lab == 1) | (lab == 4),  # TC: labels 1 and 4
        lab == 4,                 # ET: label 4 only
    ]
    return np.stack(regions, axis=0).astype(np.uint8)
def regions_to_label(regions: np.ndarray) -> np.ndarray:
    """Collapse [3, D, H, W] region masks (WT/TC/ET) into a single label map.

    Painted in WT -> TC -> ET order so the most specific region wins per voxel.
    Raises ValueError for any other input shape.
    """
    if regions.ndim != 4 or regions.shape[0] != 3:
        raise ValueError("regions must be [3, D, H, W]")
    wt, tc, et = (regions[i] > 0.5 for i in range(3))
    label = np.zeros(wt.shape, dtype=np.int16)
    label[wt] = 2
    label[tc] = 1
    label[et] = 4
    return label
def extract_slice(vol: np.ndarray, plane: str, idx: int) -> np.ndarray:
    """Pull one 2D slice from a 3D volume along `plane`, rotated 90° for display."""
    slicers = {
        "axial": (idx, slice(None), slice(None)),
        "coronal": (slice(None), idx, slice(None)),
        "sagittal": (slice(None), slice(None), idx),
    }
    if plane not in slicers:
        raise ValueError(f"Unknown plane: {plane}")
    return np.rot90(vol[slicers[plane]])
def select_slices_from_mask(mask: Optional[np.ndarray]) -> Dict[str, int]:
    """For each plane, pick the slice index with the largest mask area.

    Returns all-None values when the mask is missing or empty.
    """
    if mask is None or mask.sum() == 0:
        return {"axial": None, "coronal": None, "sagittal": None}
    m = mask.astype(np.uint8)
    reduce_axes = {"axial": (1, 2), "coronal": (0, 2), "sagittal": (0, 1)}
    return {
        plane: int(np.argmax(m.sum(axis=axes)))
        for plane, axes in reduce_axes.items()
    }
def fallback_slices(shape: Tuple[int, int, int]) -> Dict[str, int]:
    """Default slice choice: the middle index of each plane."""
    depth, height, width = shape
    return {"axial": depth // 2, "coronal": height // 2, "sagittal": width // 2}
def get_slices(mask_ref: Optional[np.ndarray], vol_shape: Tuple[int, int, int]) -> Dict[str, int]:
    """Mask-driven slice selection, falling back to volume centers when the mask is unusable."""
    chosen = select_slices_from_mask(mask_ref)
    if any(v is None for v in chosen.values()):
        return fallback_slices(vol_shape)
    return chosen
def mask_boundary(mask2d: np.ndarray, iterations: int = 1) -> np.ndarray:
    """Boundary band of a binary mask: pixels removed by `iterations` erosions."""
    if mask2d.sum() == 0:
        return mask2d.astype(bool)
    as_bool = mask2d.astype(bool)
    shrunk = ndi.binary_erosion(as_bool, iterations=iterations)
    return as_bool ^ shrunk
def signed_distance(mask: np.ndarray) -> np.ndarray:
    """Signed Euclidean distance transform: positive inside, negative outside.

    An empty mask yields an all-zero float32 map.
    """
    m = np.asarray(mask).astype(bool)
    if not m.any():
        return np.zeros_like(m, dtype=np.float32)
    inside = ndi.distance_transform_edt(m)
    outside = ndi.distance_transform_edt(~m)
    return (inside - outside).astype(np.float32)
def boundary_error_map(pred: np.ndarray, gt: np.ndarray) -> np.ndarray:
    """Signed error map relative to the GT boundary.

    False positives get +|distance to GT boundary|; false negatives get
    -|distance|; correct pixels stay 0. Matches the RdBu_r convention used
    elsewhere (red = FP, blue = FN).
    """
    p = pred.astype(bool)
    g = gt.astype(bool)
    dist = signed_distance(g)
    err = np.zeros_like(dist, dtype=np.float32)
    false_pos = p & ~g  # predicted tumor where GT has none
    false_neg = g & ~p  # GT tumor that was missed
    err[false_pos] = np.abs(dist[false_pos])
    err[false_neg] = -np.abs(dist[false_neg])
    return err
def connected_components(mask: np.ndarray) -> Tuple[np.ndarray, int]:
    """Label connected components of a binary mask; returns (label image, count)."""
    labeled, count = ndi.label(mask.astype(np.uint8))
    return labeled, int(count)
def fft_amplitude_slice(vol: np.ndarray, plane: str = "axial") -> np.ndarray:
    """Central slice of the (shifted) log-amplitude spectrum of a 3D volume, normalized."""
    amp = np.fft.fftshift(np.abs(np.fft.fftn(vol)))
    depth, height, width = amp.shape
    if plane == "axial":
        central = amp[depth // 2, :, :]
    elif plane == "coronal":
        central = amp[:, height // 2, :]
    else:
        central = amp[:, :, width // 2]
    return normalize_volume(np.log1p(central))
def fourier_amplitude_mix(a: np.ndarray, b: np.ndarray, lam: float) -> np.ndarray:
    """Fourier amplitude mixing for [C, D, H, W] volumes.

    Blends the FFT amplitudes of `a` and `b` (weight `lam` on `b`) while
    keeping the phase of `a`. When shapes differ, `b`'s shared channels are
    linearly resampled onto `a`'s spatial grid (missing channels stay zero).
    """
    if a.shape != b.shape:
        resampled = np.zeros_like(a)
        n_channels = min(a.shape[0], b.shape[0])
        factors = tuple(a.shape[i + 1] / b.shape[i + 1] for i in range(3))
        for ch in range(n_channels):
            resampled[ch] = zoom(b[ch], factors, order=1)
        b = resampled
    spatial_axes = (1, 2, 3)
    spec_a = np.fft.fftn(a, axes=spatial_axes)
    spec_b = np.fft.fftn(b, axes=spatial_axes)
    blended_amp = (1.0 - lam) * np.abs(spec_a) + lam * np.abs(spec_b)
    phase_a = np.exp(1j * np.angle(spec_a))
    mixed = np.fft.ifftn(blended_amp * phase_a, axes=spatial_axes).real
    return mixed.astype(np.float32)
# ============================================================================
# Overlay Functions (Spec compliant)
# ============================================================================
def overlay_masks_publication(
    base2d: np.ndarray,
    masks: Dict[str, np.ndarray],
    alpha: float = STYLE["alpha_mask"],
    draw_boundary: bool = True,
    boundary_width: int = 2,
) -> np.ndarray:
    """Blend WT/TC/ET masks over a grayscale slice using the fixed palette.

    Regions are painted in WT -> TC -> ET order so ET ends up on top; each
    region's boundary band is optionally painted at full opacity. Masks whose
    shape differs from the slice are nearest-neighbor resampled first.
    """
    gray = np.clip(base2d, 0.0, 1.0)
    canvas = np.stack([gray] * 3, axis=-1).astype(np.float32)
    for region in ("WT", "TC", "ET"):  # ET painted last => on top
        if region not in masks:
            continue
        mask = masks[region].astype(bool)
        if mask.shape != gray.shape:
            factors = (gray.shape[0] / mask.shape[0], gray.shape[1] / mask.shape[1])
            mask = zoom(mask.astype(float), factors, order=0) > 0.5
        if not mask.any():
            continue
        tint = np.array(COLORS.get(region, (1.0, 0.0, 0.0)), dtype=np.float32)
        canvas[mask] = (1.0 - alpha) * canvas[mask] + alpha * tint
        if draw_boundary:
            edge = mask_boundary(mask, iterations=boundary_width)
            canvas[edge] = tint
    return np.clip(canvas, 0, 1)
def draw_contour(ax, mask2d: np.ndarray, color: str, linewidth: float = STYLE["linewidth_contour"]):
    """Trace the 0.5 level-set of a binary mask onto `ax`; no-op for empty masks."""
    if mask2d.sum() == 0:
        return
    ax.contour(
        mask2d.astype(float),
        levels=[0.5],
        colors=[color],
        linewidths=[linewidth],
    )
# ============================================================================
# Data Loading
# ============================================================================
class CaseLoader:
    """Loads preprocessed cases (modalities + optional label) with an in-memory cache."""

    def __init__(self, cfg: Dict):
        self.data_cfg = cfg.get("data", {})
        # Cache keyed by (case_id, include_label) so label-free loads are reusable.
        self.cache: Dict[Tuple[str, bool], Dict] = {}

    def get_case(self, case_id: str, include_label: bool = True) -> Dict:
        """Return {"images": {mod: normalized vol}, "label": int16 vol or None, "affine": eye(4)}.

        Only the "segmamba_npz" data format is supported; other formats raise
        NotImplementedError. `case_id` may be a bare id or a full *.npz path.
        """
        key = (case_id, include_label)
        if key in self.cache:
            return self.cache[key]
        data_format = self.data_cfg.get("format", "nifti")
        if data_format == "segmamba_npz":
            npz_dir = self.data_cfg.get("npz_dir") or self.data_cfg.get("root_dir", "")
            if case_id.endswith(".npz"):
                npz_path = case_id
            else:
                npz_path = os.path.join(npz_dir, case_id + ".npz")
            # Sibling .npy / _seg.npy files (if present) allow memory-mapped loads
            # without decompressing the .npz archive.
            npy_path = npz_path[:-3] + "npy"
            seg_path = npz_path[:-4] + "_seg.npy"
            if os.path.isfile(npy_path):
                image = np.load(npy_path, mmap_mode="r")
            else:
                data = np.load(npz_path)
                image = data["data"]
            image = np.asarray(image, dtype=np.float32)
            # Squeeze a leading batch dim; move channels first if stored channels-last.
            if image.ndim == 5 and image.shape[0] == 1:
                image = image[0]
            if image.ndim == 4 and image.shape[0] != 4 and image.shape[-1] == 4:
                image = image.transpose(3, 0, 1, 2)
            label = None
            if include_label:
                if os.path.isfile(seg_path):
                    label = np.load(seg_path, mmap_mode="r")
                else:
                    data = np.load(npz_path)
                    label = data["seg"] if "seg" in data else None
                if label is not None:
                    label = np.asarray(label, dtype=np.int16)
                    if label.ndim == 4 and label.shape[0] == 1:
                        label = label[0]
                    # Map ET label 3 -> 4 if needed (only when no 4s exist, so
                    # already-converted labels are left untouched)
                    if label.max() == 3 and (label == 4).sum() == 0:
                        label = label.copy()
                        label[label == 3] = 4
            # Create images dict with modality names; extra configured modalities
            # beyond the stored channel count are silently skipped.
            modalities = self.data_cfg.get("modalities", ["t1n", "t1c", "t2f", "t2w"])
            images = {}
            for i, mod in enumerate(modalities):
                if i < image.shape[0]:
                    images[mod] = normalize_volume(image[i])
            out = {"images": images, "label": label, "affine": np.eye(4)}
            self.cache[key] = out
            return out
        raise NotImplementedError("Only segmamba_npz format is currently supported")
class PredictionLoader:
    """Resolves and loads per-method prediction volumes stored as NIfTI files."""

    def __init__(self, cfg: Dict):
        pred_cfg = cfg.get("predictions", {})
        self.ours = pred_cfg.get("ours", {})            # config dict for our method
        self.baselines = pred_cfg.get("baselines", [])  # list of baseline config dicts

    def get_all_methods(self) -> List[Dict]:
        """All method configs: ours first (when configured), then baselines."""
        methods = []
        if self.ours:
            methods.append(self.ours)
        methods.extend(self.baselines)
        return methods

    def load_method(self, method_cfg: Dict, case_id: str) -> Dict:
        """Load one method's prediction for a case.

        Returns a dict with keys "label" (int16 map or None), "regions"
        (uint8 [3, ...] WT/TC/ET masks or None) and "prob" (float32 [3, ...]
        or None). `method_cfg["type"]` may be regions_prob / regions_bin /
        label / segmamba_3c, or "auto" to probe for files in that order.
        Raises FileNotFoundError when no candidate file exists.
        """
        import nibabel as nib  # local import: nibabel only needed when loading predictions
        pred_dir = method_cfg.get("dir", "")
        pred_type = method_cfg.get("type", "auto")

        def _find(base: str) -> Optional[str]:
            # First existing file among the supported NIfTI extensions.
            for ext in [".nii.gz", ".nii"]:
                path = os.path.join(pred_dir, base + ext)
                if os.path.isfile(path):
                    return path
            return None

        paths = {
            "regions_prob": _find(f"{case_id}_regions_prob"),
            "regions_bin": _find(f"{case_id}_regions_bin"),
            "label": _find(f"{case_id}_label"),
            "segmamba_3c": _find(f"{case_id}"),
        }
        if pred_type == "auto":
            for key in ["regions_prob", "regions_bin", "label", "segmamba_3c"]:
                if paths[key] is not None:
                    pred_type = key
                    break
        path = paths.get(pred_type)
        if path is None:
            raise FileNotFoundError(f"No prediction found for {case_id} in {pred_dir}")
        img = nib.load(path)
        arr = np.asarray(img.get_fdata())
        out: Dict[str, Optional[np.ndarray]] = {"label": None, "regions": None, "prob": None}
        if pred_type in {"regions_prob", "regions_bin"}:
            # Channels-last NIfTI -> channels-first [3, D, H, W].
            if arr.ndim == 4 and arr.shape[-1] == 3:
                regions = arr.transpose(3, 0, 1, 2)
            else:
                regions = arr
            out["prob"] = regions.astype(np.float32) if pred_type == "regions_prob" else None
            # Probabilities are thresholded at 0.5; binary files pass through.
            out["regions"] = (regions > 0.5).astype(np.uint8) if pred_type == "regions_prob" else regions.astype(np.uint8)
            out["label"] = regions_to_label(out["regions"])
        elif pred_type == "segmamba_3c":
            if arr.ndim == 4 and arr.shape[-1] == 3:
                regions = arr.transpose(3, 0, 1, 2).astype(np.uint8)
            else:
                regions = arr.astype(np.uint8)
            out["regions"] = regions
            out["label"] = regions_to_label(regions)
        else:
            # "label": a discrete label map; derive the binary regions from it.
            label = arr.astype(np.int16)
            out["label"] = label
            out["regions"] = label_to_regions(label)
        return out
class AuxCache:
    """Disk cache of per-case auxiliary arrays stored as `<case_id>_aux.npz`.

    A falsy `aux_dir` disables caching entirely: `path`/`load` return None
    and `save` is a no-op.
    """

    def __init__(self, aux_dir: Optional[str]):
        self.aux_dir = aux_dir

    def path(self, case_id: str) -> Optional[str]:
        """Cache file path for a case, or None when caching is disabled."""
        if not self.aux_dir:
            return None
        return os.path.join(self.aux_dir, f"{case_id}_aux.npz")

    def load(self, case_id: str) -> Optional[Dict]:
        """Return cached arrays as a dict, or None when absent or disabled."""
        npz_path = self.path(case_id)
        if not (npz_path and os.path.isfile(npz_path)):
            return None
        archive = np.load(npz_path)
        return {name: archive[name] for name in archive.files}

    def save(self, case_id: str, data: Dict) -> None:
        """Persist the arrays, creating the cache directory on demand."""
        if not self.aux_dir:
            return
        ensure_dir(self.aux_dir)
        np.savez_compressed(self.path(case_id), **data)
class ModelRunner:
    """Runs GliomaSAM3-MoE on single cases to expose intermediate activations.

    torch is imported lazily in __init__ so figure-only runs (served entirely
    from cached aux data) never require it.
    """

    def __init__(self, vis_cfg: Dict, model_cfg_path: str, ckpt_path: str, device: str):
        import torch
        import torch.nn.functional as F
        self.torch = torch
        self.F = F
        self.vis_cfg = vis_cfg
        self.cfg = load_config(model_cfg_path)
        # Fall back to CPU when CUDA is unavailable, regardless of `device`.
        self.device = torch.device(device if torch.cuda.is_available() else "cpu")
        self.model = GliomaSAM3_MoE(**self.cfg["model"]).to(self.device)
        ckpt = torch.load(ckpt_path, map_location="cpu")
        # Drop "freqs_cis" buffers from the checkpoint; strict=False tolerates
        # their absence (and any other benign mismatches).
        state_dict = {k: v for k, v in ckpt["model"].items() if "freqs_cis" not in k}
        self.model.load_state_dict(state_dict, strict=False)
        self.model.eval()

    def load_case_tensor(self, case_id: str):
        """Load one case through the dataset pipeline.

        Returns (image tensor with a leading batch dim, case_id string).
        """
        data_cfg = self.vis_cfg.get("data", {})
        data_dir = data_cfg.get("npz_dir") or data_cfg.get("root_dir", "")
        if case_id.endswith(".npz"):
            npz_path = case_id
        else:
            npz_path = os.path.join(data_dir, case_id + ".npz")
        dataset = SegMambaNPZDataset(data_dir=data_dir, npz_paths=[npz_path], test=True, ensure_npy=True)
        sample = dataset[0]
        image = sample["image"].unsqueeze(0)
        case = sample["case_id"]
        return image, case

    def forward_intermediate(self, image):
        """Forward pass mirroring the model's pipeline while capturing intermediates.

        NOTE(review): this re-implements the model's forward step by step to
        expose internals; it must be kept in sync with GliomaSAM3_MoE.forward
        whenever the model changes. Returns a dict of tensors (pi_et,
        moe_gamma, spectral_stats, et_pre/et_post, expert_contrib, ...).
        Expects `image` shaped [B, C, D, H, W] (unpacked below).
        """
        torch = self.torch
        F = self.F
        model = self.model
        with torch.no_grad():
            b, c, d, h, w = image.shape
            orig_h, orig_w = h, w
            # Symmetrically pad H/W up to a multiple of the encoder patch size.
            pad_h = (model.patch_size - (h % model.patch_size)) % model.patch_size
            pad_w = (model.patch_size - (w % model.patch_size)) % model.patch_size
            ph0, ph1 = pad_h // 2, pad_h - pad_h // 2
            pw0, pw1 = pad_w // 2, pad_w - pad_w // 2
            if pad_h > 0 or pad_w > 0:
                image = F.pad(image, (pw0, pw1, ph0, ph1, 0, 0))
                h, w = image.shape[-2:]
            image = image.to(self.device)
            # Input stems (names suggest frequency-domain branches — see model code).
            x_plus, _ = model.hfdi(image)
            x_spec, spectral_stats = model.spectral(image)
            # Fold depth into the batch dim to run the 2D encoder slice-wise;
            # 7 is the channel count the hfdi stem produces (hard-coded here).
            x2d = x_plus.permute(0, 2, 1, 3, 4).reshape(b * d, 7, h, w)
            tokens, (gh, gw) = model.encoder2d(x2d)
            n = gh * gw
            tokens = tokens.view(b, d, n, -1)
            tokens = model.slice_adapter(tokens, direction="forward")
            # Global embedding -> ET-presence score and concept prompt.
            z = tokens.mean(dim=(1, 2))
            pi_et = model.attr_head(z)["pi_et"]
            token_ids = model._select_concept_tokens(pi_et, label=None)
            prompt = model.prompt_encoder(token_ids)
            tokens = model.prompt_film(tokens, prompt)
            u = tokens.view(b, d, gh, gw, -1).permute(0, 4, 1, 2, 3)
            # Dual enhancement: msda and fa paths, blended by the learned gate eta.
            u_msda = model.dual_enhance.msda(u)
            u_lv1 = model.dual_enhance.fa_level(u)
            u_fa = model.dual_enhance.fa_fuse(torch.cat([u, u_lv1], dim=1))
            pool = torch.cat([u_fa, u_msda], dim=1).mean(dim=(2, 3, 4))
            eta = torch.sigmoid(model.dual_enhance.fcf_mlp(pool)).view(b, 1, 1, 1, 1)
            u_fuse = eta * u_fa + (1.0 - eta) * u_msda
            u_spec = model.dual_enhance.spec_stem(x_spec)
            u_out = model.dual_enhance.fuse_conv(torch.cat([u_fuse, u_spec], dim=1))
            logits, gamma = model.moe_decoder(u_out, z, prompt, spectral_stats, target_size=(d, h, w))
            # Undo the symmetric padding on the decoder output.
            if pad_h > 0 or pad_w > 0:
                logits = logits[:, :, :, ph0 : ph0 + orig_h, pw0 : pw0 + orig_w]
            # ET probability before/after the pi_et gate (channel 2 = ET).
            et_pre = torch.sigmoid(logits[:, 2:3])
            et_post = et_pre * pi_et.view(b, 1, 1, 1, 1)
            # Per-expert mean probabilities, weighted by the routing gamma.
            u_up = F.interpolate(u_out, size=(d, h, w), mode="trilinear", align_corners=False)
            logits_all = torch.stack([exp(u_up) for exp in model.moe_decoder.experts], dim=1)
            prob_all = torch.sigmoid(logits_all)
            mean_prob = prob_all.mean(dim=(3, 4, 5))
            contrib = gamma.view(b, -1, 1) * mean_prob
            return {
                "pi_et": pi_et,
                "moe_gamma": gamma,
                "spectral_stats": spectral_stats,
                "et_pre": et_pre,
                "et_post": et_post,
                "expert_contrib": contrib,
                "x_spec": x_spec,
                "u_fuse": u_fuse,
                "u_spec": u_spec,
                "logits": logits,
            }
# ============================================================================
# Figure Saving (Spec L)
# ============================================================================
def save_figure(fig, out_dir: str, name: str, close: bool = True):
    """Write `fig` to <out_dir>/<name>.png and .pdf at publication DPI, then close it."""
    ensure_dir(out_dir)
    common = dict(
        dpi=STYLE["dpi"],
        bbox_inches="tight",
        pad_inches=0.1,  # small margin so the tight bbox doesn't clip labels
        facecolor=STYLE["bg_color"],
    )
    png_path = os.path.join(out_dir, f"{name}.png")
    fig.savefig(png_path, **common)
    fig.savefig(os.path.join(out_dir, f"{name}.pdf"), **common)
    if close:
        plt.close(fig)
    print(f" Saved: {png_path}")
def finalize_figure(fig, title: Optional[str] = None):
    """Apply an optional bold suptitle plus layout tweaks so panels don't overlap.

    tight_layout can raise for some artist combinations; failures are
    deliberately ignored and the subplots_adjust spacing below still applies.
    """
    if title:
        fig.suptitle(title, fontsize=STYLE["font_title"], fontweight="bold", y=0.98)
    # Reserve headroom for the suptitle when one is present.
    try:
        fig.tight_layout(rect=[0, 0.02, 1, 0.95] if title else [0, 0, 1, 1])
    except Exception:
        pass
    fig.subplots_adjust(wspace=0.3, hspace=0.3)
# ============================================================================
# B) Main Qualitative Comparison (Spec B)
# ============================================================================
def make_qualitative(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None:
    """
    Main qualitative comparison figure.

    Layout: each row = one case; columns = [modalities... | GT | Ours | baselines...].
    A small coronal inset is drawn in the top-right corner of each method panel,
    showing that method's own prediction (or the plain image when unavailable).
    """
    cases = cfg.get("cases", {}).get("qualitative", [])
    if not cases:
        return
    methods = pred_loader.get_all_methods()
    modalities = cfg.get("data", {}).get("modalities", ["t1n", "t1c", "t2f", "t2w"])
    mod_labels = {"t1n": "T1", "t1c": "T1ce", "t2f": "FLAIR", "t2w": "T2"}
    n_cols = len(modalities) + 1 + len(methods)  # modalities + GT + methods
    n_rows = len(cases)
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(2.0 * n_cols, 2.0 * n_rows))
    if n_rows == 1:
        axes = axes.reshape(1, -1)
    for row_idx, case_id in enumerate(cases):
        case = case_loader.get_case(case_id, include_label=True)
        images = case["images"]
        label = case["label"]
        # Use T1ce as reference for slice selection when available.
        ref_mod = "t1c" if "t1c" in images else list(images.keys())[0]
        base = images[ref_mod]
        # Pick the slice with the largest ET area (fallback: volume centers).
        mask_ref = label_to_regions(label)[2] if label is not None else None
        idx = get_slices(mask_ref, base.shape)
        plane = "axial"
        slice_idx = idx[plane]
        col_idx = 0
        # Plot modalities
        for mod in modalities:
            ax = axes[row_idx, col_idx]
            if mod in images:
                img2d = extract_slice(images[mod], plane, slice_idx)
                ax.imshow(img2d, cmap="gray", aspect="equal")
            ax.axis("off")
            if row_idx == 0:
                ax.set_title(mod_labels.get(mod, mod.upper()), fontsize=STYLE["font_subtitle"], fontweight="bold")
            col_idx += 1
        # Plot GT
        ax = axes[row_idx, col_idx]
        base2d = extract_slice(base, plane, slice_idx)
        if label is not None:
            gt_regions = label_to_regions(label)
            masks = {
                "WT": extract_slice(gt_regions[0], plane, slice_idx) > 0,
                "TC": extract_slice(gt_regions[1], plane, slice_idx) > 0,
                "ET": extract_slice(gt_regions[2], plane, slice_idx) > 0,
            }
            overlay = overlay_masks_publication(base2d, masks)
            ax.imshow(overlay, aspect="equal")
        else:
            ax.imshow(base2d, cmap="gray", aspect="equal")
        ax.axis("off")
        if row_idx == 0:
            ax.set_title("GT", fontsize=STYLE["font_subtitle"], fontweight="bold")
        col_idx += 1
        # Plot methods
        for method in methods:
            ax = axes[row_idx, col_idx]
            # Track THIS method's regions explicitly. Previously the inset
            # tested `"pred_regions" in dir()`, which could reuse a stale
            # prediction from the previous method when loading failed.
            pred_regions = None
            try:
                pred = pred_loader.load_method(method, case_id)
                pred_regions = pred["regions"]
                masks = {
                    "WT": extract_slice(pred_regions[0], plane, slice_idx) > 0,
                    "TC": extract_slice(pred_regions[1], plane, slice_idx) > 0,
                    "ET": extract_slice(pred_regions[2], plane, slice_idx) > 0,
                }
                overlay = overlay_masks_publication(base2d, masks)
                ax.imshow(overlay, aspect="equal")
            except Exception:
                # Missing/broken prediction: show the plain slice and mark N/A.
                ax.imshow(base2d, cmap="gray", aspect="equal")
                ax.text(0.5, 0.5, "N/A", transform=ax.transAxes, ha="center", va="center", fontsize=STYLE["font_label"])
            ax.axis("off")
            if row_idx == 0:
                ax.set_title(method.get("name", "Method"), fontsize=STYLE["font_subtitle"], fontweight="bold")
            # Add small coronal inset (top-right corner) for this method.
            inset_ax = ax.inset_axes([0.65, 0.65, 0.33, 0.33])
            try:
                cor_idx = idx["coronal"]
                base_cor = extract_slice(base, "coronal", cor_idx)
                if pred_regions is not None:
                    masks_cor = {
                        "WT": extract_slice(pred_regions[0], "coronal", cor_idx) > 0,
                        "TC": extract_slice(pred_regions[1], "coronal", cor_idx) > 0,
                        "ET": extract_slice(pred_regions[2], "coronal", cor_idx) > 0,
                    }
                    overlay_cor = overlay_masks_publication(base_cor, masks_cor)
                    inset_ax.imshow(overlay_cor, aspect="equal")
                else:
                    inset_ax.imshow(base_cor, cmap="gray", aspect="equal")
            except Exception:
                # Best-effort inset; leave it blank if slicing/overlay fails.
                pass
            inset_ax.axis("off")
            inset_ax.patch.set_edgecolor("white")
            inset_ax.patch.set_linewidth(1)
            col_idx += 1
        # Case ID label rotated along the left edge of the row.
        axes[row_idx, 0].text(-0.15, 0.5, case_id.split("-")[-1], transform=axes[row_idx, 0].transAxes,
                              rotation=90, va="center", ha="right", fontsize=STYLE["font_label"])
    finalize_figure(fig, "Main Qualitative Comparison")
    save_figure(fig, out_dir, "Fig1_qualitative_comparison")
# ============================================================================
# C) ET-absent Case Study (Spec C)
# ============================================================================
def make_et_absent(cfg: Dict, case_loader: CaseLoader, aux: AuxCache, runner: Optional[ModelRunner], out_dir: str) -> None:
    """
    ET-absent case study.
    Three columns: Before gate | After gate | π_ET value with colorbar

    Gate data (pi_et, et_pre, et_post) is read from the aux cache; when missing
    and a ModelRunner is available, it is computed and cached. Cases without
    either source are skipped.
    """
    cases = cfg.get("cases", {}).get("et_absent", [])
    if not cases:
        return
    for case_idx, case_id in enumerate(cases):
        case = case_loader.get_case(case_id, include_label=False)
        images = case["images"]
        ref_mod = "t1c" if "t1c" in images else list(images.keys())[0]
        base = images[ref_mod]
        # Load or compute aux data
        aux_data = aux.load(case_id)
        needed_keys = ["pi_et", "et_pre", "et_post"]
        if aux_data is None or not all(k in aux_data for k in needed_keys):
            if runner is None:
                continue  # no cache and no model available: skip this case
            image, _ = runner.load_case_tensor(case_id)
            out = runner.forward_intermediate(image)
            new_data = {
                "pi_et": out["pi_et"].detach().cpu().numpy(),
                "et_pre": out["et_pre"].detach().cpu().numpy(),
                "et_post": out["et_post"].detach().cpu().numpy(),
            }
            # Merge into any partial cache instead of discarding it.
            if aux_data is not None:
                aux_data.update(new_data)
            else:
                aux_data = new_data
            aux.save(case_id, aux_data)
        if aux_data is None:
            continue
        # Strip batch/channel dims; pi_et reduces to a single scalar.
        et_pre = aux_data["et_pre"][0, 0]
        et_post = aux_data["et_post"][0, 0]
        pi_et = float(np.asarray(aux_data["pi_et"]).reshape(-1)[0])
        # Slice where the pre-gate ET response (> 0.3) is largest.
        idx = get_slices(et_pre > 0.3, base.shape)
        plane = "axial"
        slice_idx = idx[plane]
        fig = plt.figure(figsize=(14, 5))
        # 4 columns: before | after | pi_ET bar | colorbar strip.
        gs = fig.add_gridspec(1, 4, width_ratios=[1, 1, 0.6, 0.05], wspace=0.25)
        base2d = extract_slice(base, plane, slice_idx)
        pre2d = extract_slice(et_pre, plane, slice_idx)
        post2d = extract_slice(et_post, plane, slice_idx)
        # Before gate
        ax0 = fig.add_subplot(gs[0])
        ax0.imshow(base2d, cmap="gray", aspect="equal")
        im = ax0.imshow(pre2d, cmap="YlOrRd", alpha=0.6, vmin=0, vmax=1, aspect="equal")
        draw_contour(ax0, pre2d > 0.5, STYLE["color_ET"], linewidth=STYLE["linewidth_boundary"])
        ax0.set_title("ET Before Gate", fontsize=STYLE["font_subtitle"], fontweight="bold")
        ax0.axis("off")
        # After gate
        ax1 = fig.add_subplot(gs[1])
        ax1.imshow(base2d, cmap="gray", aspect="equal")
        ax1.imshow(post2d, cmap="YlOrRd", alpha=0.6, vmin=0, vmax=1, aspect="equal")
        draw_contour(ax1, post2d > 0.5, STYLE["color_ET"], linewidth=STYLE["linewidth_boundary"])
        ax1.set_title("ET After Gate", fontsize=STYLE["font_subtitle"], fontweight="bold")
        ax1.axis("off")
        # π_ET value with bar and stats
        ax2 = fig.add_subplot(gs[2])
        ax2.barh(0.5, pi_et, height=0.25, color=STYLE["color_ET"], edgecolor="black", linewidth=1.5)
        ax2.axvline(0.5, color="gray", linestyle="--", linewidth=1.5, label="Threshold")
        ax2.set_xlim(0, 1)
        ax2.set_ylim(0, 1)
        ax2.set_xlabel("π_ET value", fontsize=STYLE["font_label"])
        # Title color flags pass/fail against the 0.5 threshold.
        ax2.set_title(f"π_ET = {pi_et:.3f}", fontsize=STYLE["font_subtitle"], fontweight="bold",
                      color="green" if pi_et > 0.5 else "red")
        ax2.set_yticks([])
        ax2.spines["top"].set_visible(False)
        ax2.spines["right"].set_visible(False)
        ax2.spines["left"].set_visible(False)
        # Stats text: counts of >0.5 pixels on the displayed slice (not full volume).
        pre_vox = int((pre2d > 0.5).sum())
        post_vox = int((post2d > 0.5).sum())
        diff_pct = (pre_vox - post_vox) / max(pre_vox, 1) * 100
        ax2.text(0.5, 0.2, f"Before: {pre_vox}\nAfter: {post_vox}\nΔ: {diff_pct:+.1f}%",
                 transform=ax2.transAxes, fontsize=STYLE["font_label"], ha="center", va="bottom",
                 bbox=dict(boxstyle="round,pad=0.3", facecolor="lightyellow", edgecolor="gray", alpha=0.9))
        # Colorbar
        ax_cbar = fig.add_subplot(gs[3])
        cbar = fig.colorbar(im, cax=ax_cbar)
        cbar.set_label("ET Prob", fontsize=STYLE["font_label"])
        fig.suptitle(f"ET Gate Study: {case_id}", fontsize=STYLE["font_title"], fontweight="bold", y=0.98)
        fig.tight_layout(rect=[0, 0, 1, 0.93])
        save_figure(fig, out_dir, f"Fig2_{chr(ord('a')+case_idx)}_et_absent_{case_id}")
# ============================================================================
# D) Boundary Error Visualization (Spec D)
# ============================================================================
def make_boundary(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None:
    """
    Boundary error visualization.
    Shows GT boundary (white) + Pred boundary (black) + signed error heatmap
    """
    cases = cfg.get("cases", {}).get("boundary", [])
    if not cases:
        return
    # Region to analyze (default ET); index into the [WT, TC, ET] stack.
    region_name = cfg.get("visualization", {}).get("boundary_region", "ET")
    region_idx = {"WT": 0, "TC": 1, "ET": 2}[region_name]
    for case_idx, case_id in enumerate(cases):
        case = case_loader.get_case(case_id, include_label=True)
        if case["label"] is None:
            continue  # boundary analysis requires ground truth
        images = case["images"]
        ref_mod = "t1c" if "t1c" in images else list(images.keys())[0]
        base = images[ref_mod]
        gt_regions = label_to_regions(case["label"])
        # Only "our" method's prediction is analyzed here.
        pred = pred_loader.load_method(pred_loader.ours, case_id)
        pred_regions = pred["regions"]
        # Pick the slice where the GT region is largest.
        mask_ref = gt_regions[region_idx]
        idx = get_slices(mask_ref, base.shape)
        plane = "axial"
        slice_idx = idx[plane]
        fig, axes = plt.subplots(1, 3, figsize=(12, 4))
        base2d = extract_slice(base, plane, slice_idx)
        gt2d = extract_slice(gt_regions[region_idx], plane, slice_idx) > 0
        pred2d = extract_slice(pred_regions[region_idx], plane, slice_idx) > 0
        # Resize pred2d if needed (nearest-neighbor keeps it binary).
        if pred2d.shape != gt2d.shape:
            zoom_factors = (gt2d.shape[0] / pred2d.shape[0], gt2d.shape[1] / pred2d.shape[1])
            pred2d = zoom(pred2d.astype(float), zoom_factors, order=0) > 0.5
        err2d = boundary_error_map(pred2d, gt2d)
        # Panel 1: Base image with boundaries
        ax = axes[0]
        ax.imshow(base2d, cmap="gray", aspect="equal")
        draw_contour(ax, gt2d, "white", linewidth=STYLE["linewidth_boundary"])
        draw_contour(ax, pred2d, "black", linewidth=STYLE["linewidth_contour"])
        ax.set_title("Boundaries: GT (white) vs Pred (black)", fontsize=STYLE["font_subtitle"])
        ax.axis("off")
        # Panel 2: Overlay comparison
        ax = axes[1]
        overlay = overlay_masks_publication(base2d, {region_name: gt2d}, alpha=0.3)
        ax.imshow(overlay, aspect="equal")
        draw_contour(ax, pred2d, "black", linewidth=STYLE["linewidth_contour"])
        ax.set_title(f"GT ({region_name}) + Pred Boundary", fontsize=STYLE["font_subtitle"])
        ax.axis("off")
        # Panel 3: Signed error heatmap (positive = FP/red, negative = FN/blue)
        ax = axes[2]
        ax.imshow(base2d, cmap="gray", aspect="equal")
        max_err = max(np.abs(err2d).max(), 1.0)  # symmetric color range, at least 1 px
        im = ax.imshow(err2d, cmap=STYLE["cmap_error"], alpha=0.7, vmin=-max_err, vmax=max_err, aspect="equal")
        ax.set_title("Signed Boundary Error", fontsize=STYLE["font_subtitle"])
        ax.axis("off")
        # Colorbar
        cbar = fig.colorbar(im, ax=ax, orientation="vertical", fraction=0.046, pad=0.04)
        cbar.set_label("Error (blue=FN, red=FP)", fontsize=STYLE["font_label"])
        finalize_figure(fig, f"Boundary Analysis: {case_id} ({region_name})")
        save_figure(fig, out_dir, f"Fig3_{chr(ord('a')+case_idx)}_boundary_{case_id}")
# ============================================================================
# E) Tiny/Fragmented ET Cases (Spec E)
# ============================================================================
def make_tiny_et(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None:
    """
    Tiny/fragmented ET visualization with ROI zoom.

    Crops a 64x64 ROI around the GT ET centroid (falling back to the volume
    center) and shows GT next to each method's prediction in that ROI.
    """
    cases = cfg.get("cases", {}).get("tiny_et", [])
    if not cases:
        return
    methods = pred_loader.get_all_methods()
    for case_idx, case_id in enumerate(cases):
        case = case_loader.get_case(case_id, include_label=True)
        images = case["images"]
        ref_mod = "t1c" if "t1c" in images else list(images.keys())[0]
        base = images[ref_mod]
        gt_regions = label_to_regions(case["label"]) if case["label"] is not None else None
        # ET centroid in volume coordinates (D, H, W); fallback: volume center.
        if gt_regions is not None:
            et_mask = gt_regions[2]
            if et_mask.sum() > 0:
                coords = np.where(et_mask)
                centroid = [int(np.mean(c)) for c in coords]
            else:
                centroid = [s // 2 for s in base.shape]
        else:
            centroid = [s // 2 for s in base.shape]
        plane = "axial"
        slice_idx = centroid[0]
        base2d = extract_slice(base, plane, slice_idx)
        # extract_slice applies np.rot90, which maps volume point (y=H-idx,
        # x=W-idx) to slice pixel (row = W-1-x, col = y). The previous code
        # cropped with the raw (cy, cx), so the ROI was not centered on the ET.
        roi_size = 64
        cy, cx = centroid[1], centroid[2]
        row_c = base.shape[2] - 1 - cx
        col_c = cy
        y_start = max(0, row_c - roi_size // 2)
        y_end = min(base2d.shape[0], row_c + roi_size // 2)
        x_start = max(0, col_c - roi_size // 2)
        x_end = min(base2d.shape[1], col_c + roi_size // 2)
        n_cols = 1 + len(methods)  # GT + methods
        fig, axes = plt.subplots(1, n_cols, figsize=(3 * n_cols, 3))
        if n_cols == 1:
            axes = [axes]
        base_roi = base2d[y_start:y_end, x_start:x_end]
        col_idx = 0
        # GT panel
        ax = axes[col_idx]
        if gt_regions is not None:
            et2d = extract_slice(gt_regions[2], plane, slice_idx) > 0
            et_roi = et2d[y_start:y_end, x_start:x_end]
            overlay = overlay_masks_publication(base_roi, {"ET": et_roi}, alpha=0.5)
            ax.imshow(overlay, aspect="equal")
        else:
            ax.imshow(base_roi, cmap="gray", aspect="equal")
        ax.set_title("GT", fontsize=STYLE["font_subtitle"], fontweight="bold")
        ax.axis("off")
        col_idx += 1
        # Method panels
        for method in methods:
            ax = axes[col_idx]
            try:
                pred = pred_loader.load_method(method, case_id)
                pred_et = pred["regions"][2]
                et2d = extract_slice(pred_et, plane, slice_idx) > 0
                # Resample to the reference slice grid if shapes differ.
                if et2d.shape != base2d.shape:
                    zoom_factors = (base2d.shape[0] / et2d.shape[0], base2d.shape[1] / et2d.shape[1])
                    et2d = zoom(et2d.astype(float), zoom_factors, order=0) > 0.5
                et_roi = et2d[y_start:y_end, x_start:x_end]
                overlay = overlay_masks_publication(base_roi, {"ET": et_roi}, alpha=0.5)
                ax.imshow(overlay, aspect="equal")
            except Exception:
                # Missing/broken prediction: show the plain ROI. Narrowed from a
                # bare `except:` so KeyboardInterrupt/SystemExit still propagate.
                ax.imshow(base_roi, cmap="gray", aspect="equal")
            ax.set_title(method.get("name", "Method"), fontsize=STYLE["font_subtitle"], fontweight="bold")
            ax.axis("off")
            col_idx += 1
        finalize_figure(fig, f"Tiny ET ROI: {case_id}")
        save_figure(fig, out_dir, f"Fig4_{chr(ord('a')+case_idx)}_tiny_et_{case_id}")
# ============================================================================
# G) MoE Routing Interpretability (Spec G)
# ============================================================================
def make_moe_routing(cfg: Dict, case_loader: CaseLoader, aux: AuxCache, runner: Optional[ModelRunner], out_dir: str) -> None:
    """
    MoE routing visualization with grouped bar chart.
    Shows expert contributions for WT/TC/ET.

    Routing data is read from the aux cache; when missing and a ModelRunner is
    available, it is computed and cached. Cases with neither are skipped.
    """
    cases = cfg.get("cases", {}).get("moe", [])
    if not cases:
        return
    for case_idx, case_id in enumerate(cases):
        aux_data = aux.load(case_id)
        needed_keys = ["moe_gamma", "expert_contrib"]
        if aux_data is None or not all(k in aux_data for k in needed_keys):
            if runner is None:
                continue  # no cache and no model: skip this case
            image, _ = runner.load_case_tensor(case_id)
            out = runner.forward_intermediate(image)
            new_data = {
                "moe_gamma": out["moe_gamma"].detach().cpu().numpy(),
                "expert_contrib": out["expert_contrib"].detach().cpu().numpy(),
            }
            # Merge into any partial cache instead of discarding it.
            if aux_data is not None:
                aux_data.update(new_data)
            else:
                aux_data = new_data
            aux.save(case_id, aux_data)
        if aux_data is None:
            continue
        gamma = np.asarray(aux_data["moe_gamma"])[0]  # per-expert routing weights (batch 0)
        contrib = np.asarray(aux_data["expert_contrib"])[0]  # [n_experts, 3] WT/TC/ET contributions
        n_experts = contrib.shape[0]
        x = np.arange(n_experts)
        width = 0.25
        # Find active experts (top-k, usually k=2); 0.01 filters near-zero weights.
        active_experts = np.where(gamma > 0.01)[0]
        top_k = len(active_experts)
        fig, ax = plt.subplots(figsize=(10, 5))
        # Grouped bars for WT/TC/ET with highlight for active experts
        for i in range(n_experts):
            alpha = 1.0 if i in active_experts else 0.3
            edge_width = 2 if i in active_experts else 0.5
            ax.bar(x[i] - width, contrib[i, 0], width, color=STYLE["color_WT"],
                   edgecolor="black", linewidth=edge_width, alpha=alpha)
            ax.bar(x[i], contrib[i, 1], width, color=STYLE["color_TC"],
                   edgecolor="black", linewidth=edge_width, alpha=alpha)
            ax.bar(x[i] + width, contrib[i, 2], width, color=STYLE["color_ET"],
                   edgecolor="black", linewidth=edge_width, alpha=alpha)
        # Legend for regions (place at upper left)
        from matplotlib.patches import Patch
        legend_elements = [
            Patch(facecolor=STYLE["color_WT"], edgecolor="black", label="WT"),
            Patch(facecolor=STYLE["color_TC"], edgecolor="black", label="TC"),
            Patch(facecolor=STYLE["color_ET"], edgecolor="black", label="ET"),
        ]
        # Routing weights as line with markers
        ax.plot(x, gamma, "ko-", linewidth=2, markersize=8, label="Routing γ", zorder=10)
        # Annotate active experts with their gamma values
        for i in active_experts:
            ax.annotate(f"γ={gamma[i]:.2f}", xy=(x[i], gamma[i]),
                        xytext=(0, 10), textcoords="offset points",
                        fontsize=STYLE["font_label"], fontweight="bold", ha="center",
                        bbox=dict(boxstyle="round,pad=0.2", facecolor="yellow", alpha=0.8))
        ax.set_xlabel("Expert Index", fontsize=STYLE["font_subtitle"])
        ax.set_ylabel("Contribution", fontsize=STYLE["font_subtitle"])
        ax.set_xticks(x)
        ax.set_xticklabels([f"E{i}\n{'(active)' if i in active_experts else ''}" for i in range(n_experts)],
                           fontsize=STYLE["font_label"])
        # Legend at upper right
        ax.legend(handles=legend_elements, loc="upper right", fontsize=STYLE["font_label"],
                  bbox_to_anchor=(0.99, 0.99))
        ax.spines["top"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.set_ylim(0, None)
        # Add explanation text (upper right, below legend)
        ax.text(0.98, 0.72, f"Top-k = {top_k} (sparse gating)\nActive: {', '.join([f'E{i}' for i in active_experts])}",
                transform=ax.transAxes, fontsize=STYLE["font_label"], va="top", ha="right",
                bbox=dict(boxstyle="round,pad=0.3", facecolor="lightyellow", edgecolor="gray", alpha=0.9))
        finalize_figure(fig, f"MoE Expert Routing: {case_id}")
        save_figure(fig, out_dir, f"Fig5_{chr(ord('a')+case_idx)}_moe_routing_{case_id}")
# ============================================================================
# H) Concept Token Interpretability (Spec H)
# ============================================================================
def make_concept_tokens(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None:
    """
    Concept token visualization (Spec H) - split into 3 clear figures per case:
    1. ET Overview: T1ce + ET mask
    2. Fragmentation Analysis: Components → FRAG_BIN
    3. Scale Analysis: Size → SCALE_BIN

    Args:
        cfg: Visualization config; cases come from ``cases.concept_tokens``,
            bin thresholds from ``visualization.frag_bins`` and
            ``visualization.scale_bins``.
        case_loader: Provides per-case image volumes and labels.
        pred_loader: Provides our method's predicted region masks.
        out_dir: Output directory for saved figures.
    """
    cases = cfg.get("cases", {}).get("concept_tokens", [])
    if not cases:
        return
    # Inclusive upper bounds per bin; values above the last threshold fall
    # into the final bin.
    frag_bins = cfg.get("visualization", {}).get("frag_bins", [1, 3, 5])
    scale_bins = cfg.get("visualization", {}).get("scale_bins", [50, 200, 500])
    frag_labels = ["None", "Low", "Medium", "High"]
    scale_labels = ["Tiny", "Small", "Medium", "Large"]
    def bin_value(value, thresholds):
        # Index of the first threshold >= value; len(thresholds) if none match.
        for i, t in enumerate(thresholds):
            if value <= t:
                return i
        return len(thresholds)
    def analyze_et_morphology(et_mask):
        """Analyze ET morphology: size, fragmentation, component sizes."""
        et = et_mask > 0
        et_count = int(et.sum())
        labeled, n_comp = connected_components(et)
        comp_sizes = []
        for i in range(1, n_comp + 1):
            comp_sizes.append(int((labeled == i).sum()))
        comp_sizes = sorted(comp_sizes, reverse=True)
        frag_bin = bin_value(n_comp, frag_bins)
        scale_bin = bin_value(et_count, scale_bins)
        return {
            "total_voxels": et_count,
            "n_components": n_comp,
            "comp_sizes": comp_sizes,
            "frag_bin": frag_bin,
            "frag_label": frag_labels[min(frag_bin, len(frag_labels)-1)],
            "scale_bin": scale_bin,
            "scale_label": scale_labels[min(scale_bin, len(scale_labels)-1)],
            "labeled": labeled,
        }
    for case_idx, case_id in enumerate(cases):
        case = case_loader.get_case(case_id, include_label=True)
        pred = pred_loader.load_method(pred_loader.ours, case_id)
        images = case["images"]
        ref_mod = "t1c" if "t1c" in images else list(images.keys())[0]
        base = images[ref_mod]
        # Morphology of the predicted ET region (regions index 2 = ET).
        # FIX: removed a dead GT-morphology computation whose result was
        # never used anywhere in the function.
        pred_morph = analyze_et_morphology(pred["regions"][2])
        # Pick the slice to display; fall back to the default when ET is empty.
        mask_ref = pred["regions"][2] if pred_morph["total_voxels"] > 0 else None
        idx = get_slices(mask_ref, base.shape)
        plane = "axial"
        slice_idx = idx[plane]
        base2d = extract_slice(base, plane, slice_idx)
        n_comp = pred_morph["n_components"]
        letter = chr(ord('a') + case_idx)
        # =====================================================================
        # Figure 1: ET Overview (T1ce + ET mask)
        # =====================================================================
        fig1, axes1 = plt.subplots(1, 2, figsize=(10, 5))
        # Left: T1ce
        axes1[0].imshow(base2d, cmap="gray", aspect="equal")
        axes1[0].set_title("T1ce Input", fontsize=STYLE["font_subtitle"], fontweight="bold")
        axes1[0].axis("off")
        # Right: ET overlay, resized to the base slice if shapes differ.
        et2d = extract_slice(pred["regions"][2], plane, slice_idx)
        if et2d.shape != base2d.shape:
            zoom_factors = (base2d.shape[0] / et2d.shape[0], base2d.shape[1] / et2d.shape[1])
            et2d = zoom(et2d.astype(float), zoom_factors, order=0)
        overlay = overlay_masks_publication(base2d, {"ET": et2d > 0}, alpha=0.5)
        axes1[1].imshow(overlay, aspect="equal")
        axes1[1].set_title(f"Predicted ET\n({pred_morph['total_voxels']} voxels total)",
                           fontsize=STYLE["font_subtitle"], fontweight="bold")
        axes1[1].axis("off")
        fig1.suptitle(f"ET Prediction Overview: {case_id}", fontsize=STYLE["font_title"], fontweight="bold")
        fig1.tight_layout(rect=[0, 0, 1, 0.92])
        save_figure(fig1, out_dir, f"Fig6_{letter}1_et_overview_{case_id}")
        # =====================================================================
        # Figure 2: Fragmentation Analysis (Components → FRAG_BIN)
        # =====================================================================
        fig2, axes2 = plt.subplots(1, 3, figsize=(12, 4.5), gridspec_kw={"width_ratios": [1, 1, 0.8]})
        # Left: connected components painted over a dimmed base slice.
        labeled2d = extract_slice(pred_morph["labeled"], plane, slice_idx)
        if labeled2d.shape != base2d.shape:
            zoom_factors = (base2d.shape[0] / labeled2d.shape[0], base2d.shape[1] / labeled2d.shape[1])
            labeled2d = zoom(labeled2d.astype(float), zoom_factors, order=0)
        comp_rgb = np.zeros((*base2d.shape, 3), dtype=np.float32)
        comp_rgb[:] = base2d[:, :, np.newaxis] * 0.3
        if n_comp > 0:
            colors = plt.cm.Set1(np.linspace(0, 1, max(n_comp, 3)))[:n_comp]
            for i in range(1, n_comp + 1):
                mask = labeled2d == i
                if mask.sum() > 0:
                    comp_rgb[mask] = colors[i-1][:3]
        axes2[0].imshow(comp_rgb, aspect="equal")
        axes2[0].set_title(f"Connected Components\n(n = {n_comp})",
                           fontsize=STYLE["font_subtitle"], fontweight="bold")
        axes2[0].axis("off")
        # Middle: component sizes bar chart (largest 8 components).
        if pred_morph["comp_sizes"]:
            comp_sizes = pred_morph["comp_sizes"][:8]
            colors = plt.cm.Set1(np.linspace(0, 1, max(len(comp_sizes), 3)))[:len(comp_sizes)]
            axes2[1].barh(range(len(comp_sizes)), comp_sizes, color=colors, edgecolor="black", linewidth=0.5)
            axes2[1].set_yticks(range(len(comp_sizes)))
            axes2[1].set_yticklabels([f"C{i+1}" for i in range(len(comp_sizes))])
            axes2[1].set_xlabel("Voxels", fontsize=STYLE["font_label"])
            axes2[1].invert_yaxis()
            axes2[1].spines["top"].set_visible(False)
            axes2[1].spines["right"].set_visible(False)
        else:
            axes2[1].text(0.5, 0.5, "No ET", ha="center", va="center",
                          transform=axes2[1].transAxes, fontsize=STYLE["font_subtitle"])
            axes2[1].axis("off")
        axes2[1].set_title("Component Sizes", fontsize=STYLE["font_subtitle"], fontweight="bold")
        # Right: FRAG_BIN selection; selected bin drawn in red.
        frag_colors = ["#CCCCCC", "#90EE90", "#FFD700", "#FF6347"]
        frag_idx = pred_morph["frag_bin"]
        bar_colors = [frag_colors[i] if i != frag_idx else "#FF0000" for i in range(4)]
        axes2[2].bar(range(4), [1]*4, color=bar_colors, edgecolor="black", linewidth=1.5)
        axes2[2].set_xticks(range(4))
        axes2[2].set_xticklabels(frag_labels, fontsize=STYLE["font_label"], rotation=45, ha="right")
        axes2[2].set_ylim(0, 1.3)
        axes2[2].set_yticks([])
        axes2[2].spines["top"].set_visible(False)
        axes2[2].spines["right"].set_visible(False)
        axes2[2].spines["left"].set_visible(False)
        # Arrow pointing to the selected bin.
        axes2[2].annotate(f"n={n_comp}", xy=(frag_idx, 1.05), xytext=(frag_idx, 1.2),
                          fontsize=STYLE["font_label"], fontweight="bold", ha="center",
                          arrowprops=dict(arrowstyle="->", color="red", lw=2))
        axes2[2].set_title(f"FRAG_BIN = {pred_morph['frag_label']}",
                           fontsize=STYLE["font_subtitle"], fontweight="bold", color="red")
        # Mapping explanation along the bottom.
        fig2.text(0.5, 0.02,
                  f"Mapping: {n_comp} components → FRAG_BIN = {pred_morph['frag_label']} "
                  f"(thresholds: ≤{frag_bins[0]}=None, ≤{frag_bins[1]}=Low, ≤{frag_bins[2]}=Med, >{frag_bins[2]}=High)",
                  ha="center", fontsize=STYLE["font_label"], style="italic")
        fig2.suptitle(f"Fragmentation Analysis: {case_id}", fontsize=STYLE["font_title"], fontweight="bold")
        fig2.tight_layout(rect=[0, 0.06, 1, 0.92])
        save_figure(fig2, out_dir, f"Fig6_{letter}2_fragmentation_{case_id}")
        # =====================================================================
        # Figure 3: Scale Analysis (Size → SCALE_BIN)
        # =====================================================================
        fig3, axes3 = plt.subplots(1, 2, figsize=(10, 5))
        # Left: ET overlay with total-size info.
        axes3[0].imshow(overlay, aspect="equal")
        total_voxels = pred_morph["total_voxels"]
        axes3[0].set_title(f"ET Region\nTotal: {total_voxels} voxels",
                           fontsize=STYLE["font_subtitle"], fontweight="bold")
        axes3[0].axis("off")
        # Right: SCALE_BIN selection with a size ruler.
        ax_scale = axes3[1]
        scale_colors = ["#E0E0E0", "#87CEEB", "#4169E1", "#1E3A8A"]
        scale_idx = pred_morph["scale_bin"]
        # Human-readable voxel ranges for the four scale bins.
        scale_ranges = [f"≤{scale_bins[0]}", f"{scale_bins[0]+1}-{scale_bins[1]}",
                        f"{scale_bins[1]+1}-{scale_bins[2]}", f">{scale_bins[2]}"]
        y_pos = np.arange(4)
        # Stacked horizontal segments representing each bin's voxel range;
        # the open-ended last bin is drawn with width scale_bins[2].
        # FIX: removed a dead `bar_colors` computation here; the highlight
        # below already repaints the selected bin in red.
        bars = ax_scale.barh(y_pos, [scale_bins[0], scale_bins[1]-scale_bins[0],
                                      scale_bins[2]-scale_bins[1], scale_bins[2]],
                             color=scale_colors, edgecolor="black", linewidth=1.5, left=[0, scale_bins[0], scale_bins[1], scale_bins[2]])
        # Highlight the selected bin.
        ax_scale.barh(scale_idx, bars[scale_idx].get_width(),
                      left=bars[scale_idx].get_x(), color="#FF0000", edgecolor="black", linewidth=2)
        ax_scale.set_yticks(y_pos)
        ax_scale.set_yticklabels([f"{scale_labels[i]}\n({scale_ranges[i]})" for i in range(4)],
                                 fontsize=STYLE["font_label"])
        ax_scale.set_xlabel("Voxels", fontsize=STYLE["font_label"])
        ax_scale.spines["top"].set_visible(False)
        ax_scale.spines["right"].set_visible(False)
        # Mark the current case's voxel count on the ruler.
        ax_scale.axvline(x=total_voxels, color="red", linestyle="--", linewidth=2, label=f"Current: {total_voxels}")
        ax_scale.legend(loc="upper right", fontsize=STYLE["font_label"])
        ax_scale.set_title(f"SCALE_BIN = {pred_morph['scale_label']}",
                           fontsize=STYLE["font_subtitle"], fontweight="bold", color="red")
        # Mapping explanation along the bottom.
        fig3.text(0.5, 0.02,
                  f"Mapping: {total_voxels} voxels → SCALE_BIN = {pred_morph['scale_label']} "
                  f"(thresholds: ≤{scale_bins[0]}=Tiny, ≤{scale_bins[1]}=Small, ≤{scale_bins[2]}=Med, >{scale_bins[2]}=Large)",
                  ha="center", fontsize=STYLE["font_label"], style="italic")
        fig3.suptitle(f"Scale Analysis: {case_id}", fontsize=STYLE["font_title"], fontweight="bold")
        fig3.tight_layout(rect=[0, 0.06, 1, 0.92])
        save_figure(fig3, out_dir, f"Fig6_{letter}3_scale_{case_id}")
# ============================================================================
# I) Dual-domain Enhancement (Spec I)
# ============================================================================
def make_dual_domain(cfg: Dict, case_loader: CaseLoader, aux: AuxCache, runner: Optional[ModelRunner], out_dir: str) -> None:
    """
    Dual-domain enhancement visualization (Spec I).

    For each configured case, renders the original and the enhanced
    amplitude spectra side by side on a shared intensity scale, with a
    dedicated colorbar axis.  The enhanced volume ``x_spec`` is taken
    from the aux cache and computed via ``runner`` on a cache miss.
    """
    case_ids = cfg.get("cases", {}).get("dual_domain", [])
    if not case_ids:
        return
    for case_idx, case_id in enumerate(case_ids):
        case = case_loader.get_case(case_id, include_label=False)
        modalities = case["images"]
        key = "t1c" if "t1c" in modalities else list(modalities.keys())[0]
        reference = modalities[key]
        cached = aux.load(case_id)
        if cached is None or "x_spec" not in cached:
            # Cache miss: run the model to obtain the enhanced volume.
            if runner is None:
                continue
            tensor, _ = runner.load_case_tensor(case_id)
            intermediates = runner.forward_intermediate(tensor)
            fresh = {"x_spec": intermediates["x_spec"].detach().cpu().numpy()}
            if cached is not None:
                cached.update(fresh)
            else:
                cached = fresh
            aux.save(case_id, cached)
        if cached is None:
            continue
        enhanced = cached["x_spec"][0]
        # Amplitude spectra of the reference and the enhanced volume.
        amp_before = fft_amplitude_slice(reference, plane="axial")
        amp_after = fft_amplitude_slice(enhanced[0] if enhanced.ndim == 4 else enhanced, plane="axial")
        # Shared color range so the two panels are directly comparable.
        lo = min(amp_before.min(), amp_after.min())
        hi = max(amp_before.max(), amp_after.max())
        fig = plt.figure(figsize=(12, 5))
        grid = fig.add_gridspec(1, 3, width_ratios=[1, 1, 0.05], wspace=0.15)
        ax_orig = fig.add_subplot(grid[0])
        mappable = ax_orig.imshow(amp_before, cmap="inferno", vmin=lo, vmax=hi, aspect="equal")
        ax_orig.set_title("Original Amplitude Spectrum", fontsize=STYLE["font_subtitle"], fontweight="bold")
        ax_orig.axis("off")
        ax_enh = fig.add_subplot(grid[1])
        ax_enh.imshow(amp_after, cmap="inferno", vmin=lo, vmax=hi, aspect="equal")
        ax_enh.set_title("Enhanced Amplitude Spectrum", fontsize=STYLE["font_subtitle"], fontweight="bold")
        ax_enh.axis("off")
        # Colorbar lives in its own narrow subplot.
        cax = fig.add_subplot(grid[2])
        cbar = fig.colorbar(mappable, cax=cax)
        cbar.set_label("Log Amplitude", fontsize=STYLE["font_label"])
        fig.suptitle(f"Dual-domain Enhancement: {case_id}", fontsize=STYLE["font_title"], fontweight="bold", y=0.98)
        fig.tight_layout(rect=[0, 0, 1, 0.93])
        save_figure(fig, out_dir, f"Fig7_{chr(ord('a')+case_idx)}_dual_domain_{case_id}")
# ============================================================================
# J) AmpMix Augmentation Robustness (Spec J)
# ============================================================================
def make_ampmix(cfg: Dict, case_loader: CaseLoader, runner: Optional[ModelRunner], out_dir: str) -> None:
    """
    AmpMix augmentation robustness visualization (Spec J).

    For each configured (base, mix) pair, mixes the Fourier amplitudes of
    the two volumes with weight lambda, runs the model on both the original
    and the mixed input, and renders three columns:
    Original + prediction | AmpMix image | AmpMix + prediction.

    Args:
        cfg: Visualization config; pairs come from ``cases.ampmix`` with
            keys ``base``, ``mix`` and optional ``lam`` (default 0.5).
        case_loader: Provides the display volume for the base case.
        runner: Model runner; required (the function returns without it).
        out_dir: Output directory for saved figures.
    """
    pairs = cfg.get("cases", {}).get("ampmix", [])
    if not pairs or runner is None:
        return
    for pair_idx, pair in enumerate(pairs):
        case_a = pair.get("base")
        case_b = pair.get("mix")
        lam = float(pair.get("lam", 0.5))
        if not case_a or not case_b:
            continue
        img_a, _ = runner.load_case_tensor(case_a)
        img_b, _ = runner.load_case_tensor(case_b)
        mixed = fourier_amplitude_mix(img_a[0].cpu().numpy(), img_b[0].cpu().numpy(), lam)
        mixed_t = runner.torch.from_numpy(mixed).unsqueeze(0).to(runner.device)
        # Predictions for the original and the amplitude-mixed input.
        with runner.torch.no_grad():
            logits_a, _ = runner.model(img_a.to(runner.device))
            logits_m, _ = runner.model(mixed_t)
        pred_a = (logits_a.sigmoid() > 0.5).detach().cpu().numpy()[0]
        pred_m = (logits_m.sigmoid() > 0.5).detach().cpu().numpy()[0]
        case = case_loader.get_case(case_a, include_label=False)
        images = case["images"]
        ref_mod = "t1c" if "t1c" in images else list(images.keys())[0]
        base = images[ref_mod]
        # FIX: pass None when the ET prediction is empty so get_slices uses
        # its fallback, matching make_tiny_et and the other make_* functions.
        et_mask = pred_a[2] > 0
        idx = get_slices(et_mask if et_mask.any() else None, base.shape)
        plane = "axial"
        slice_idx = idx[plane]
        fig, axes = plt.subplots(1, 3, figsize=(12, 4))
        base2d = extract_slice(base, plane, slice_idx)
        mix2d = extract_slice(normalize_volume(mixed[0]), plane, slice_idx)
        # Original + prediction
        ax = axes[0]
        masks_a = {
            "WT": extract_slice(pred_a[0], plane, slice_idx) > 0,
            "TC": extract_slice(pred_a[1], plane, slice_idx) > 0,
            "ET": extract_slice(pred_a[2], plane, slice_idx) > 0,
        }
        # FIX: resize model-space masks to the display slice when the loader
        # volume has a different shape (mirrors make_failure's handling).
        for key in masks_a:
            if masks_a[key].shape != base2d.shape:
                zoom_factors = (base2d.shape[0] / masks_a[key].shape[0], base2d.shape[1] / masks_a[key].shape[1])
                masks_a[key] = zoom(masks_a[key].astype(float), zoom_factors, order=0) > 0.5
        overlay_a = overlay_masks_publication(base2d, masks_a)
        ax.imshow(overlay_a, aspect="equal")
        ax.set_title("Original + Prediction", fontsize=STYLE["font_subtitle"], fontweight="bold")
        ax.axis("off")
        # AmpMix image
        ax = axes[1]
        ax.imshow(mix2d, cmap="gray", aspect="equal")
        ax.set_title(f"AmpMix (λ={lam})", fontsize=STYLE["font_subtitle"], fontweight="bold")
        ax.axis("off")
        # AmpMix prediction (mix2d and pred_m share the model space, so no
        # resizing is needed here).
        ax = axes[2]
        masks_m = {
            "WT": extract_slice(pred_m[0], plane, slice_idx) > 0,
            "TC": extract_slice(pred_m[1], plane, slice_idx) > 0,
            "ET": extract_slice(pred_m[2], plane, slice_idx) > 0,
        }
        overlay_m = overlay_masks_publication(mix2d, masks_m)
        ax.imshow(overlay_m, aspect="equal")
        ax.set_title("AmpMix + Prediction", fontsize=STYLE["font_subtitle"], fontweight="bold")
        ax.axis("off")
        finalize_figure(fig, f"Augmentation Robustness: {case_a}")
        save_figure(fig, out_dir, f"Fig8_{chr(ord('a')+pair_idx)}_ampmix_{case_a}")
# ============================================================================
# K) Failure Cases (Spec K)
# ============================================================================
def make_failure(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None:
    """
    Failure case visualization with red boxes highlighting problem areas.

    For each case in ``cases.failure``, renders Input | Ground Truth |
    Prediction (GT column omitted when the label is missing), draws a red
    bounding box around the ET disagreement region, and appends any
    free-text note from ``cases.failure_notes`` to the figure title.
    """
    cases = cfg.get("cases", {}).get("failure", [])
    notes = cfg.get("cases", {}).get("failure_notes", {})
    if not cases:
        return
    for case_idx, case_id in enumerate(cases):
        case = case_loader.get_case(case_id, include_label=True)
        images = case["images"]
        # Prefer T1ce as the background modality; otherwise take the first one.
        ref_mod = "t1c" if "t1c" in images else list(images.keys())[0]
        base = images[ref_mod]
        gt_regions = label_to_regions(case["label"]) if case["label"] is not None else None
        pred = pred_loader.load_method(pred_loader.ours, case_id)
        pred_regions = pred["regions"]
        # Choose the slice from GT ET when available, else from predicted ET.
        mask_ref = gt_regions[2] if gt_regions is not None else pred_regions[2]
        idx = get_slices(mask_ref, base.shape)
        plane = "axial"
        slice_idx = idx[plane]
        # Two panels without GT, three with it.
        fig, axes = plt.subplots(1, 3 if gt_regions is not None else 2, figsize=(10, 4))
        base2d = extract_slice(base, plane, slice_idx)
        col = 0
        # Base image
        ax = axes[col]
        ax.imshow(base2d, cmap="gray", aspect="equal")
        ax.set_title("Input", fontsize=STYLE["font_subtitle"], fontweight="bold")
        ax.axis("off")
        col += 1
        # GT
        if gt_regions is not None:
            ax = axes[col]
            gt_masks = {
                "WT": extract_slice(gt_regions[0], plane, slice_idx) > 0,
                "TC": extract_slice(gt_regions[1], plane, slice_idx) > 0,
                "ET": extract_slice(gt_regions[2], plane, slice_idx) > 0,
            }
            overlay_gt = overlay_masks_publication(base2d, gt_masks)
            ax.imshow(overlay_gt, aspect="equal")
            ax.set_title("Ground Truth", fontsize=STYLE["font_subtitle"], fontweight="bold")
            ax.axis("off")
            col += 1
        # Prediction with failure region highlighted
        ax = axes[col]
        pred_masks = {
            "WT": extract_slice(pred_regions[0], plane, slice_idx) > 0,
            "TC": extract_slice(pred_regions[1], plane, slice_idx) > 0,
            "ET": extract_slice(pred_regions[2], plane, slice_idx) > 0,
        }
        # Resize model-space masks to the display slice if needed
        # (nearest-neighbor so the binary masks stay binary).
        for key in pred_masks:
            if pred_masks[key].shape != base2d.shape:
                zoom_factors = (base2d.shape[0] / pred_masks[key].shape[0], base2d.shape[1] / pred_masks[key].shape[1])
                pred_masks[key] = zoom(pred_masks[key].astype(float), zoom_factors, order=0) > 0.5
        overlay_pred = overlay_masks_publication(base2d, pred_masks)
        ax.imshow(overlay_pred, aspect="equal")
        # Find the ET error region (GT XOR prediction) and frame it in red.
        if gt_regions is not None:
            gt_et = extract_slice(gt_regions[2], plane, slice_idx) > 0
            pred_et = pred_masks["ET"]
            error_region = np.logical_xor(gt_et, pred_et)
            if error_region.sum() > 0:
                coords = np.where(error_region)
                y_min, y_max = coords[0].min(), coords[0].max()
                x_min, x_max = coords[1].min(), coords[1].max()
                # Fixed pixel margin around the error bounding box; matplotlib
                # clips the rectangle if it extends past the image edge.
                margin = 10
                rect = Rectangle((x_min - margin, y_min - margin),
                                 x_max - x_min + 2*margin, y_max - y_min + 2*margin,
                                 linewidth=2, edgecolor='red', facecolor='none')
                ax.add_patch(rect)
        ax.set_title("Prediction", fontsize=STYLE["font_subtitle"], fontweight="bold")
        ax.axis("off")
        # Append the optional reviewer note to the title.
        note = notes.get(case_id, "")
        title = f"Failure Case: {case_id}" + (f"\n{note}" if note else "")
        finalize_figure(fig, title)
        save_figure(fig, out_dir, f"Fig9_{chr(ord('a')+case_idx)}_failure_{case_id}")
# ============================================================================
# Main
# ============================================================================
def main():
    """CLI entry point: parse arguments, build loaders, and run the requested figure generators."""
    parser = argparse.ArgumentParser(description="Publication-quality visualization suite")
    parser.add_argument("--config", required=True, help="Visualization config yaml")
    parser.add_argument("--model-config", default=os.path.join(ROOT_DIR, "configs/train.yaml"), help="Model config yaml")
    parser.add_argument("--checkpoint", default="", help="Model checkpoint for model-based visualizations")
    parser.add_argument("--device", default="cuda")
    parser.add_argument("--run", default="all", help="Comma-separated list or 'all'")
    args = parser.parse_args()
    cfg = load_config(args.config)
    out_dir = cfg.get("visualization", {}).get("output_dir", os.path.join(ROOT_DIR, "vis_res"))
    ensure_dir(out_dir)
    case_loader = CaseLoader(cfg)
    pred_loader = PredictionLoader(cfg)
    aux_cache = AuxCache(cfg.get("predictions", {}).get("aux_dir"))
    # The model runner is only built when a checkpoint is supplied.
    runner = ModelRunner(cfg, args.model_config, args.checkpoint, args.device) if args.checkpoint else None
    # None means "run everything"; otherwise only the named steps run.
    selected = None if args.run == "all" else {s.strip() for s in args.run.split(",")}
    # (step name, progress message, generator thunk) in fixed output order.
    steps = [
        ("qualitative", "\n[B] Generating qualitative comparison...",
         lambda: make_qualitative(cfg, case_loader, pred_loader, out_dir)),
        ("et_absent", "\n[C] Generating ET-absent case study...",
         lambda: make_et_absent(cfg, case_loader, aux_cache, runner, out_dir)),
        ("boundary", "\n[D] Generating boundary error visualization...",
         lambda: make_boundary(cfg, case_loader, pred_loader, out_dir)),
        ("tiny_et", "\n[E] Generating tiny/fragmented ET visualization...",
         lambda: make_tiny_et(cfg, case_loader, pred_loader, out_dir)),
        ("moe", "\n[G] Generating MoE routing visualization...",
         lambda: make_moe_routing(cfg, case_loader, aux_cache, runner, out_dir)),
        ("concept_tokens", "\n[H] Generating concept token visualization...",
         lambda: make_concept_tokens(cfg, case_loader, pred_loader, out_dir)),
        ("dual_domain", "\n[I] Generating dual-domain enhancement visualization...",
         lambda: make_dual_domain(cfg, case_loader, aux_cache, runner, out_dir)),
        ("ampmix", "\n[J] Generating AmpMix robustness visualization...",
         lambda: make_ampmix(cfg, case_loader, runner, out_dir)),
        ("failure", "\n[K] Generating failure case visualization...",
         lambda: make_failure(cfg, case_loader, pred_loader, out_dir)),
    ]
    print("=" * 60)
    print("Publication-Quality Visualization Suite")
    print("=" * 60)
    for step_name, message, generate in steps:
        if selected is None or step_name in selected:
            print(message)
            generate()
    print("\n" + "=" * 60)
    print(f"All visualizations saved to: {out_dir}")
    print("=" * 60)
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()