|
|
import argparse |
|
|
import os |
|
|
import sys |
|
|
import warnings |
|
|
from types import SimpleNamespace |
|
|
from typing import Any, Dict, List, Sequence |
|
|
|
|
|
import yaml |
|
|
import torch |
|
|
import torch.nn.functional as F |
|
|
from torch.utils.data import DataLoader |
|
|
from torch.cuda.amp import autocast, GradScaler |
|
|
from tqdm import tqdm |
|
|
|
|
|
sys.path.append(os.path.join(os.path.dirname(__file__), "src")) |
|
|
|
|
|
from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE |
|
|
from gliomasam3_moe.losses.brats_losses import LossComputer |
|
|
from gliomasam3_moe.data.brats_dataset import BraTSDataset, SyntheticBraTSDataset, SegMambaNPZDataset, split_npz_paths |
|
|
from gliomasam3_moe.data.transforms_segmamba_like import ( |
|
|
get_train_transforms, |
|
|
get_synthetic_transforms, |
|
|
get_npz_train_transforms, |
|
|
) |
|
|
from gliomasam3_moe.utils.brats_regions import label_to_regions |
|
|
from gliomasam3_moe.utils.postprocess import remove_small_components |
|
|
from gliomasam3_moe.utils.seed import set_seed |
|
|
|
|
|
|
|
|
def _to_namespace(obj: Any): |
|
|
if isinstance(obj, dict): |
|
|
return SimpleNamespace(**{k: _to_namespace(v) for k, v in obj.items()}) |
|
|
return obj |
|
|
|
|
|
|
|
|
def _to_dict(obj: Any): |
|
|
if isinstance(obj, SimpleNamespace): |
|
|
return {k: _to_dict(v) for k, v in obj.__dict__.items()} |
|
|
return obj |
|
|
|
|
|
|
|
|
def load_config(path: str) -> SimpleNamespace:
    """Load a YAML config file and return it as a nested SimpleNamespace.

    Args:
        path: Filesystem path to the YAML configuration file.

    Returns:
        Parsed configuration with all nested dicts converted to
        SimpleNamespace for attribute-style access.
    """
    # Explicit encoding avoids platform-dependent defaults (e.g. cp1252 on
    # Windows) when configs contain non-ASCII characters.
    with open(path, "r", encoding="utf-8") as f:
        cfg = yaml.safe_load(f)
    return _to_namespace(cfg)
|
|
|
|
|
|
|
|
def str2bool(v: str) -> bool:
    """Interpret common truthy strings ("1", "true", "yes", "y") as True.

    Anything else — including None and arbitrary strings — maps to False.
    The value is stringified first, so non-str inputs are accepted.
    """
    truthy = {"1", "true", "yes", "y"}
    return str(v).lower() in truthy
|
|
|
|
|
|
|
|
def fourier_amplitude_mix(x: torch.Tensor, p: float = 0.0) -> torch.Tensor:
    """Mix Fourier amplitudes across the batch while keeping each sample's phase.

    With probability ``p`` per sample, the amplitude spectrum is linearly
    blended with that of a randomly permuted batch partner; phases stay
    untouched. Expects a 5D tensor (B, C, D, H, W); spatial FFT is taken
    over dims (2, 3, 4).
    """
    batch = x.shape[0]
    # Nothing to mix with a single sample or a disabled probability.
    if p <= 0 or batch < 2:
        return x
    dev = x.device
    selected = torch.rand(batch, device=dev) < p
    if selected.sum() == 0:
        return x
    partners = torch.randperm(batch, device=dev)
    spectrum = torch.fft.fftn(x, dim=(2, 3, 4))
    partner_spectrum = spectrum[partners]
    amplitude = torch.abs(spectrum)
    partner_amplitude = torch.abs(partner_spectrum)
    phase = torch.angle(spectrum)
    # Per-sample blend factor, broadcast over (C, D, H, W).
    blend = torch.rand(batch, device=dev).view(batch, 1, 1, 1, 1)
    mixed_amplitude = amplitude * (1.0 - blend) + partner_amplitude * blend
    mixed_spectrum = mixed_amplitude * torch.exp(1j * phase)
    mixed = torch.fft.ifftn(mixed_spectrum, dim=(2, 3, 4)).real
    result = x.clone()
    result[selected] = mixed[selected]
    return result
|
|
|
|
|
|
|
|
def compute_dice(pred: torch.Tensor, gt: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    """Per-channel soft Dice over the whole batch for the [WT, TC, ET] regions.

    Both inputs are expected as (B, 3, D, H, W) binary masks; the reduction
    covers batch and spatial axes, so one score per channel is returned.
    ``eps`` keeps the ratio defined (== 1) when both masks are empty.
    """
    reduce_axes = (0, 2, 3, 4)
    overlap = (pred * gt).sum(reduce_axes)
    total = pred.sum(reduce_axes) + gt.sum(reduce_axes)
    return (2.0 * overlap + eps) / (total + eps)
|
|
|
|
|
|
|
|
def _gaussian_weight(roi_size: Sequence[int], device: torch.device) -> torch.Tensor: |
|
|
sigmas = [s * 0.125 for s in roi_size] |
|
|
grids = [] |
|
|
for size, sigma in zip(roi_size, sigmas): |
|
|
center = (size - 1) / 2.0 |
|
|
x = torch.arange(size, device=device, dtype=torch.float32) |
|
|
grids.append(torch.exp(-0.5 * ((x - center) / max(sigma, 1e-6)) ** 2)) |
|
|
w = grids[0][:, None, None] * grids[1][None, :, None] * grids[2][None, None, :] |
|
|
w = w / w.max() |
|
|
return w |
|
|
|
|
|
|
|
|
def sliding_window_inference_3d(
    inputs: torch.Tensor,
    roi_size: Sequence[int],
    overlap: float,
    predictor,
) -> torch.Tensor:
    """Tiled 3D inference with Gaussian-weighted blending of patch outputs.

    Args:
        inputs: Volume of shape (1, C, D, H, W); batch size must be 1.
        roi_size: Patch extents (rz, ry, rx) fed to ``predictor``.
        overlap: Fractional overlap between adjacent patches.
        predictor: Callable returning logits, or a ``(logits, aux)`` pair.
            When ``aux`` is a dict holding "pi_et", the ET channel (index 2)
            is gated by that per-sample value.

    Returns:
        Blended sigmoid probabilities of shape (1, 3, D, H, W).
    """
    batch, _, depth, height, width = inputs.shape
    if batch != 1:
        raise ValueError("sliding_window_inference_3d expects batch size 1.")
    roi_z, roi_y, roi_x = (int(v) for v in roi_size)

    # Zero-pad symmetrically so each axis fits at least one full ROI.
    pad_d = max(0, roi_z - depth)
    pad_h = max(0, roi_y - height)
    pad_w = max(0, roi_x - width)
    padded = pad_d > 0 or pad_h > 0 or pad_w > 0
    pd0, ph0, pw0 = pad_d // 2, pad_h // 2, pad_w // 2
    orig_dims = (depth, height, width)
    if padded:
        inputs = F.pad(inputs, (pw0, pad_w - pw0, ph0, pad_h - ph0, pd0, pad_d - pd0))
        depth, height, width = inputs.shape[-3:]

    step_z = max(1, int(roi_z * (1.0 - overlap)))
    step_y = max(1, int(roi_y * (1.0 - overlap)))
    step_x = max(1, int(roi_x * (1.0 - overlap)))

    def _positions(extent: int, roi: int, step: int) -> List[int]:
        # Regular grid of window starts, plus a final window flush to the end.
        if extent <= roi:
            return [0]
        pos = list(range(0, extent - roi + 1, step))
        if pos[-1] != extent - roi:
            pos.append(extent - roi)
        return pos

    z_starts = _positions(depth, roi_z, step_z)
    y_starts = _positions(height, roi_y, step_y)
    x_starts = _positions(width, roi_x, step_x)

    weight = _gaussian_weight((roi_z, roi_y, roi_x), inputs.device)[None, None]
    accum = torch.zeros((1, 3, depth, height, width), device=inputs.device, dtype=torch.float32)
    norm = torch.zeros((1, 1, depth, height, width), device=inputs.device, dtype=torch.float32)

    for z0 in z_starts:
        for y0 in y_starts:
            for x0 in x_starts:
                window = inputs[:, :, z0 : z0 + roi_z, y0 : y0 + roi_y, x0 : x0 + roi_x]
                output = predictor(window)
                aux = None
                if isinstance(output, (tuple, list)):
                    logits = output[0]
                    if len(output) > 1:
                        aux = output[1]
                else:
                    logits = output
                probs = torch.sigmoid(logits)
                if isinstance(aux, dict) and "pi_et" in aux:
                    # Gate ET probabilities by the auxiliary per-sample prior.
                    gate = aux["pi_et"].view(probs.shape[0], 1, 1, 1, 1)
                    probs[:, 2:3] = probs[:, 2:3] * gate
                accum[:, :, z0 : z0 + roi_z, y0 : y0 + roi_y, x0 : x0 + roi_x] += probs * weight
                norm[:, :, z0 : z0 + roi_z, y0 : y0 + roi_y, x0 : x0 + roi_x] += weight

    blended = accum / norm.clamp(min=1e-6)
    if padded:
        od, oh, ow = orig_dims
        blended = blended[:, :, pd0 : pd0 + od, ph0 : ph0 + oh, pw0 : pw0 + ow]
    return blended
|
|
|
|
|
|
|
|
def _compute_hd95(pred: torch.Tensor, gt: torch.Tensor, spacing: Sequence[float]) -> List[float]: |
|
|
try: |
|
|
from medpy import metric |
|
|
except Exception: |
|
|
return [float("nan")] * 3 |
|
|
|
|
|
pred_np = pred.detach().cpu().numpy() |
|
|
gt_np = gt.detach().cpu().numpy() |
|
|
out = [] |
|
|
for c in range(3): |
|
|
p = pred_np[c] > 0 |
|
|
g = gt_np[c] > 0 |
|
|
if p.sum() > 0 and g.sum() > 0: |
|
|
out.append(metric.binary.hd95(p, g, voxelspacing=spacing)) |
|
|
else: |
|
|
out.append(50.0) |
|
|
return out |
|
|
|
|
|
|
|
|
@torch.no_grad()
def evaluate_test(
    model: torch.nn.Module,
    loader: DataLoader,
    cfg,
    device: torch.device,
) -> Dict[str, List[float]]:
    """Run sliding-window evaluation and report mean Dice/HD95 per region.

    Args:
        model: Network to evaluate; switched to eval mode here (callers are
            responsible for restoring train mode afterwards).
        loader: Test loader yielding dicts with "image" and "label".
        cfg: Config namespace; reads cfg.infer.* and cfg.data/train fields.
        device: Device to run inference on.

    Returns:
        Dict with keys "dice" and "hd95", each a 3-element list ordered
        [WT, TC, ET], averaged over the evaluated cases.
    """
    model.eval()
    roi = getattr(cfg.infer, "roi_size", cfg.data.crop_size)
    win_overlap = float(getattr(cfg.infer, "overlap", 0.5))
    prob_threshold = float(getattr(cfg.infer, "threshold", 0.5))
    et_min_size = int(getattr(cfg.infer, "et_cc_min_size", 0))
    voxel_spacing = getattr(cfg.data, "spacing", [1.0, 1.0, 1.0])

    per_case_dice: List[List[float]] = []
    per_case_hd95: List[List[float]] = []

    # A zero limit means "evaluate every case".
    case_limit = int(getattr(cfg.train, "test_max_cases", 0))
    for case_idx, batch in enumerate(loader):
        if case_limit and case_idx >= case_limit:
            break
        image = batch["image"].to(device)
        label = batch["label"].to(device)

        probs = sliding_window_inference_3d(
            inputs=image,
            roi_size=roi,
            overlap=win_overlap,
            predictor=lambda x: model(x),
        )
        pred = (probs > prob_threshold).float()
        if et_min_size > 0:
            # Filter the ET channel through the small-component post-process.
            pred[:, 2] = remove_small_components(pred[:, 2], et_min_size)

        gt = label_to_regions(label)
        per_case_dice.append(compute_dice(pred, gt).detach().cpu().tolist())
        per_case_hd95.append(_compute_hd95(pred[0], gt[0], voxel_spacing))

    mean_dice = torch.tensor(per_case_dice).mean(dim=0).tolist()
    mean_hd95 = torch.tensor(per_case_hd95).mean(dim=0).tolist()
    return {"dice": mean_dice, "hd95": mean_hd95}
|
|
|
|
|
|
|
|
def save_checkpoint(path: str, model: torch.nn.Module, optimizer: torch.optim.Optimizer, cfg: SimpleNamespace, step: int):
    """Serialize model/optimizer state plus a config snapshot to ``path``.

    Args:
        path: Destination file; parent directories are created if needed.
        model: Model whose ``state_dict`` is stored.
        optimizer: Optimizer whose ``state_dict`` is stored.
        cfg: Run configuration, converted to a plain dict for portability.
        step: Global training step at save time.
    """
    parent = os.path.dirname(path)
    # os.makedirs("") raises FileNotFoundError, so only create directories
    # when the path actually has a directory component (a bare filename
    # saves into the current working directory).
    if parent:
        os.makedirs(parent, exist_ok=True)
    torch.save(
        {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "config": _to_dict(cfg),
            "step": step,
        },
        path,
    )
|
|
|
|
|
|
|
|
def main():
    """Train GliomaSAM3-MoE from a YAML config, with optional periodic testing."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, default="configs/debug.yaml")
    parser.add_argument("--synthetic", type=str, default=None, help="Override synthetic flag.")
    args = parser.parse_args()

    cfg = load_config(args.config)
    # CLI flag (a string) overrides the config's synthetic switch.
    if args.synthetic is not None:
        cfg.synthetic = str2bool(args.synthetic)

    # Silence known-noisy deprecation/read-only warnings from torch/numpy.
    warnings.filterwarnings("ignore", category=FutureWarning, message=".*GradScaler.*")
    warnings.filterwarnings("ignore", category=FutureWarning, message=".*autocast.*")
    warnings.filterwarnings("ignore", category=UserWarning, message="The given NumPy array is not writable.*")

    set_seed(cfg.seed)
    device = torch.device(cfg.device if torch.cuda.is_available() else "cpu")

    # --- Training dataset -------------------------------------------------
    if cfg.synthetic:
        transforms = get_synthetic_transforms(cfg)
        dataset = SyntheticBraTSDataset(
            num_cases=cfg.data.synthetic_cases,
            shape=cfg.data.synthetic_shape,
            transforms=transforms,
            seed=cfg.seed,
        )
    else:
        data_format = getattr(cfg.data, "format", "nifti")
        if data_format == "segmamba_npz":
            train_rate = getattr(cfg.data, "train_rate", 0.7)
            val_rate = getattr(cfg.data, "val_rate", 0.1)
            test_rate = getattr(cfg.data, "test_rate", 0.2)
            train_paths, _, _ = split_npz_paths(
                cfg.data.root_dir, train_rate=train_rate, val_rate=val_rate, test_rate=test_rate, seed=cfg.seed
            )
            ensure_npy = bool(getattr(cfg.data, "segmamba_unpack", True))
            transforms = get_npz_train_transforms(cfg)
            dataset = SegMambaNPZDataset(
                data_dir=cfg.data.root_dir,
                npz_paths=train_paths,
                test=False,
                ensure_npy=ensure_npy,
                map_et_to_4=True,
                transforms=transforms,
            )
        else:
            # Previously this fell through to a confusing NameError on
            # `dataset`; fail fast with an actionable message instead.
            raise ValueError(f"Unsupported data format for training: {data_format!r}")
    loader = DataLoader(
        dataset,
        batch_size=cfg.data.batch_size,
        shuffle=True,
        num_workers=cfg.train.num_workers,
    )

    # --- Optional test loader (segmamba_npz only) -------------------------
    test_loader = None
    if not cfg.synthetic and getattr(cfg.train, "test_every_epochs", 0):
        data_format = getattr(cfg.data, "format", "nifti")
        if data_format == "segmamba_npz":
            train_rate = getattr(cfg.data, "train_rate", 0.7)
            val_rate = getattr(cfg.data, "val_rate", 0.1)
            test_rate = getattr(cfg.data, "test_rate", 0.2)
            _, _, test_paths = split_npz_paths(
                cfg.data.root_dir, train_rate=train_rate, val_rate=val_rate, test_rate=test_rate, seed=cfg.seed
            )
            ensure_npy = bool(getattr(cfg.data, "segmamba_unpack", True))
            test_ds = SegMambaNPZDataset(
                data_dir=cfg.data.root_dir,
                npz_paths=test_paths,
                test=False,
                ensure_npy=ensure_npy,
                map_et_to_4=True,
            )
            test_loader = DataLoader(
                test_ds,
                batch_size=1,
                shuffle=False,
                num_workers=max(0, int(cfg.train.num_workers)),
            )
        else:
            print("[WARN] test_every_epochs is set but only segmamba_npz is supported; skipping test.")

    # --- Model, loss, optimizer, AMP ---------------------------------------
    model = GliomaSAM3_MoE(**cfg.model.__dict__).to(device)
    loss_fn = LossComputer(**cfg.loss.__dict__).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.train.lr, weight_decay=cfg.train.weight_decay)
    # GradScaler/autocast are no-ops when disabled, so CPU runs stay valid.
    scaler = GradScaler(enabled=bool(cfg.amp and device.type == "cuda"))

    # --- Training loop ------------------------------------------------------
    model.train()
    step = 0
    for epoch in range(cfg.train.epochs):
        pbar = tqdm(
            loader,
            desc=f"Epoch {epoch + 1}/{cfg.train.epochs}",
            leave=False,
            dynamic_ncols=True,
        )
        for batch in pbar:
            step += 1
            image = batch["image"].to(device)
            label = batch["label"].to(device)
            # Fourier amplitude mixing augmentation (no-op when prob <= 0).
            image = fourier_amplitude_mix(image, p=cfg.train.fourier_mix_prob)

            optimizer.zero_grad(set_to_none=True)
            with autocast(enabled=bool(cfg.amp and device.type == "cuda")):
                logits, aux = model(image, label=label if cfg.train.use_label_prompt else None)
                loss, logs = loss_fn(logits, aux, label)
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()

            if step % cfg.train.log_every == 0:
                with torch.no_grad():
                    gt = label_to_regions(label)
                    pred = (torch.sigmoid(logits) > 0.5).float()
                    dice = compute_dice(pred, gt).detach().cpu().numpy().tolist()
                pbar.set_postfix(
                    {
                        "step": step,
                        "loss": f"{logs['loss_total']:.4f}",
                        "dice": [f"{d:.3f}" for d in dice],
                    }
                )

            if step % cfg.train.save_every == 0:
                ckpt_path = os.path.join(cfg.train.ckpt_dir, f"ckpt_step{step}.pt")
                save_checkpoint(ckpt_path, model, optimizer, cfg, step)

            if step >= cfg.train.max_steps:
                break

        test_every = int(getattr(cfg.train, "test_every_epochs", 0))
        if test_loader is not None and test_every > 0 and (epoch + 1) % test_every == 0:
            metrics = evaluate_test(model, test_loader, cfg, device)
            print(
                f"[TEST] epoch={epoch + 1} dice[WT,TC,ET]={metrics['dice']} "
                f"hd95[WT,TC,ET]={metrics['hd95']}"
            )
            # evaluate_test leaves the model in eval mode; restore training.
            model.train()

        if step >= cfg.train.max_steps:
            break
|
|
|
|
|
|
|
|
# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
|
|