# ChipYTY's picture
# Add files using upload-large-folder tool
# fe8202e verified
import argparse
import os
import sys
from types import SimpleNamespace
from typing import Any
import yaml
import numpy as np
import torch
import nibabel as nib
from torch.utils.data import DataLoader
# Avoid heavy MONAI import side effects.
os.environ.setdefault("MONAI_SKIP_SUBMODULES", "1")
from monai.inferers import sliding_window_inference
sys.path.append(os.path.join(os.path.dirname(__file__), "src"))
from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE
from gliomasam3_moe.data.brats_dataset import BraTSDataset, SegMambaNPZDataset
from gliomasam3_moe.data.transforms_segmamba_like import get_infer_transforms
from gliomasam3_moe.utils.brats_regions import regions_to_label
from gliomasam3_moe.utils.postprocess import remove_small_components
def _to_namespace(obj: Any):
if isinstance(obj, dict):
return SimpleNamespace(**{k: _to_namespace(v) for k, v in obj.items()})
return obj
def load_config(path: str) -> SimpleNamespace:
    """Parse the YAML file at *path* into a nested SimpleNamespace tree."""
    with open(path, "r") as handle:
        raw = yaml.safe_load(handle)
    return _to_namespace(raw)
def _get_affine(meta_dict):
if meta_dict is None:
return np.eye(4)
affine = meta_dict.get("affine", None)
if isinstance(affine, torch.Tensor):
affine = affine.detach().cpu().numpy()
if isinstance(affine, np.ndarray) and affine.ndim == 3:
affine = affine[0]
if affine is None:
affine = np.eye(4)
return affine
def save_nifti(path: str, arr: np.ndarray, affine: np.ndarray):
    """Write *arr* to *path* as a NIfTI-1 image using the given affine."""
    nib.save(nib.Nifti1Image(arr, affine), path)
def save_segmamba_3c(path: str, arr_3c: np.ndarray, affine: np.ndarray | None = None):
"""Save 3-channel mask for SegMamba metrics.
Expected input: [3, D, H, W], saved as 4D NIfTI (D,H,W,3).
"""
if affine is None:
affine = np.eye(4)
if arr_3c.ndim != 4 or arr_3c.shape[0] != 3:
raise ValueError(f"expected (3,D,H,W), got {arr_3c.shape}")
arr = arr_3c.transpose(1, 2, 3, 0) # (D,H,W,3)
save_nifti(path, arr.astype(np.uint8), affine)
def main():
    """CLI entry point: run sliding-window inference and save NIfTI outputs.

    Loads the model from --checkpoint, builds a DataLoader for either the
    SegMamba NPZ format or a BraTS NIfTI folder (chosen by cfg.data.format),
    then for every case writes region probabilities, binarized regions, and
    an integer label map into --output.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, default="configs/train.yaml")
    parser.add_argument("--input", type=str, required=True, help="Case folder or root folder.")
    parser.add_argument("--output", type=str, default="./prediction_results/segmamba")
    parser.add_argument("--checkpoint", type=str, required=True)
    args = parser.parse_args()
    cfg = load_config(args.config)
    # Fall back to CPU when CUDA is unavailable, regardless of cfg.device.
    device = torch.device(cfg.device if torch.cuda.is_available() else "cpu")
    model = GliomaSAM3_MoE(**cfg.model.__dict__).to(device)
    # Load on CPU first so checkpoints trained on another device still open.
    ckpt = torch.load(args.checkpoint, map_location="cpu")
    model.load_state_dict(ckpt["model"], strict=True)
    model.eval()
    data_format = getattr(cfg.data, "format", "nifti")
    input_path = args.input
    if data_format == "segmamba_npz":
        # SegMamba-preprocessed NPZ directory.
        if not os.path.isdir(input_path):
            raise ValueError("Input must be a directory containing *.npz files.")
        ensure_npy = bool(getattr(cfg.data, "segmamba_unpack", True))
        dataset = SegMambaNPZDataset(
            data_dir=input_path,
            test=True,
            ensure_npy=ensure_npy,
            map_et_to_4=True,
        )
        loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
    else:
        # BraTS NIfTI layout: --input is either a single case folder (it
        # directly contains the modality files) or a root of case folders.
        if os.path.isdir(input_path):
            has_modalities = any(
                os.path.isfile(os.path.join(input_path, m + ".nii.gz")) or os.path.isfile(os.path.join(input_path, m + ".nii"))
                for m in cfg.data.modalities
            )
            if has_modalities:
                # Single case: dataset root is the parent, case id is the folder name.
                root_dir = os.path.dirname(input_path)
                case_ids = [os.path.basename(input_path)]
            else:
                # Root folder: let the dataset discover all cases.
                root_dir = input_path
                case_ids = None
        else:
            raise ValueError("Input must be a directory.")
        image_keys = [f"image{i}" for i in range(len(cfg.data.modalities))]
        transforms = get_infer_transforms(cfg, image_keys=image_keys)
        dataset = BraTSDataset(
            root_dir=root_dir,
            modalities=cfg.data.modalities,
            seg_name=cfg.data.seg_name,
            transforms=transforms,
            include_label=False,
            case_ids=case_ids,
            image_keys=image_keys,
        )
        loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
    os.makedirs(args.output, exist_ok=True)
    with torch.no_grad():
        for batch in loader:
            image = batch["image"].to(device)
            case_id = batch["case_id"][0] if isinstance(batch["case_id"], (list, tuple)) else batch["case_id"]
            # Sliding window for logits only (aux is computed from full pass).
            logits = sliding_window_inference(
                inputs=image,
                roi_size=tuple(cfg.infer.roi_size),
                sw_batch_size=cfg.infer.sw_batch_size,
                predictor=lambda x: model(x)[0],
                overlap=cfg.infer.overlap,
            )
            # Second full-volume pass only to obtain the auxiliary outputs.
            _, aux = model(image)
            probs = torch.sigmoid(logits)
            # Gate the ET channel (index 2) by the model's ET-presence score.
            pi_et = aux["pi_et"].view(probs.shape[0], 1, 1, 1, 1)
            probs[:, 2:3] = probs[:, 2:3] * pi_et
            regions_bin = (probs > cfg.infer.threshold).float()
            # ET postprocess (remove small components)
            et_pp = remove_small_components(regions_bin[:, 2], cfg.infer.et_cc_min_size)
            regions_bin[:, 2] = et_pp
            label_map = regions_to_label(regions_bin)
            meta = batch.get("image_meta_dict", None)
            affine = _get_affine(meta)
            # Channel-first tensors -> channel-last volumes for NIfTI output.
            prob_np = probs[0].detach().cpu().numpy().transpose(1, 2, 3, 0)  # (D,H,W,3)
            bin_np = regions_bin[0].detach().cpu().numpy().transpose(1, 2, 3, 0)
            lbl_np = label_map[0, 0].detach().cpu().numpy().astype(np.int16)
            save_nifti(os.path.join(args.output, f"{case_id}_regions_prob.nii.gz"), prob_np, affine)
            save_nifti(os.path.join(args.output, f"{case_id}_regions_bin.nii.gz"), bin_np, affine)
            save_nifti(os.path.join(args.output, f"{case_id}_label.nii.gz"), lbl_np, affine)
            if data_format == "segmamba_npz":
                # Extra 3-channel mask in the layout SegMamba's metrics expect.
                seg_path = os.path.join(args.output, f"{case_id}.nii.gz")
                seg_arr = regions_bin[0].detach().cpu().numpy().astype(np.uint8)
                save_segmamba_3c(seg_path, seg_arr, affine)
# Script entry point.
if __name__ == "__main__":
    main()