File size: 6,471 Bytes
fe8202e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
import argparse
import os
import sys
from types import SimpleNamespace
from typing import Any

import yaml
import numpy as np
import torch
import nibabel as nib
from torch.utils.data import DataLoader

# Avoid heavy MONAI import side effects.
os.environ.setdefault("MONAI_SKIP_SUBMODULES", "1")
from monai.inferers import sliding_window_inference

sys.path.append(os.path.join(os.path.dirname(__file__), "src"))

from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE
from gliomasam3_moe.data.brats_dataset import BraTSDataset, SegMambaNPZDataset
from gliomasam3_moe.data.transforms_segmamba_like import get_infer_transforms
from gliomasam3_moe.utils.brats_regions import regions_to_label
from gliomasam3_moe.utils.postprocess import remove_small_components


def _to_namespace(obj: Any):
    if isinstance(obj, dict):
        return SimpleNamespace(**{k: _to_namespace(v) for k, v in obj.items()})
    return obj


def load_config(path: str) -> SimpleNamespace:
    """Parse the YAML file at *path* into nested SimpleNamespace objects."""
    with open(path, "r") as fh:
        raw = yaml.safe_load(fh)
    return _to_namespace(raw)


def _get_affine(meta_dict):
    if meta_dict is None:
        return np.eye(4)
    affine = meta_dict.get("affine", None)
    if isinstance(affine, torch.Tensor):
        affine = affine.detach().cpu().numpy()
    if isinstance(affine, np.ndarray) and affine.ndim == 3:
        affine = affine[0]
    if affine is None:
        affine = np.eye(4)
    return affine


def save_nifti(path: str, arr: np.ndarray, affine: np.ndarray):
    """Write *arr* to *path* as a NIfTI-1 image using the given affine."""
    nib.save(nib.Nifti1Image(arr, affine), path)


def save_segmamba_3c(path: str, arr_3c: np.ndarray, affine: np.ndarray | None = None):
    """Save 3-channel mask for SegMamba metrics.

    Expected input: [3, D, H, W], saved as 4D NIfTI (D,H,W,3).
    """
    if affine is None:
        affine = np.eye(4)
    if arr_3c.ndim != 4 or arr_3c.shape[0] != 3:
        raise ValueError(f"expected (3,D,H,W), got {arr_3c.shape}")
    arr = arr_3c.transpose(1, 2, 3, 0)  # (D,H,W,3)
    save_nifti(path, arr.astype(np.uint8), affine)


def main() -> None:
    """CLI entry point: run inference on BraTS-style cases and save NIfTI outputs.

    Loads a YAML config and a model checkpoint, builds a dataset from
    ``--input`` (either SegMamba NPZ files or per-case NIfTI folders,
    depending on ``cfg.data.format``), runs sliding-window inference, and
    writes per-case probability, binary-region, and label-map volumes
    into ``--output``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, default="configs/train.yaml")
    parser.add_argument("--input", type=str, required=True, help="Case folder or root folder.")
    parser.add_argument("--output", type=str, default="./prediction_results/segmamba")
    parser.add_argument("--checkpoint", type=str, required=True)
    args = parser.parse_args()

    cfg = load_config(args.config)
    # Fall back to CPU when CUDA is unavailable, regardless of cfg.device.
    device = torch.device(cfg.device if torch.cuda.is_available() else "cpu")

    # Build the model from config kwargs and load weights strictly
    # (checkpoint stores the state dict under the "model" key).
    model = GliomaSAM3_MoE(**cfg.model.__dict__).to(device)
    ckpt = torch.load(args.checkpoint, map_location="cpu")
    model.load_state_dict(ckpt["model"], strict=True)
    model.eval()
    data_format = getattr(cfg.data, "format", "nifti")

    input_path = args.input
    if data_format == "segmamba_npz":
        # SegMamba preprocessed NPZ layout: one *.npz per case in a flat dir.
        if not os.path.isdir(input_path):
            raise ValueError("Input must be a directory containing *.npz files.")
        ensure_npy = bool(getattr(cfg.data, "segmamba_unpack", True))
        dataset = SegMambaNPZDataset(
            data_dir=input_path,
            test=True,
            ensure_npy=ensure_npy,
            map_et_to_4=True,
        )
        loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
    else:
        # NIfTI layout: --input is either a single case folder (contains the
        # modality files directly) or a root folder of case subfolders.
        if os.path.isdir(input_path):
            has_modalities = any(
                os.path.isfile(os.path.join(input_path, m + ".nii.gz")) or os.path.isfile(os.path.join(input_path, m + ".nii"))
                for m in cfg.data.modalities
            )
            if has_modalities:
                # Single-case mode: treat the parent as root, this folder as the case.
                root_dir = os.path.dirname(input_path)
                case_ids = [os.path.basename(input_path)]
            else:
                # Root mode: case_ids=None lets the dataset discover cases.
                root_dir = input_path
                case_ids = None
        else:
            raise ValueError("Input must be a directory.")

        # One image key per modality, e.g. image0..imageN-1.
        image_keys = [f"image{i}" for i in range(len(cfg.data.modalities))]
        transforms = get_infer_transforms(cfg, image_keys=image_keys)
        dataset = BraTSDataset(
            root_dir=root_dir,
            modalities=cfg.data.modalities,
            seg_name=cfg.data.seg_name,
            transforms=transforms,
            include_label=False,
            case_ids=case_ids,
            image_keys=image_keys,
        )
        loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)

    os.makedirs(args.output, exist_ok=True)
    with torch.no_grad():
        for batch in loader:
            image = batch["image"].to(device)
            # case_id may be collated into a list by the DataLoader.
            case_id = batch["case_id"][0] if isinstance(batch["case_id"], (list, tuple)) else batch["case_id"]

            # Sliding window for logits only (aux is computed from full pass).
            logits = sliding_window_inference(
                inputs=image,
                roi_size=tuple(cfg.infer.roi_size),
                sw_batch_size=cfg.infer.sw_batch_size,
                predictor=lambda x: model(x)[0],
                overlap=cfg.infer.overlap,
            )
            # Second full-volume pass only to obtain the aux outputs (pi_et).
            # NOTE(review): this runs the model on the whole image at once —
            # presumably feasible memory-wise for these volumes; confirm.
            _, aux = model(image)
            probs = torch.sigmoid(logits)
            # Gate the ET channel (index 2) by the predicted ET presence
            # probability pi_et, broadcast over the spatial dims.
            pi_et = aux["pi_et"].view(probs.shape[0], 1, 1, 1, 1)
            probs[:, 2:3] = probs[:, 2:3] * pi_et
            regions_bin = (probs > cfg.infer.threshold).float()

            # ET postprocess (remove small components)
            et_pp = remove_small_components(regions_bin[:, 2], cfg.infer.et_cc_min_size)
            regions_bin[:, 2] = et_pp

            # Collapse the binary region channels into a single label map.
            label_map = regions_to_label(regions_bin)

            meta = batch.get("image_meta_dict", None)
            affine = _get_affine(meta)
            # Channel-first (C,D,H,W) -> channel-last (D,H,W,C) for NIfTI.
            prob_np = probs[0].detach().cpu().numpy().transpose(1, 2, 3, 0)  # (D,H,W,3)
            bin_np = regions_bin[0].detach().cpu().numpy().transpose(1, 2, 3, 0)
            lbl_np = label_map[0, 0].detach().cpu().numpy().astype(np.int16)

            save_nifti(os.path.join(args.output, f"{case_id}_regions_prob.nii.gz"), prob_np, affine)
            save_nifti(os.path.join(args.output, f"{case_id}_regions_bin.nii.gz"), bin_np, affine)
            save_nifti(os.path.join(args.output, f"{case_id}_label.nii.gz"), lbl_np, affine)

            if data_format == "segmamba_npz":
                # Extra channel-first 3-channel mask expected by SegMamba metrics.
                seg_path = os.path.join(args.output, f"{case_id}.nii.gz")
                seg_arr = regions_bin[0].detach().cpu().numpy().astype(np.uint8)
                save_segmamba_3c(seg_path, seg_arr, affine)


if __name__ == "__main__":
    main()