|
|
import argparse |
|
|
import glob |
|
|
import os |
|
|
import re |
|
|
import sys |
|
|
|
|
|
import numpy as np |
|
|
import torch |
|
|
import SimpleITK as sitk |
|
|
|
|
|
|
|
|
# Presumably tells MONAI to skip eager import of optional submodules for a
# faster/lighter import — TODO confirm against the installed MONAI version.
os.environ.setdefault("MONAI_SKIP_SUBMODULES", "1")

# NOTE(review): "" (cwd) and the repo root are removed from sys.path before
# `import monai`, then the repo root is re-inserted afterwards — presumably a
# local directory here would otherwise shadow the installed `monai` package;
# confirm against the repository layout. The statement order below matters.
_repo_root = os.path.abspath(os.path.dirname(__file__))
if "" in sys.path:
    sys.path.remove("")
if _repo_root in sys.path:
    sys.path.remove(_repo_root)
import monai
sys.path.insert(0, _repo_root)
|
|
|
|
|
from monai.inferers import SlidingWindowInferer |
|
|
from monai.utils import set_determinism |
|
|
|
|
|
from light_training.dataloading.dataset import MedicalDataset, get_train_val_test_loader_from_train |
|
|
from light_training.evaluation.metric import dice |
|
|
from light_training.prediction import Predictor |
|
|
from light_training.trainer import Trainer |
|
|
|
|
|
# Fix random seeds (MONAI helper) so any stochastic behavior is reproducible.
set_determinism(123)
|
|
|
|
|
|
|
|
def _parse_csv_ints(s: str, n: int):
    """Parse a comma-separated string into exactly ``n`` ints.

    Blank/whitespace-only segments are ignored. Raises ``ValueError`` when the
    number of remaining segments differs from ``n``.
    """
    tokens = []
    for raw in str(s).split(","):
        raw = raw.strip()
        if raw:
            tokens.append(raw)
    if len(tokens) != n:
        raise ValueError(f"expect {n} integers like '128,128,128', got: {s}")
    return list(map(int, tokens))
|
|
|
|
|
|
|
|
def _parse_csv_floats(s: str, n: int):
    """Parse a comma-separated string into exactly ``n`` floats.

    Blank/whitespace-only segments are ignored. Raises ``ValueError`` when the
    number of remaining segments differs from ``n``.
    """
    tokens = []
    for raw in str(s).split(","):
        raw = raw.strip()
        if raw:
            tokens.append(raw)
    if len(tokens) != n:
        raise ValueError(f"expect {n} floats like '1,1,1', got: {s}")
    return list(map(float, tokens))
|
|
|
|
|
|
|
|
def _find_ckpt_from_logdir(logdir: str, prefer: str = "best") -> str:
    """Locate a checkpoint (``*.pt``) under ``<logdir>/model``.

    Search order depends on ``prefer``:
      - ``"best"``:   ``best_model_*.pt``, then ``final_model_*.pt``, then the
        most recently modified ``tmp_model_ep*.pt`` / any ``*.pt``.
      - ``"final"``:  ``final_model_*.pt``, then the fallbacks above.
      - ``"latest"``: only the mtime-based fallbacks.

    Files carrying a trailing numeric score in the name (e.g.
    ``best_model_0.8765.pt``) are ranked by that score, highest first; when no
    name matches, the lexicographically last candidate is used instead.

    Args:
        logdir: training log directory containing a ``model`` subfolder.
        prefer: one of ``"best"``, ``"final"``, ``"latest"``.

    Returns:
        Absolute/relative path of the chosen checkpoint file.

    Raises:
        FileNotFoundError: if ``<logdir>/model`` is missing or holds no ``*.pt``.
    """
    model_dir = os.path.join(logdir, "model")
    if not os.path.isdir(model_dir):
        raise FileNotFoundError(f"model dir not found: {model_dir}")

    best = sorted(glob.glob(os.path.join(model_dir, "best_model_*.pt")))
    final = sorted(glob.glob(os.path.join(model_dir, "final_model_*.pt")))
    tmp = sorted(glob.glob(os.path.join(model_dir, "tmp_model_ep*.pt")))
    any_pt = sorted(glob.glob(os.path.join(model_dir, "*.pt")))

    def pick_by_score(paths):
        # Extract the trailing numeric score, e.g. "best_model_0.8765.pt" -> 0.8765.
        # BUG FIX: the previous pattern r"_(\d+\\.?\\d*)\\.pt$" contained doubled
        # backslashes inside a raw string, so it demanded literal backslash
        # characters in the filename and never matched; score-based selection
        # silently fell back to lexicographic order.
        scored = []
        for p in paths:
            m = re.search(r"_(\d+\.?\d*)\.pt$", os.path.basename(p))
            if m is None:
                continue
            try:
                scored.append((float(m.group(1)), p))
            except ValueError:
                continue
        if scored:
            scored.sort(key=lambda x: x[0], reverse=True)
            return scored[0][1]
        return None

    if prefer == "best":
        picked = pick_by_score(best) or (best[-1] if best else None)
        if picked:
            return picked
    if prefer in {"best", "final"}:
        picked = pick_by_score(final) or (final[-1] if final else None)
        if picked:
            return picked
    if prefer in {"best", "final", "latest"}:
        # Fall back to the most recently written checkpoint by mtime.
        if tmp:
            tmp.sort(key=lambda p: os.path.getmtime(p), reverse=True)
            return tmp[0]
        if any_pt:
            any_pt.sort(key=lambda p: os.path.getmtime(p), reverse=True)
            return any_pt[0]

    raise FileNotFoundError(f"no checkpoint found under: {model_dir}")
|
|
|
|
|
|
|
|
class BraTSTrainer(Trainer):
    """Inference-only wrapper around the project ``Trainer``.

    Loads a SegMamba checkpoint and reuses the trainer's validation loop
    (``validation_single_gpu`` drives ``validation_step``) to run sliding-window
    prediction — optionally with mirror test-time augmentation — on each case,
    saving a 3-channel binary mask per case as ``<save_path>/<name>.nii.gz``.
    No training is performed despite the base-class name.
    """

    def __init__(
        self,
        ckpt_path: str,
        save_path: str,
        patch_size,
        sw_batch_size: int = 2,
        overlap: float = 0.5,
        mirror_axes=(0, 1, 2),
        raw_spacing=(1.0, 1.0, 1.0),
        device="cuda:0",
        print_dice: bool = False,
    ):
        """Build the model, predictor and output directory.

        Args:
            ckpt_path: checkpoint (*.pt) loaded via the Trainer's
                ``load_state_dict`` helper with ``strict=True``.
            save_path: output directory for predicted NIfTI files (created
                if missing).
            patch_size: sliding-window ROI size, e.g. ``[128, 128, 128]``.
            sw_batch_size: number of windows scored per forward pass.
            overlap: fractional overlap between adjacent windows.
            mirror_axes: axes for mirror TTA; ``None`` disables mirroring.
            raw_spacing: 3-element voxel spacing written into the saved
                NIfTI header.
            device: torch device string used for inference.
            print_dice: if True, print per-channel dice against the
                preprocessed segmentation for each case.
        """
        # max_epochs / batch_size / val_every are placeholders: only the
        # validation machinery of the base Trainer is used here.
        super().__init__(
            env_type="pytorch",
            max_epochs=1,
            batch_size=1,
            device=device,
            val_every=1,
            num_gpus=1,
            logdir="",
            master_port=17751,
            training_script=__file__,
        )

        self.patch_size = patch_size
        # Disable augmentation in the base Trainer — presumably read by the
        # base class's data pipeline; confirm against Trainer implementation.
        self.augmentation = False
        self.print_dice = print_dice
        self.save_path = save_path
        self.raw_spacing = raw_spacing

        # Local import keeps the heavy model package off the module import
        # path until a trainer is actually constructed.
        from model_segmamba.segmamba import SegMamba

        # in_chans=4: presumably the four BraTS modalities; out_chans=4:
        # presumably background + 3 tumor labels — TODO confirm.
        self.model = SegMamba(
            in_chans=4,
            out_chans=4,
            depths=[2, 2, 2, 2],
            feat_size=[48, 96, 192, 384],
        )
        # Load weights from the checkpoint; strict=True rejects any key mismatch.
        self.load_state_dict(ckpt_path, strict=True)
        self.model.eval()

        # Gaussian-weighted sliding-window inference over the full volume.
        window_infer = SlidingWindowInferer(
            roi_size=patch_size,
            sw_batch_size=sw_batch_size,
            overlap=overlap,
            progress=True,
            mode="gaussian",
        )

        # Predictor combines the window inferer with optional mirror TTA.
        self.predictor = Predictor(
            window_infer=window_infer,
            mirror_axes=list(mirror_axes) if mirror_axes is not None else None,
        )

        os.makedirs(self.save_path, exist_ok=True)

    def convert_labels(self, labels):
        """Turn an integer label map (labels in {1, 2, 3}) into 3 overlapping
        binary channels concatenated along dim 1 (the channel dim):
        [1∪3, 1∪2∪3, 3] — presumably TC / WT / ET in BraTS terms; confirm
        against the preprocessing's label convention. Expects a channel dim
        of size 1 at dim 1; returns a float tensor with 3 channels.
        """
        result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]

        return torch.cat(result, dim=1).float()

    def get_input(self, batch):
        """Unpack a batch dict into (image, 3-channel label, properties)."""
        image = batch["data"]
        label = batch["seg"]
        properties = batch["properties"]
        # Convert the raw integer seg to the 3-channel region encoding so it
        # matches the prediction format used in validation_step.
        label = self.convert_labels(label)

        return image, label, properties

    def validation_step(self, batch):
        """Predict one case, optionally print dice, and save the mask as NIfTI.

        Returns 0 — a dummy metric for the base Trainer's validation loop.
        """
        image, label, properties = self.get_input(batch)

        # Sliding-window forward pass with optional mirror TTA.
        logits = self.predictor.maybe_mirror_and_predict(image, self.model, device=self.device)
        # argmax over the class dim -> integer label map; re-insert a channel
        # dim so convert_labels can be reused, then [0] drops the batch dim,
        # leaving a (3, ...) uint8 array.
        pred_lbl = logits.argmax(dim=1)
        pred_3c = self.convert_labels(pred_lbl[:, None])[0].cpu().numpy().astype(np.uint8)

        if self.print_dice:
            # Per-channel dice against the (already converted) ground truth.
            gt_3c = label[0].cpu().numpy()
            dices = [dice(pred_3c[i], gt_3c[i]) for i in range(3)]
            print(dices)

        # Case name may arrive batched as a list/tuple of length 1.
        case_name = properties.get("name", "")
        if isinstance(case_name, (list, tuple)) and len(case_name) > 0:
            case_name = case_name[0]

        out_path = os.path.join(self.save_path, f"{case_name}.nii.gz")
        # pred_3c is 4-D (channels first), so SimpleITK builds a 4-D image and
        # needs a 4-component spacing; the channel axis gets spacing 1.0.
        pred_itk = sitk.GetImageFromArray(pred_3c, isVector=False)
        pred_itk.SetSpacing((float(self.raw_spacing[0]), float(self.raw_spacing[1]), float(self.raw_spacing[2]), 1.0))
        sitk.WriteImage(pred_itk, out_path)
        print(f"saved: {out_path}")

        return 0

    def convert_labels_dim0(self, labels):
        """Same region encoding as ``convert_labels`` but concatenated along
        dim 0 — for tensors without a leading batch dimension.
        """
        result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]

        return torch.cat(result, dim=0).float()
|
|
|
|
|
|
|
|
def main():
    """CLI entry point: run SegMamba sliding-window inference on BraTS2023.

    Parses command-line options, resolves the checkpoint, builds a
    ``BraTSTrainer`` and runs prediction over the selected data split.
    """
    ap = argparse.ArgumentParser(description="SegMamba inference/prediction for BraTS2023.")

    # --- data / split selection ---
    ap.add_argument("--data_dir", type=str, default="./data/fullres/train", help="Preprocessed data directory (contains *.npz).")
    ap.add_argument("--split", type=str, default="test", choices=["train", "val", "test", "all"])
    ap.add_argument("--train_rate", type=float, default=0.7)
    ap.add_argument("--val_rate", type=float, default=0.1)
    ap.add_argument("--test_rate", type=float, default=0.2)
    ap.add_argument("--seed", type=int, default=42)

    # --- checkpoint selection ---
    ap.add_argument("--ckpt", type=str, default="", help="Checkpoint path (*.pt). If empty, will search under --logdir/model.")
    ap.add_argument("--logdir", type=str, default="./logs/segmamba", help="Training logdir to locate checkpoints when --ckpt is empty.")
    ap.add_argument("--ckpt_prefer", type=str, default="best", choices=["best", "final", "latest"])

    # --- inference / output options ---
    ap.add_argument("--save_dir", type=str, default="./prediction_results/segmamba", help="Directory to save prediction nii.gz.")
    ap.add_argument("--device", type=str, default="cuda:0")
    ap.add_argument("--patch_size", type=str, default="128,128,128")
    ap.add_argument("--sw_batch_size", type=int, default=2)
    ap.add_argument("--overlap", type=float, default=0.5)
    ap.add_argument("--raw_spacing", type=str, default="1,1,1", help="Spacing used when saving NIfTI, e.g. '1,1,1'.")
    ap.add_argument("--no_mirror", action="store_true", help="Disable mirror TTA.")
    ap.add_argument("--print_dice", action="store_true", help="Print dice against preprocessed seg (if available).")
    opts = ap.parse_args()

    roi = _parse_csv_ints(opts.patch_size, 3)
    spacing = _parse_csv_floats(opts.raw_spacing, 3)

    # Resolve the checkpoint: explicit --ckpt wins, otherwise search logdir.
    ckpt = opts.ckpt.strip()
    if not ckpt:
        ckpt = _find_ckpt_from_logdir(opts.logdir, prefer=opts.ckpt_prefer)
    if not os.path.isfile(ckpt):
        raise FileNotFoundError(f"checkpoint not found: {ckpt}")
    print(f"Using checkpoint: {ckpt}")

    trainer = BraTSTrainer(
        ckpt_path=ckpt,
        save_path=opts.save_dir,
        patch_size=roi,
        sw_batch_size=opts.sw_batch_size,
        overlap=opts.overlap,
        mirror_axes=None if opts.no_mirror else (0, 1, 2),
        raw_spacing=spacing,
        device=opts.device,
        print_dice=opts.print_dice,
    )

    # Build the dataset for the requested split.
    if opts.split == "all":
        npz_paths = sorted(glob.glob(os.path.join(opts.data_dir, "*.npz")))
        ds = MedicalDataset(npz_paths, test=False)
    else:
        tr_ds, va_ds, te_ds = get_train_val_test_loader_from_train(
            opts.data_dir,
            train_rate=opts.train_rate,
            val_rate=opts.val_rate,
            test_rate=opts.test_rate,
            seed=opts.seed,
        )
        ds = {"train": tr_ds, "val": va_ds, "test": te_ds}[opts.split]

    trainer.validation_single_gpu(ds)
|
|
|
|
|
|
|
|
# Script entry point.
if __name__ == "__main__":
    main()
|
|
|