File size: 3,854 Bytes
fe8202e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
import argparse
import glob
import os
import re
import sys
from typing import List, Sequence, Tuple

import torch
from torch.utils.data import DataLoader

sys.path.append(os.path.join(os.path.dirname(__file__), "src"))

from gliomasam3_moe.data.brats_dataset import SegMambaNPZDataset, split_npz_paths
from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE
from train import evaluate_test, load_config


def _find_latest_ckpt(ckpt_dir: str) -> str:
    """Return the ckpt_step*.pt file in *ckpt_dir* with the highest step number.

    Raises FileNotFoundError when no matching checkpoint exists.
    """
    step_pattern = re.compile(r"ckpt_step(\d+)\.pt$")
    by_step = {}
    for candidate in glob.glob(os.path.join(ckpt_dir, "ckpt_step*.pt")):
        hit = step_pattern.search(candidate)
        if hit is not None:
            by_step[int(hit.group(1))] = candidate
    if not by_step:
        raise FileNotFoundError(f"No checkpoints found under {ckpt_dir}.")
    # Numeric max over parsed steps avoids lexicographic pitfalls (step2 vs step10).
    return by_step[max(by_step)]


def _select_train_subset(
    data_dir: str,
    train_rate: float,
    val_rate: float,
    test_rate: float,
    seed: int,
) -> Tuple[Sequence[str], int, int]:
    """Draw a seeded random subset of the train split sized to match the test split.

    Returns (subset_paths, test_split_size, full_train_size). The same *seed*
    is used both for the split and the subset permutation, so results are
    reproducible across runs.
    """
    train_paths, _, test_paths = split_npz_paths(
        data_dir, train_rate=train_rate, val_rate=val_rate, test_rate=test_rate, seed=seed
    )
    if not test_paths:
        raise ValueError("Test split size is 0; cannot match train subset size.")
    subset_size = min(len(train_paths), len(test_paths))
    # torch.Generator keeps the shuffle independent of the global RNG state.
    gen = torch.Generator().manual_seed(seed)
    order = torch.randperm(len(train_paths), generator=gen).tolist()
    chosen = [train_paths[idx] for idx in order[:subset_size]]
    return chosen, len(test_paths), len(train_paths)


def main():
    """Evaluate a trained checkpoint on a train-split subset sized like the test split.

    Loads the config, resolves the newest (or explicitly given) checkpoint,
    builds a seeded random subset of the training cases whose size matches the
    test split, and runs the same `evaluate_test` metric pass used at test
    time. Useful for eyeballing the train/test generalization gap.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, default="configs/train.yaml")
    parser.add_argument("--checkpoint", type=str, default=None, help="Path to ckpt_step*.pt (default: latest in ckpt_dir).")
    parser.add_argument("--max_cases", type=int, default=0, help="Optional cap on subset size.")
    args = parser.parse_args()

    cfg = load_config(args.config)
    data_dir = cfg.data.root_dir
    if not os.path.isdir(data_dir):
        raise FileNotFoundError(f"data.root_dir does not exist: {data_dir}")

    if getattr(cfg.data, "format", "nifti") != "segmamba_npz":
        raise ValueError("Only segmamba_npz format is supported for this evaluation script.")

    ckpt_path = args.checkpoint or _find_latest_ckpt(cfg.train.ckpt_dir)
    if not os.path.isfile(ckpt_path):
        raise FileNotFoundError(f"Checkpoint not found: {ckpt_path}")

    subset_paths, test_n, train_n = _select_train_subset(
        data_dir,
        train_rate=getattr(cfg.data, "train_rate", 0.7),
        val_rate=getattr(cfg.data, "val_rate", 0.1),
        test_rate=getattr(cfg.data, "test_rate", 0.2),
        seed=cfg.seed,
    )
    if args.max_cases > 0:
        # Slicing clamps automatically, no min() needed.
        subset_paths = subset_paths[: args.max_cases]

    device = torch.device(cfg.device if torch.cuda.is_available() else "cpu")
    model = GliomaSAM3_MoE(**cfg.model.__dict__).to(device)
    ckpt = torch.load(ckpt_path, map_location=device)
    model.load_state_dict(ckpt["model"], strict=True)
    # Put dropout/BatchNorm into inference mode; harmless if evaluate_test
    # also does this — NOTE(review): confirm evaluate_test's own handling.
    model.eval()

    ensure_npy = bool(getattr(cfg.data, "segmamba_unpack", True))
    dataset = SegMambaNPZDataset(
        data_dir=data_dir,
        npz_paths=subset_paths,
        test=False,
        ensure_npy=ensure_npy,
        map_et_to_4=True,
    )
    # batch_size=1, unshuffled: evaluation is per-case and order-independent.
    loader = DataLoader(
        dataset,
        batch_size=1,
        shuffle=False,
        num_workers=max(0, int(cfg.train.num_workers)),
    )

    metrics = evaluate_test(model, loader, cfg, device)
    print(f"[TRAIN-SUBSET] ckpt={ckpt_path}")
    print(f"[TRAIN-SUBSET] total_train={train_n} test_count={test_n} subset={len(subset_paths)}")
    print(
        f"[TRAIN-SUBSET] dice[WT,TC,ET]={metrics['dice']} "
        f"hd95[WT,TC,ET]={metrics['hd95']}"
    )


# Script entry point: run evaluation only when executed directly, not on import.
if __name__ == "__main__":
    main()