|
|
|
|
|
""" |
|
|
SAM3 + LoRA 推理脚本 - 与 SegMamba 的 4_predict.py 保持一致的评估流程 |
|
|
|
|
|
读取 SegMamba 格式的预处理数据 (.npz),使用 SAM3 + LoRA + decoder 进行推理, |
|
|
输出与 SegMamba 相同格式的预测结果。 |
|
|
""" |
|
|
|
|
|
import argparse |
|
|
import glob |
|
|
import os |
|
|
import sys |
|
|
from pathlib import Path |
|
|
|
|
|
import numpy as np |
|
|
import torch |
|
|
import torch.nn as nn |
|
|
import torch.nn.functional as F |
|
|
import SimpleITK as sitk |
|
|
from tqdm import tqdm |
|
|
import random |
|
|
|
|
|
# Make the local sam3 and SegMamba checkouts importable without installation.
sys.path.insert(0, "/root/githubs/sam3")
sys.path.insert(0, "/root/githubs/SegMamba")

# Fix all RNG seeds so inference is reproducible run-to-run.
random.seed(123)
np.random.seed(123)
torch.manual_seed(123)
|
|
|
|
|
|
|
|
def dice(pred, gt):
    """Dice similarity coefficient between two binary masks.

    Both inputs are cast to boolean arrays. When both masks are empty the
    score is defined as 1.0 (perfect agreement on "nothing").
    """
    pred_mask = pred.astype(bool)
    gt_mask = gt.astype(bool)
    overlap = np.sum(np.logical_and(pred_mask, gt_mask))
    size_sum = np.sum(pred_mask) + np.sum(gt_mask)
    if size_sum == 0:
        # Both masks empty -> perfect match by convention.
        return 1.0 if np.sum(pred_mask) == 0 else 0.0
    return 2.0 * overlap / size_sum
|
|
|
|
|
|
|
|
class MedSAM3DetectorSeg(nn.Module):
    """SAM3 detector backbone + lightweight conv decoder producing mask logits.

    Same model structure as at training time:
        SAM3 detector backbone -> lightweight decoder -> mask logits
    4-class segmentation: 0=background, 1=NCR, 2=ED, 3=ET.
    """

    def __init__(self, sam3_detector: nn.Module, image_size: int = 1008, num_classes: int = 4):
        """
        Args:
            sam3_detector: SAM3 detector module exposing ``backbone.forward_image``.
            image_size: square input resolution expected by the backbone.
            num_classes: number of output segmentation classes.
        """
        super().__init__()
        self.detector = sam3_detector
        self.image_size = int(image_size)
        self.num_classes = num_classes
        # Normalization constants registered as buffers so they follow .to(device).
        self.register_buffer("mean", torch.tensor([0.5, 0.5, 0.5]).view(1, 3, 1, 1))
        self.register_buffer("std", torch.tensor([0.5, 0.5, 0.5]).view(1, 3, 1, 1))

        # Lightweight head: 256-channel backbone features -> class logits,
        # upsampled 8x overall via three bilinear x2 stages.
        # Assumes the backbone emits 256-channel feature maps — TODO confirm.
        self.decoder = nn.Sequential(
            nn.Conv2d(256, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
            nn.Conv2d(128, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
            nn.Conv2d(64, 32, 3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
            nn.Conv2d(32, num_classes, 1),
        )

    def _preprocess(self, images: torch.Tensor) -> torch.Tensor:
        """Resize (B, 3, H, W) input to the backbone's square size and normalize."""
        _, _, h, w = images.shape
        if h != self.image_size or w != self.image_size:
            images = F.interpolate(
                images, size=(self.image_size, self.image_size), mode="bilinear", align_corners=False
            )
        # mean/std are buffers already on the module's device; the explicit
        # .to(images.device) is a defensive no-op in the usual case.
        images = (images - self.mean.to(images.device)) / self.std.to(images.device)
        return images

    def _pick_feat(self, backbone_out) -> torch.Tensor:
        """Extract a 4D (B, C, H, W) feature map from the backbone output.

        Accepts either a raw tensor or a dict. For dicts, prefers the
        "sam3_features" then "features" keys, otherwise falls back to the
        first 4D tensor value found.

        Raises:
            RuntimeError: if no 4D tensor can be located.
        """
        feat = None
        if isinstance(backbone_out, dict):
            if "sam3_features" in backbone_out:
                feat = backbone_out["sam3_features"]
            elif "features" in backbone_out:
                feat = backbone_out["features"]
            else:
                for _, v in backbone_out.items():
                    if isinstance(v, torch.Tensor) and v.ndim == 4:
                        feat = v
                        break
        elif isinstance(backbone_out, torch.Tensor):
            feat = backbone_out
        if feat is None or not isinstance(feat, torch.Tensor) or feat.ndim != 4:
            raise RuntimeError("Could not find a 4D feature map in SAM3 backbone output")
        return feat

    def forward(self, images: torch.Tensor) -> torch.Tensor:
        """Map images (B, 3, H, W) to class logits (B, num_classes, H, W).

        Logits are returned at the caller's original spatial resolution.
        """
        orig_h, orig_w = images.shape[-2:]
        x = self._preprocess(images)
        backbone_out = self.detector.backbone.forward_image(x)
        feat = self._pick_feat(backbone_out)
        logits = self.decoder(feat)
        # Undo the preprocessing resize so output aligns with the input.
        if logits.shape[-2:] != (orig_h, orig_w):
            logits = F.interpolate(logits, size=(orig_h, orig_w), mode="bilinear", align_corners=False)
        return logits
|
|
|
|
|
|
|
|
def load_model(checkpoint_path: str, lora_weights: str, decoder_weights: str = None, device: str = "cuda"):
    """Build the SAM3 + LoRA + decoder model and load trained weights.

    Args:
        checkpoint_path: path to the base SAM3 checkpoint.
        lora_weights: path to the trained LoRA weight file.
        decoder_weights: explicit decoder weight path; if None, auto-detects
            "best_decoder_weights.pt" next to the LoRA weights.
        device: torch device string to place the model on.

    Returns:
        MedSAM3DetectorSeg in eval mode on ``device``.
    """
    # Project-local imports; resolvable via the sys.path entries added at module top.
    from sam3.model_builder import build_sam3_video_model
    from lora import apply_lora_to_model, load_lora_weights

    print(f"Loading SAM3 from: {checkpoint_path}")
    sam3 = build_sam3_video_model(
        checkpoint_path=checkpoint_path,
        load_from_HF=False,
        device=device,
        apply_temporal_disambiguation=True,
    )

    # Wrap the detector with the segmentation head; settings must match
    # training (image_size=1008, 4 classes) for the weights below to fit.
    model = MedSAM3DetectorSeg(sam3.detector, image_size=1008, num_classes=4)

    # LoRA must be injected BEFORE loading LoRA weights, with the same
    # rank/alpha/target modules as training so parameter names line up.
    target_modules = ["q_proj", "k_proj", "v_proj", "out_proj", "qkv", "proj"]
    print(f"Applying LoRA to detector...")
    apply_lora_to_model(
        model.detector,
        rank=8,
        alpha=16.0,
        dropout=0.0,
        target_modules=target_modules,
        exclude_modules=[],
    )

    print(f"Loading LoRA weights from: {lora_weights}")
    load_lora_weights(model.detector, lora_weights)

    # Resolve the decoder weight file: explicit path, or sibling of the LoRA file.
    if decoder_weights is not None:
        decoder_path = Path(decoder_weights)
    else:
        decoder_path = Path(lora_weights).parent / "best_decoder_weights.pt"

    if decoder_path.exists():
        print(f"Loading decoder weights from: {decoder_path}")
        # NOTE(review): torch.load without weights_only — only load trusted files.
        decoder_state = torch.load(decoder_path, map_location="cpu")
        model.decoder.load_state_dict(decoder_state)
    else:
        # A randomly initialized decoder produces garbage; warn loudly but proceed.
        print(f"WARNING: Decoder weights not found at {decoder_path}")
        print("The model will use randomly initialized decoder - predictions will be wrong!")
        print("Please retrain with the updated training script that saves decoder weights.")

    model = model.to(device)
    model.eval()
    return model
|
|
|
|
|
|
|
|
def convert_labels(labels):
    """Convert BraTS integer labels into 3 stacked binary channels (TC, WT, ET).

    - TC (Tumor Core):      label 1 or 3
    - WT (Whole Tumor):     label 1, 2 or 3
    - ET (Enhancing Tumor): label 3
    """
    channels = [
        np.isin(labels, (1, 3)),        # TC
        np.isin(labels, (1, 2, 3)),     # WT
        np.isin(labels, (3,)),          # ET
    ]
    return np.stack(channels, axis=0).astype(np.float32)
|
|
|
|
|
|
|
|
def predict_volume(model, volume_4d: np.ndarray, modality: int = 0,
                   target_size: int = 512, device: str = "cuda") -> np.ndarray:
    """Run slice-wise 4-class segmentation over one modality of a 4D volume.

    Args:
        model: segmentation model returning per-class logits (B, 4, H, W)
        volume_4d: (4, D, H, W) array holding the 4 MRI modalities
        modality: modality index to feed the model (0=T1, 1=T1ce, 2=T2, 3=FLAIR)
        target_size: square size each slice is resized to before inference
        device: torch device string

    Returns:
        (D, H, W) uint8 array of class predictions (0=background, 1=NCR, 2=ED, 3=ET)
    """
    vol = volume_4d[modality]
    depth, height, width = vol.shape
    prediction = np.zeros((depth, height, width), dtype=np.uint8)
    needs_resize = height != target_size or width != target_size

    with torch.no_grad():
        for z, axial in enumerate(vol):
            # Min-max normalize each slice independently to [0, 1];
            # constant slices (e.g. all-zero background) become all zeros.
            lo, hi = axial.min(), axial.max()
            if hi > lo:
                axial = (axial - lo) / (hi - lo)
            else:
                axial = np.zeros_like(axial)

            # Replicate the grayscale slice into a (1, 3, H, W) batch tensor.
            batch = torch.from_numpy(np.stack([axial, axial, axial], axis=0)).float().unsqueeze(0)
            if needs_resize:
                batch = F.interpolate(batch, size=(target_size, target_size),
                                      mode="bilinear", align_corners=False)

            logits = model(batch.to(device))
            cls_map = logits.argmax(dim=1)

            # Map class ids back to the original slice resolution (nearest,
            # so labels are never blended).
            if needs_resize:
                cls_map = F.interpolate(cls_map.unsqueeze(1).float(), size=(height, width),
                                        mode="nearest").squeeze(1).long()

            prediction[z] = cls_map[0].cpu().numpy()

    return prediction
|
|
|
|
|
|
|
|
def labels_to_regions(pred_3d: np.ndarray) -> dict:
    """Group 4-class BraTS predictions into the TC/WT/ET evaluation regions.

    BraTS labels:
        0: background
        1: NCR (Necrotic tumor core)
        2: ED  (Peritumoral Edema)
        3: ET  (Enhancing tumor)

    Region definitions:
        TC (Tumor Core)      = NCR + ET      = labels 1, 3
        WT (Whole Tumor)     = NCR + ED + ET = labels 1, 2, 3
        ET (Enhancing Tumor) = label 3
    """
    return {
        "TC": np.isin(pred_3d, (1, 3)).astype(np.uint8),
        "WT": np.isin(pred_3d, (1, 2, 3)).astype(np.uint8),
        "ET": np.isin(pred_3d, (3,)).astype(np.uint8),
    }
|
|
|
|
|
|
|
|
def main():
    """CLI entry point: run SAM3+LoRA slice-wise inference over SegMamba-style
    preprocessed BraTS2023 .npz cases, optionally print per-case Dice scores,
    and save 3-channel (TC/WT/ET) predictions as NIfTI volumes."""
    parser = argparse.ArgumentParser(description="SAM3+LoRA inference for BraTS2023 (SegMamba-compatible)")

    # --- data / split arguments ---
    parser.add_argument("--data_dir", type=str, default="/data/yty/brats23_processed",
                        help="Preprocessed data directory (contains *.npz)")
    parser.add_argument("--split", type=str, default="test", choices=["train", "val", "test", "all"])
    parser.add_argument("--train_rate", type=float, default=0.7)
    parser.add_argument("--val_rate", type=float, default=0.1)
    # NOTE(review): --test_rate is accepted but never read; the test split is
    # simply the remainder after train and val are carved out.
    parser.add_argument("--test_rate", type=float, default=0.2)
    parser.add_argument("--seed", type=int, default=42)

    # --- model arguments ---
    parser.add_argument("--checkpoint", type=str, default="/data/yty/sam3/sam3.pt",
                        help="SAM3 checkpoint path")
    parser.add_argument("--lora_weights", type=str,
                        default="/data/yty/brats23_sam3_video_lora_bestonly_122/checkpoints/best_lora_weights.pt",
                        help="LoRA weights path")
    parser.add_argument("--decoder_weights", type=str, default=None,
                        help="Decoder weights path (default: auto-detect from lora_weights dir)")
    parser.add_argument("--modality", type=int, default=0,
                        help="Which modality to use (0=T1, 1=T1ce, 2=T2, 3=FLAIR)")
    parser.add_argument("--target_size", type=int, default=512,
                        help="Target image size for inference")

    # --- output arguments ---
    parser.add_argument("--save_dir", type=str, default="/data/yty/brats23_sam3_predictions",
                        help="Directory to save predictions")
    parser.add_argument("--device", type=str, default="cuda:0")
    parser.add_argument("--raw_spacing", type=str, default="1,1,1")
    parser.add_argument("--print_dice", action="store_true", help="Print dice for each case")

    args = parser.parse_args()

    # Voxel spacing for the saved volumes, given as "x,y,z".
    raw_spacing = [float(x) for x in args.raw_spacing.split(",")]

    model = load_model(args.checkpoint, args.lora_weights, args.decoder_weights, args.device)

    # Enumerate all preprocessed cases; case name = npz filename stem.
    all_paths = sorted(glob.glob(os.path.join(args.data_dir, "*.npz")))
    all_names = [os.path.splitext(os.path.basename(p))[0] for p in all_paths]

    if args.split == "all":
        cases = list(zip(all_names, all_paths))
    else:
        # Reproduce the train/val/test split via a seeded shuffle. Must use the
        # same seed and rates as training for the split to be meaningful.
        n = len(all_names)
        indices = list(range(n))
        rng = np.random.RandomState(args.seed)
        rng.shuffle(indices)

        n_train = int(n * args.train_rate)
        n_val = int(n * args.val_rate)

        train_idx = indices[:n_train]
        val_idx = indices[n_train:n_train + n_val]
        test_idx = indices[n_train + n_val:]

        split_map = {"train": train_idx, "val": val_idx, "test": test_idx}
        selected_idx = split_map[args.split]

        cases = [(all_names[i], all_paths[i]) for i in selected_idx]

    print(f"Found {len(cases)} cases for split '{args.split}'")

    os.makedirs(args.save_dir, exist_ok=True)

    all_dices = []

    for case_name, npz_path in tqdm(cases, desc="Predicting"):
        # "data" holds the 4-modality volume; "seg" (optional) the GT labels.
        data = np.load(npz_path)
        image_4d = data["data"]
        seg = data.get("seg", None)

        # Slice-wise 4-class prediction over the chosen modality.
        pred_classes = predict_volume(model, image_4d, modality=args.modality,
                                      target_size=args.target_size, device=args.device)

        # Convert class labels to the TC/WT/ET evaluation regions and stack
        # them into one (3, D, H, W) array.
        pred_regions = labels_to_regions(pred_classes)
        D, H, W = pred_classes.shape
        pred_3c = np.stack([pred_regions["TC"], pred_regions["WT"], pred_regions["ET"]], axis=0)

        if seg is not None and args.print_dice:
            gt = seg[0]  # assumes seg is stored as (1, D, H, W) — TODO confirm
            gt_3c = convert_labels(gt)

            # Per-region Dice (TC, WT, ET) against the converted ground truth.
            dices = []
            for i, name in enumerate(["TC", "WT", "ET"]):
                d = dice(pred_3c[i], gt_3c[i])
                dices.append(d)

            # Binary "any tumor vs background" Dice as an overall summary.
            gt_binary = (gt > 0).astype(np.float32)
            pred_binary = (pred_classes > 0).astype(np.float32)
            overall_dice = dice(pred_binary, gt_binary)

            print(f"{case_name}: TC={dices[0]:.4f}, WT={dices[1]:.4f}, ET={dices[2]:.4f}, Overall={overall_dice:.4f}")
            all_dices.append({
                "case": case_name,
                "TC": dices[0],
                "WT": dices[1],
                "ET": dices[2],
                "Overall": overall_dice,
            })

        # Save the 3-channel region prediction as a 4D image.
        out_path = os.path.join(args.save_dir, f"{case_name}.nii.gz")
        pred_itk = sitk.GetImageFromArray(pred_3c, isVector=False)
        # 4 spacing components for the 4D image; the 4th (channel) is fixed at
        # 1.0. NOTE(review): verify axis order matches SimpleITK's convention.
        pred_itk.SetSpacing((raw_spacing[0], raw_spacing[1], raw_spacing[2], 1.0))
        sitk.WriteImage(pred_itk, out_path)

    if all_dices:
        print("\n" + "=" * 60)
        print(f"Results Summary ({len(all_dices)} cases):")
        avg_tc = np.mean([d["TC"] for d in all_dices])
        avg_wt = np.mean([d["WT"] for d in all_dices])
        avg_et = np.mean([d["ET"] for d in all_dices])
        avg_overall = np.mean([d["Overall"] for d in all_dices])
        print(f"  Average TC Dice: {avg_tc:.4f}")
        print(f"  Average WT Dice: {avg_wt:.4f}")
        print(f"  Average ET Dice: {avg_et:.4f}")
        print(f"  Average Overall Dice: {avg_overall:.4f}")
        print("=" * 60)

        # Persist per-case metrics for later analysis.
        np.save(os.path.join(args.save_dir, "metrics.npy"), all_dices, allow_pickle=True)
|
|
|
|
|
|
|
|
# Script entry point.
if __name__ == "__main__":
    main()
|
|
|
|
|
|