|
|
|
|
|
""" |
|
|
快速测试脚本 - 验证MedSAM3流程是否正常工作 |
|
|
处理单个病例并显示结果 |
|
|
""" |
|
|
|
|
|
import os


import sys


import numpy as np


import torch


from pathlib import Path






# Make the local SAM3 checkout importable (needed for `from sam3...` imports
# used later in this script).
sys.path.insert(0, '/root/githubs/sam3')
|
|
|
|
|
|
|
|
def test_preprocessing():
    """Test the BraTS data-preprocessing pipeline on a single case.

    Loads one BraTS case, converts a modality volume into 2D frames,
    saves per-slice segmentation masks, and writes the prompt info
    (tumor bbox/center) used later by the inference test.

    Returns:
        The output directory containing the processed data, or ``None``
        when the test case is missing or has no segmentation.
    """
    print("\n" + "="*50)
    print("Testing Data Preprocessing...")
    print("="*50)

    from preprocess_brats import load_brats_case, convert_to_frames, \
        save_segmentation_masks, get_tumor_bbox_and_center, save_prompt_info

    case_dir = "/data/yty/brats2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/BraTS-GLI-00000-000"
    output_dir = "/data/yty/brats23_sam3_test"

    if not Path(case_dir).exists():
        print(f"Test case not found: {case_dir}")
        return None

    print(f"Loading case: {case_dir}")

    data, seg, affine = load_brats_case(case_dir)
    print(f" Data shape: {data.shape}")
    print(f" Seg shape: {seg.shape if seg is not None else 'None'}")

    # BUGFIX: everything below (mask export, bbox/center extraction,
    # prompt saving) dereferences `seg`, so the original code crashed on
    # a case without segmentation even though the print above allowed
    # `seg is None`. Bail out gracefully instead.
    if seg is None:
        print("No segmentation found for this case; skipping mask/prompt steps.")
        return None

    case_name = Path(case_dir).name
    frames_dir, num_slices = convert_to_frames(
        data, output_dir, case_name,
        modality_idx=0,
        target_size=(512, 512)
    )
    print(f" Converted to {num_slices} frames: {frames_dir}")

    masks_dir = save_segmentation_masks(
        seg, output_dir, case_name,
        target_size=(512, 512)
    )
    print(f" Saved masks: {masks_dir}")

    # NOTE(review): assumes the in-plane dimensions live at axes 2-3 of
    # `data` — confirm against load_brats_case's output layout.
    original_size = data.shape[2:4]
    slice_idx, bbox, center = get_tumor_bbox_and_center(seg)
    print(f" Tumor center slice: {slice_idx}")
    print(f" Original bbox: {bbox}")
    print(f" Original center: {center}")

    prompt_info = save_prompt_info(
        output_dir, case_name, slice_idx, bbox, center,
        original_size, target_size=(512, 512)
    )
    print(f" Scaled bbox: {prompt_info['bbox']}")
    print(f" Scaled center: {prompt_info['center']}")

    print("\n✅ Preprocessing test passed!")
    return output_dir
|
|
|
|
|
|
|
|
def test_sam3_loading():
    """Verify that the SAM3 video model can be built from a local checkpoint.

    Returns:
        True when the model was built successfully, False otherwise
        (checkpoint missing or the builder raised an exception).
    """
    banner = "=" * 50
    print("\n" + banner)
    print("Testing SAM3 Model Loading...")
    print(banner)

    checkpoint_path = "/data/yty/sam3/sam3.pt"

    # Fail fast with a clear message rather than erroring inside the builder.
    if not Path(checkpoint_path).exists():
        print(f"Checkpoint not found: {checkpoint_path}")
        return False

    print(f"Loading checkpoint: {checkpoint_path}")

    try:
        from sam3.model_builder import build_sam3_video_model

        target_device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model = build_sam3_video_model(
            checkpoint_path=checkpoint_path,
            load_from_HF=False,
            device=target_device
        )

        print(f" Model loaded successfully!")
        # Report where the weights actually live (first parameter's device).
        print(f" Device: {next(model.parameters()).device}")

        print("\n✅ Model loading test passed!")
        return True

    except Exception as e:
        print(f"Error loading model: {e}")
        import traceback
        traceback.print_exc()
        return False
|
|
|
|
|
|
|
|
def test_inference(processed_dir): |
|
|
"""测试推理""" |
|
|
print("\n" + "="*50) |
|
|
print("Testing SAM3 Inference...") |
|
|
print("="*50) |
|
|
|
|
|
if processed_dir is None: |
|
|
print("Skipping inference test (no processed data)") |
|
|
return |
|
|
|
|
|
checkpoint_path = "/data/yty/sam3/sam3.pt" |
|
|
|
|
|
try: |
|
|
from infer_brats_sam3 import MedSAM3VideoInference, load_prompt_info |
|
|
|
|
|
|
|
|
print("Initializing MedSAM3VideoInference...") |
|
|
model = MedSAM3VideoInference( |
|
|
checkpoint_path=checkpoint_path, |
|
|
device='cuda' if torch.cuda.is_available() else 'cpu' |
|
|
) |
|
|
|
|
|
|
|
|
case_dirs = sorted([d for d in Path(processed_dir).iterdir() if d.is_dir()]) |
|
|
if not case_dirs: |
|
|
print("No processed cases found") |
|
|
return |
|
|
|
|
|
case_dir = case_dirs[0] |
|
|
case_name = case_dir.name |
|
|
frames_dir = case_dir / "frames" |
|
|
|
|
|
print(f"Testing on case: {case_name}") |
|
|
|
|
|
|
|
|
prompt_info = load_prompt_info(case_dir) |
|
|
if prompt_info is None: |
|
|
print("No prompt info found") |
|
|
return |
|
|
|
|
|
print(f" Prompt slice: {prompt_info['slice_idx']}") |
|
|
print(f" Bbox: {prompt_info['bbox']}") |
|
|
|
|
|
|
|
|
print("Running inference...") |
|
|
pred_masks = model.segment_3d_volume( |
|
|
frames_dir=str(frames_dir), |
|
|
prompt_slice_idx=prompt_info['slice_idx'], |
|
|
prompt_type='box', |
|
|
bbox=prompt_info['bbox'] |
|
|
) |
|
|
|
|
|
print(f" Output shape: {pred_masks.shape}") |
|
|
print(f" Non-zero slices: {np.sum(pred_masks.sum(axis=(1,2)) > 0)}") |
|
|
|
|
|
print("\n✅ Inference test passed!") |
|
|
|
|
|
except Exception as e: |
|
|
print(f"Error in inference: {e}") |
|
|
import traceback |
|
|
traceback.print_exc() |
|
|
|
|
|
|
|
|
def main():
    """Drive the quick-test sequence: preprocessing -> model load -> inference."""
    banner = "=" * 60
    print(banner)
    print(" MedSAM3 BraTS Quick Test")
    print(banner)

    # Report the compute environment up front.
    print(f"\nCUDA available: {torch.cuda.is_available()}")
    if torch.cuda.is_available():
        print(f"CUDA device: {torch.cuda.get_device_name(0)}")

    # Step 1: preprocess one case (returns the output dir, or None on failure).
    processed_dir = test_preprocessing()

    # Step 2: try loading the SAM3 model.
    model_ok = test_sam3_loading()

    # Step 3: run inference only when the model loaded successfully.
    if model_ok:
        test_inference(processed_dir)

    print("\n" + banner)
    print(" Quick Test Complete!")
    print(banner)
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
main() |
|
|
|