# Uploaded via HuggingFace large-folder tool (author: ChipYTY, commit fe8202e)
#!/usr/bin/env python3
"""
Quick test script - verify that the MedSAM3 pipeline works end to end.
Processes a single case and prints the results.
"""
import os
import sys
import numpy as np
import torch
from pathlib import Path
# Make the local SAM3 checkout importable before any `sam3.*` imports.
sys.path.insert(0, '/root/githubs/sam3')
def test_preprocessing():
    """Exercise the BraTS preprocessing pipeline on a single hard-coded case.

    Loads one BraTS volume, converts a modality to per-slice frame images,
    saves the segmentation masks, and writes the prompt (bbox/center)
    information consumed later by SAM3 inference.

    Returns:
        The output directory containing the processed data, or ``None``
        when the expected test case directory is missing.
    """
    banner = "=" * 50
    print("\n" + banner)
    print("Testing Data Preprocessing...")
    print(banner)
    from preprocess_brats import (
        convert_to_frames,
        get_tumor_bbox_and_center,
        load_brats_case,
        save_prompt_info,
        save_segmentation_masks,
    )
    # Hard-coded paths used only by this quick test.
    case_dir = "/data/yty/brats2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/BraTS-GLI-00000-000"
    output_dir = "/data/yty/brats23_sam3_test"
    if not Path(case_dir).exists():
        print(f"Test case not found: {case_dir}")
        return None
    print(f"Loading case: {case_dir}")

    # Load the raw volume plus segmentation labels.
    volume, seg, affine = load_brats_case(case_dir)
    print(f" Data shape: {volume.shape}")
    print(f" Seg shape: {seg.shape if seg is not None else 'None'}")

    # Convert one modality into per-slice frame images.
    case_name = Path(case_dir).name
    frames_dir, num_slices = convert_to_frames(
        volume, output_dir, case_name,
        modality_idx=0,  # T1ce
        target_size=(512, 512),
    )
    print(f" Converted to {num_slices} frames: {frames_dir}")

    # Persist the ground-truth masks alongside the frames.
    masks_dir = save_segmentation_masks(
        seg, output_dir, case_name,
        target_size=(512, 512),
    )
    print(f" Saved masks: {masks_dir}")

    # Derive the prompt: the slice at the tumor center with its bbox/center.
    original_size = volume.shape[2:4]
    slice_idx, bbox, center = get_tumor_bbox_and_center(seg)
    print(f" Tumor center slice: {slice_idx}")
    print(f" Original bbox: {bbox}")
    print(f" Original center: {center}")

    # Save the prompt, rescaled to the target frame size.
    prompt_info = save_prompt_info(
        output_dir, case_name, slice_idx, bbox, center,
        original_size, target_size=(512, 512),
    )
    print(f" Scaled bbox: {prompt_info['bbox']}")
    print(f" Scaled center: {prompt_info['center']}")
    print("\n✅ Preprocessing test passed!")
    return output_dir
def test_sam3_loading():
    """Try to build the SAM3 video model from the local checkpoint.

    Returns:
        ``True`` when the model loads successfully, ``False`` otherwise
        (missing checkpoint or any exception during construction).
    """
    banner = "=" * 50
    print("\n" + banner)
    print("Testing SAM3 Model Loading...")
    print(banner)
    checkpoint_path = "/data/yty/sam3/sam3.pt"
    if not Path(checkpoint_path).exists():
        print(f"Checkpoint not found: {checkpoint_path}")
        return False
    print(f"Loading checkpoint: {checkpoint_path}")
    try:
        from sam3.model_builder import build_sam3_video_model
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model = build_sam3_video_model(
            checkpoint_path=checkpoint_path,
            load_from_HF=False,
            device=device,
        )
        print(" Model loaded successfully!")
        print(f" Device: {next(model.parameters()).device}")
        print("\n✅ Model loading test passed!")
        return True
    except Exception as exc:
        # Best-effort diagnostic: report and dump the traceback, then signal failure.
        print(f"Error loading model: {exc}")
        import traceback
        traceback.print_exc()
        return False
def test_inference(processed_dir):
    """Run SAM3 3-D segmentation on the first preprocessed case.

    Args:
        processed_dir: Directory produced by the preprocessing test, or
            ``None`` to skip the inference test entirely.
    """
    banner = "=" * 50
    print("\n" + banner)
    print("Testing SAM3 Inference...")
    print(banner)
    if processed_dir is None:
        print("Skipping inference test (no processed data)")
        return
    checkpoint_path = "/data/yty/sam3/sam3.pt"
    try:
        from infer_brats_sam3 import MedSAM3VideoInference, load_prompt_info

        # Build the inference wrapper around the checkpoint.
        print("Initializing MedSAM3VideoInference...")
        model = MedSAM3VideoInference(
            checkpoint_path=checkpoint_path,
            device="cuda" if torch.cuda.is_available() else "cpu",
        )

        # Pick the first processed case (sorted for determinism).
        case_dirs = sorted(d for d in Path(processed_dir).iterdir() if d.is_dir())
        if not case_dirs:
            print("No processed cases found")
            return
        case_dir = case_dirs[0]
        case_name = case_dir.name
        frames_dir = case_dir / "frames"
        print(f"Testing on case: {case_name}")

        # Load the saved prompt (slice index + bbox) for this case.
        prompt_info = load_prompt_info(case_dir)
        if prompt_info is None:
            print("No prompt info found")
            return
        print(f" Prompt slice: {prompt_info['slice_idx']}")
        print(f" Bbox: {prompt_info['bbox']}")

        # Segment the whole volume using a box prompt on one slice.
        print("Running inference...")
        pred_masks = model.segment_3d_volume(
            frames_dir=str(frames_dir),
            prompt_slice_idx=prompt_info['slice_idx'],
            prompt_type='box',
            bbox=prompt_info['bbox'],
        )
        print(f" Output shape: {pred_masks.shape}")
        print(f" Non-zero slices: {np.sum(pred_masks.sum(axis=(1, 2)) > 0)}")
        print("\n✅ Inference test passed!")
    except Exception as exc:
        # Report and dump the traceback; this is a smoke test, so don't re-raise.
        print(f"Error in inference: {exc}")
        import traceback
        traceback.print_exc()
def main():
    """Run the quick tests in order: preprocessing, model loading, inference."""
    wide = "=" * 60
    print(wide)
    print(" MedSAM3 BraTS Quick Test")
    print(wide)

    # Report CUDA availability up front.
    cuda_ok = torch.cuda.is_available()
    print(f"\nCUDA available: {cuda_ok}")
    if cuda_ok:
        print(f"CUDA device: {torch.cuda.get_device_name(0)}")

    processed_dir = test_preprocessing()
    model_ok = test_sam3_loading()
    # Only attempt inference when the model loaded cleanly.
    if model_ok:
        test_inference(processed_dir)

    print("\n" + wide)
    print(" Quick Test Complete!")
    print(wide)


if __name__ == "__main__":
    main()