|
|
|
|
|
""" |
|
|
BraTS2023 数据集类 - 用于SAM3训练 |
|
|
将3D医学数据转换为SAM3可处理的视频格式 |
|
|
""" |
|
|
|
|
|
import os |
|
|
import random |
|
|
import numpy as np |
|
|
import torch |
|
|
from torch.utils.data import Dataset |
|
|
import nibabel as nib |
|
|
from pathlib import Path |
|
|
from PIL import Image |
|
|
import cv2 |
|
|
import json |
|
|
from typing import Optional, List, Dict, Tuple, Any |
|
|
|
|
|
|
|
|
def normalize_intensity(volume, low_percentile=0.5, high_percentile=99.5):
    """Percentile-based intensity normalization of a volume.

    Non-zero voxels define the percentile window; the volume is clipped to
    [low, high] and linearly rescaled to roughly [0, 1]. A volume with no
    positive voxels is returned unchanged.
    """
    foreground = volume > 0
    if not foreground.any():
        # Nothing to normalize against (e.g. an all-background volume).
        return volume
    lo, hi = np.percentile(volume[foreground], [low_percentile, high_percentile])
    clipped = np.clip(volume, lo, hi)
    # Small epsilon guards against division by zero when lo == hi.
    return (clipped - lo) / (hi - lo + 1e-8)
|
|
|
|
|
|
|
|
class BraTSVideoDataset(Dataset):
    """BraTS dataset served as video sequences for SAM3 training.

    Each 3D volume's slices are treated as video frames so that SAM3-style
    video models can train directly on the 3D medical data.
    """

    def __init__(
        self,
        data_root: str,
        split: str = 'train',
        modality: int = 0,
        target_size: Tuple[int, int] = (512, 512),
        num_frames: int = 8,
        frame_stride: int = 1,
        augment: bool = True,
        train_ratio: float = 0.9,
        val_ratio: float = 0.1,
        test_ratio: float = 0.0,
        seed: int = 42,
        split_json: Optional[str] = None,
        normalize_mean: Tuple[float, ...] = (0.5, 0.5, 0.5),
        normalize_std: Tuple[float, ...] = (0.5, 0.5, 0.5),
    ):
        """
        Args:
            data_root: BraTS root directory (one sub-directory per case).
            split: 'train', 'val' or 'test'.
            modality: modality index (0=t1c, 1=t1n, 2=t2f, 3=t2w).
            target_size: target image size passed to cv2.resize.
            num_frames: number of frames per training sample.
            frame_stride: slice stride between sampled frames.
            augment: enable data augmentation (effective on the train split only).
            train_ratio: fraction of cases used for training.
            val_ratio: fraction of cases used for validation.
            test_ratio: fraction of cases used for testing; the three ratios
                must sum to 1.0.
            seed: random seed for the deterministic case-level shuffle.
            split_json: optional JSON file holding explicit per-split case
                lists; overrides the ratio-based split when provided.
            normalize_mean: per-channel mean for frame normalization.
            normalize_std: per-channel std for frame normalization.
        """
        super().__init__()

        self.data_root = Path(data_root)
        self.split = split
        self.modality = modality
        self.modality_names = ['t1c', 't1n', 't2f', 't2w']
        self.target_size = target_size
        self.num_frames = num_frames
        self.frame_stride = frame_stride
        # Augmentation is only ever applied to the training split.
        self.augment = augment and (split == 'train')
        self.normalize_mean = normalize_mean
        self.normalize_std = normalize_std

        # Every case is a sub-directory of the data root.
        all_cases = sorted([d for d in self.data_root.iterdir() if d.is_dir()])

        def _validate_ratios(tr, vr, ter):
            # The three ratios must partition the case list exactly.
            s = float(tr) + float(vr) + float(ter)
            if abs(s - 1.0) > 1e-6:
                raise ValueError(f"train/val/test ratios must sum to 1.0, got {s}")

        def _split_by_ratio(case_list: List[Path], tr: float, vr: float, ter: float, sd: int):
            # Deterministic (seeded) shuffle followed by a ratio split.
            _validate_ratios(tr, vr, ter)
            case_list = list(case_list)
            rng = random.Random(sd)
            rng.shuffle(case_list)
            n = len(case_list)
            n_train = int(round(n * tr))
            n_val = int(round(n * vr))
            # Clamp so the partitions never overlap or exceed n.
            n_train = min(max(n_train, 0), n)
            n_val = min(max(n_val, 0), n - n_train)
            train = case_list[:n_train]
            val = case_list[n_train : n_train + n_val]
            test = case_list[n_train + n_val :]
            return {"train": train, "val": val, "test": test}

        def _split_by_json(case_list: List[Path], split_file: str):
            # The JSON may nest the case lists under a "splits" key or be
            # the split mapping itself.
            with open(split_file, "r") as f:
                cfg = json.load(f)
            splits = cfg.get("splits", cfg)

            wanted = splits.get(self.split)
            if wanted is None:
                raise KeyError(f"split '{self.split}' not found in split json: {split_file}")
            wanted = set(wanted)
            return [c for c in case_list if c.name in wanted]

        if split_json:
            self.cases = _split_by_json(all_cases, split_json)
        else:
            parts = _split_by_ratio(all_cases, train_ratio, val_ratio, test_ratio, seed)
            if split not in parts:
                raise ValueError(f"split must be one of train/val/test, got: {split}")
            self.cases = parts[split]

        print(f"BraTS {split} set: {len(self.cases)} cases")

        self.case_info = self._preload_case_info()

    def _preload_case_info(self) -> List[Dict]:
        """Resolve modality and segmentation file paths for every case.

        Cases missing either file are silently skipped. Both the
        "<mod>.nii.gz" and "<case>-<mod>.nii.gz" naming layouts are
        supported.
        """
        case_info = []
        for case_dir in self.cases:
            case_name = case_dir.name

            # Locate the requested modality volume.
            mod_name = self.modality_names[self.modality]
            possible_paths = [
                case_dir / f"{mod_name}.nii.gz",
                case_dir / f"{case_name}-{mod_name}.nii.gz",
            ]

            mod_path = None
            for p in possible_paths:
                if p.exists():
                    mod_path = p
                    break

            if mod_path is None:
                continue

            # Locate the segmentation volume.
            seg_paths = [
                case_dir / "seg.nii.gz",
                case_dir / f"{case_name}-seg.nii.gz",
            ]

            seg_path = None
            for p in seg_paths:
                if p.exists():
                    seg_path = p
                    break

            if seg_path is None:
                continue

            case_info.append({
                'case_name': case_name,
                'mod_path': str(mod_path),
                'seg_path': str(seg_path),
            })

        return case_info

    def __len__(self) -> int:
        return len(self.case_info)

    def _load_volume(self, path: str) -> np.ndarray:
        """Load a NIfTI volume as a float32 numpy array."""
        nii = nib.load(path)
        volume = nii.get_fdata().astype(np.float32)
        return volume

    def _get_tumor_slices(self, seg: np.ndarray) -> List[int]:
        """Return the indices of slices that contain any tumor voxel."""
        # Reorient so the slice axis comes first, if needed.
        if seg.shape[0] > seg.shape[2]:
            seg = np.transpose(seg, (2, 0, 1))

        tumor_mask = (seg > 0)
        tumor_areas = tumor_mask.sum(axis=(1, 2))
        tumor_slices = np.where(tumor_areas > 0)[0].tolist()

        return tumor_slices

    def _sample_frames(self, tumor_slices: List[int], total_slices: int) -> List[int]:
        """Sample exactly `num_frames` slice indices centered on the tumor.

        Falls back to a window around the volume center when no tumor
        slice exists. When the volume is too short the last index is
        repeated to pad to `num_frames`.
        """
        if len(tumor_slices) == 0:
            # No tumor anywhere: sample around the volume center instead.
            center = total_slices // 2
            tumor_slices = list(range(max(0, center - 20), min(total_slices, center + 20)))

        needed_range = self.num_frames * self.frame_stride

        if len(tumor_slices) > 0:
            # Use the median tumor slice as the window center.
            center_slice = tumor_slices[len(tumor_slices) // 2]
        else:
            # Degenerate volume (no slices at all).
            center_slice = total_slices // 2

        # Place a `needed_range`-wide window around the center slice.
        half_range = needed_range // 2
        start_idx = max(0, center_slice - half_range)
        end_idx = min(total_slices, start_idx + needed_range)

        # Shift the window left if it ran past the end of the volume.
        if end_idx - start_idx < needed_range:
            start_idx = max(0, end_idx - needed_range)

        available = list(range(start_idx, end_idx, self.frame_stride))

        if len(available) < self.num_frames:
            # Pad by repeating the last frame index.
            available = available + [available[-1]] * (self.num_frames - len(available))
        else:
            available = available[:self.num_frames]

        return available

    def _process_slice(self, slice_2d: np.ndarray) -> np.ndarray:
        """Convert one raw slice into an RGB uint8 image of target size."""
        # Percentile-normalize intensities to [0, 1].
        slice_2d = normalize_intensity(slice_2d)

        # Scale to uint8.
        slice_2d = (slice_2d * 255).astype(np.uint8)

        # Replicate the single channel to fake RGB for the model.
        slice_rgb = np.stack([slice_2d, slice_2d, slice_2d], axis=-1)

        if self.target_size is not None:
            slice_rgb = cv2.resize(slice_rgb, self.target_size, interpolation=cv2.INTER_LINEAR)

        return slice_rgb

    def _process_mask(self, mask_2d: np.ndarray, num_classes: int = 4) -> np.ndarray:
        """Process one mask slice.

        BraTS labels:
            0: background
            1: NCR (Necrotic tumor core)
            2: ED (Peritumoral Edema)
            3: ET (Enhancing tumor)

        Args:
            mask_2d: raw label slice.
            num_classes: number of classes (4 = background + 3 tumor
                classes); kept for API compatibility, currently unused.
        """
        mask_2d = mask_2d.astype(np.uint8)

        # NEAREST interpolation keeps label values intact.
        if self.target_size is not None:
            mask_2d = cv2.resize(mask_2d, self.target_size, interpolation=cv2.INTER_NEAREST)

        return mask_2d

    def _get_bbox_from_mask(self, mask: np.ndarray) -> Optional[Tuple[float, float, float, float]]:
        """Return the (x_min, y_min, x_max, y_max) bbox of a mask, normalized
        to [0, 1]; None if the mask is empty."""
        if mask.sum() == 0:
            return None

        rows = np.any(mask, axis=1)
        cols = np.any(mask, axis=0)

        y_indices = np.where(rows)[0]
        x_indices = np.where(cols)[0]

        if len(y_indices) == 0 or len(x_indices) == 0:
            return None

        y_min, y_max = y_indices[0], y_indices[-1]
        x_min, x_max = x_indices[0], x_indices[-1]

        h, w = mask.shape

        return (x_min / w, y_min / h, x_max / w, y_max / h)

    def _augment(self, frames: List[np.ndarray], masks: List[np.ndarray]) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        """Apply the same random flips/rotation to every frame and mask."""
        if not self.augment:
            return frames, masks

        # Horizontal flip.
        if random.random() > 0.5:
            frames = [np.fliplr(f).copy() for f in frames]
            masks = [np.fliplr(m).copy() for m in masks]

        # Vertical flip.
        if random.random() > 0.5:
            frames = [np.flipud(f).copy() for f in frames]
            masks = [np.flipud(m).copy() for m in masks]

        # Random 90-degree rotation.
        if random.random() > 0.5:
            k = random.choice([1, 2, 3])
            frames = [np.rot90(f, k).copy() for f in frames]
            masks = [np.rot90(m, k).copy() for m in masks]

        return frames, masks

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """
        Build one video training sample.

        Returns:
            Dict containing:
                - frames: (T, C, H, W) normalized video frames
                - masks: (T, H, W) integer segmentation masks
                - bboxes: (T, 4) bounding boxes (normalized coordinates)
                - frame_indices: slice indices the frames were taken from
                - case_name: case identifier
                - num_frames: number of frames returned
        """
        info = self.case_info[idx]

        # Load the modality volume and its segmentation.
        volume = self._load_volume(info['mod_path'])
        seg = self._load_volume(info['seg_path'])

        # Reorient so the slice axis comes first (D, H, W).
        if volume.shape[0] > volume.shape[2]:
            volume = np.transpose(volume, (2, 0, 1))
            seg = np.transpose(seg, (2, 0, 1))

        total_slices = volume.shape[0]

        # Pick a contiguous window of slices centered on the tumor.
        tumor_slices = self._get_tumor_slices(seg)
        frame_indices = self._sample_frames(tumor_slices, total_slices)

        frames = []
        masks = []

        # Use a dedicated loop variable so the `idx` parameter is not shadowed.
        for frame_idx in frame_indices:
            frame = self._process_slice(volume[frame_idx])
            mask = self._process_mask(seg[frame_idx])
            frames.append(frame)
            masks.append(mask)

        frames, masks = self._augment(frames, masks)

        # Derive a bbox per frame; frames without tumor get the full image.
        bboxes = []
        for mask in masks:
            bbox = self._get_bbox_from_mask(mask)
            if bbox is None:
                bbox = (0.0, 0.0, 1.0, 1.0)
            bboxes.append(bbox)

        # (T, H, W, C) uint8 -> (T, C, H, W) float in [0, 1].
        frames_tensor = torch.stack([
            torch.from_numpy(f).permute(2, 0, 1).float() / 255.0
            for f in frames
        ])

        # Channel-wise normalization.
        mean = torch.tensor(self.normalize_mean).view(1, 3, 1, 1)
        std = torch.tensor(self.normalize_std).view(1, 3, 1, 1)
        frames_tensor = (frames_tensor - mean) / std

        masks_tensor = torch.stack([
            torch.from_numpy(m).long() for m in masks
        ])

        bboxes_tensor = torch.tensor(bboxes, dtype=torch.float32)

        return {
            'frames': frames_tensor,
            'masks': masks_tensor,
            'bboxes': bboxes_tensor,
            'frame_indices': torch.tensor(frame_indices),
            'case_name': info['case_name'],
            'num_frames': len(frame_indices),
        }
|
|
|
|
|
|
|
|
class BraTSImageDataset(Dataset):
    """BraTS dataset as individual 2D slices, for image-level training."""

    def __init__(
        self,
        data_root: str,
        split: str = 'train',
        modality: int = 0,
        target_size: Tuple[int, int] = (512, 512),
        augment: bool = True,
        train_ratio: float = 0.9,
        val_ratio: float = 0.1,
        test_ratio: float = 0.0,
        seed: int = 42,
        split_json: Optional[str] = None,
        only_tumor_slices: bool = True,
        normalize_mean: Tuple[float, ...] = (0.5, 0.5, 0.5),
        normalize_std: Tuple[float, ...] = (0.5, 0.5, 0.5),
    ):
        """
        Args:
            data_root: BraTS root directory (one sub-directory per case).
            split: 'train', 'val' or 'test'.
            modality: modality index (0=t1c, 1=t1n, 2=t2f, 3=t2w).
            target_size: target image size passed to cv2.resize.
            augment: enable augmentation (effective on the train split only).
            train_ratio/val_ratio/test_ratio: split fractions; must sum to 1.0.
            seed: random seed for the deterministic case-level shuffle.
            split_json: optional JSON file with explicit per-split case lists;
                overrides the ratio-based split when provided.
            only_tumor_slices: index only slices that contain tumor voxels.
            normalize_mean: per-channel mean for image normalization.
            normalize_std: per-channel std for image normalization.
        """
        super().__init__()

        self.data_root = Path(data_root)
        self.split = split
        self.modality = modality
        self.modality_names = ['t1c', 't1n', 't2f', 't2w']
        self.target_size = target_size
        # Augmentation is only ever applied to the training split.
        self.augment = augment and (split == 'train')
        self.only_tumor_slices = only_tumor_slices
        self.normalize_mean = normalize_mean
        self.normalize_std = normalize_std

        # Every case is a sub-directory of the data root.
        all_cases = sorted([d for d in self.data_root.iterdir() if d.is_dir()])

        def _validate_ratios(tr, vr, ter):
            # The three ratios must partition the case list exactly.
            s = float(tr) + float(vr) + float(ter)
            if abs(s - 1.0) > 1e-6:
                raise ValueError(f"train/val/test ratios must sum to 1.0, got {s}")

        def _split_by_ratio(case_list: List[Path], tr: float, vr: float, ter: float, sd: int):
            # Deterministic (seeded) shuffle followed by a ratio split.
            _validate_ratios(tr, vr, ter)
            case_list = list(case_list)
            rng = random.Random(sd)
            rng.shuffle(case_list)
            n = len(case_list)
            n_train = int(round(n * tr))
            n_val = int(round(n * vr))
            # Clamp so the partitions never overlap or exceed n.
            n_train = min(max(n_train, 0), n)
            n_val = min(max(n_val, 0), n - n_train)
            train = case_list[:n_train]
            val = case_list[n_train : n_train + n_val]
            test = case_list[n_train + n_val :]
            return {"train": train, "val": val, "test": test}

        def _split_by_json(case_list: List[Path], split_file: str):
            # The JSON may nest the case lists under a "splits" key or be
            # the split mapping itself.
            with open(split_file, "r") as f:
                cfg = json.load(f)
            splits = cfg.get("splits", cfg)
            wanted = splits.get(self.split)
            if wanted is None:
                raise KeyError(f"split '{self.split}' not found in split json: {split_file}")
            wanted = set(wanted)
            return [c for c in case_list if c.name in wanted]

        if split_json:
            cases = _split_by_json(all_cases, split_json)
        else:
            parts = _split_by_ratio(all_cases, train_ratio, val_ratio, test_ratio, seed)
            if split not in parts:
                raise ValueError(f"split must be one of train/val/test, got: {split}")
            cases = parts[split]

        self.samples = self._build_sample_index(cases)
        print(f"BraTS {split} set: {len(self.samples)} slices from {len(cases)} cases")

    def _build_sample_index(self, cases: List[Path]) -> List[Dict]:
        """Build a slice-level index across all cases.

        Cases missing the modality or segmentation file are skipped; when
        `only_tumor_slices` is set, slices without tumor are skipped too.
        """
        samples = []

        for case_dir in cases:
            case_name = case_dir.name

            # Both "<mod>.nii.gz" and "<case>-<mod>.nii.gz" layouts exist.
            mod_name = self.modality_names[self.modality]
            mod_path = None
            for name in [f"{mod_name}.nii.gz", f"{case_name}-{mod_name}.nii.gz"]:
                p = case_dir / name
                if p.exists():
                    mod_path = str(p)
                    break

            seg_path = None
            for name in ["seg.nii.gz", f"{case_name}-seg.nii.gz"]:
                p = case_dir / name
                if p.exists():
                    seg_path = str(p)
                    break

            if mod_path is None or seg_path is None:
                continue

            # Load the segmentation once per case to classify its slices.
            seg = nib.load(seg_path).get_fdata()
            if seg.shape[0] > seg.shape[2]:
                seg = np.transpose(seg, (2, 0, 1))

            for slice_idx in range(seg.shape[0]):
                # Cast to builtin bool: numpy.bool_ can trip up default
                # collate/serialization downstream.
                has_tumor = bool((seg[slice_idx] > 0).any())

                if self.only_tumor_slices and not has_tumor:
                    continue

                samples.append({
                    'case_name': case_name,
                    'mod_path': mod_path,
                    'seg_path': seg_path,
                    'slice_idx': slice_idx,
                    'has_tumor': has_tumor,
                })

        return samples

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """Load, preprocess and return one 2D slice sample.

        Returns:
            Dict with 'image' (C, H, W), 'mask' (H, W), 'bbox' (4,),
            'case_name', 'slice_idx' and 'has_tumor'.
        """
        sample = self.samples[idx]

        # Load the full volumes, then pick the requested slice.
        volume = nib.load(sample['mod_path']).get_fdata().astype(np.float32)
        seg = nib.load(sample['seg_path']).get_fdata().astype(np.float32)

        # Reorient so the slice axis comes first (D, H, W).
        if volume.shape[0] > volume.shape[2]:
            volume = np.transpose(volume, (2, 0, 1))
            seg = np.transpose(seg, (2, 0, 1))

        slice_idx = sample['slice_idx']
        image = volume[slice_idx]
        mask = seg[slice_idx]

        # Intensity-normalize and convert to 3-channel uint8.
        image = normalize_intensity(image)
        image = (image * 255).astype(np.uint8)
        image = np.stack([image, image, image], axis=-1)

        # Cast labels to uint8 before resizing, matching the video
        # pipeline's _process_mask; NEAREST keeps label values intact.
        mask = mask.astype(np.uint8)

        if self.target_size is not None:
            image = cv2.resize(image, self.target_size, interpolation=cv2.INTER_LINEAR)
            mask = cv2.resize(mask, self.target_size, interpolation=cv2.INTER_NEAREST)

        # Simple flip augmentation (train split only).
        if self.augment:
            if random.random() > 0.5:
                image = np.fliplr(image).copy()
                mask = np.fliplr(mask).copy()
            if random.random() > 0.5:
                image = np.flipud(image).copy()
                mask = np.flipud(mask).copy()

        # Empty masks fall back to a full-image box.
        bbox = self._get_bbox_from_mask(mask)
        if bbox is None:
            bbox = (0.0, 0.0, 1.0, 1.0)

        # (H, W, C) uint8 -> (C, H, W) float in [0, 1], then normalize.
        image_tensor = torch.from_numpy(image).permute(2, 0, 1).float() / 255.0
        mean = torch.tensor(self.normalize_mean).view(3, 1, 1)
        std = torch.tensor(self.normalize_std).view(3, 1, 1)
        image_tensor = (image_tensor - mean) / std

        mask_tensor = torch.from_numpy(mask).long()
        bbox_tensor = torch.tensor(bbox, dtype=torch.float32)

        return {
            'image': image_tensor,
            'mask': mask_tensor,
            'bbox': bbox_tensor,
            'case_name': sample['case_name'],
            'slice_idx': slice_idx,
            'has_tumor': sample['has_tumor'],
        }

    def _get_bbox_from_mask(self, mask: np.ndarray) -> Optional[Tuple[float, float, float, float]]:
        """Return the (x_min, y_min, x_max, y_max) bbox of a mask, normalized
        to [0, 1]; None if the mask is empty."""
        if mask.sum() == 0:
            return None

        rows = np.any(mask, axis=1)
        cols = np.any(mask, axis=0)

        y_indices = np.where(rows)[0]
        x_indices = np.where(cols)[0]

        if len(y_indices) == 0 or len(x_indices) == 0:
            return None

        y_min, y_max = y_indices[0], y_indices[-1]
        x_min, x_max = x_indices[0], x_indices[-1]

        h, w = mask.shape
        return (x_min / w, y_min / h, x_max / w, y_max / h)
|
|
|
|
|
|
|
|
def collate_fn_brats(batch: List[Dict]) -> Dict[str, Any]:
    """Collate BraTS samples into a batch.

    Video samples (with a 'frames' key) and image samples (with an
    'image' key) are both supported; tensors are stacked along a new
    leading batch dimension and case names are collected into a list.
    """
    names = [item['case_name'] for item in batch]
    is_video = 'frames' in batch[0]

    if is_video:
        collated = {
            'frames': torch.stack([item['frames'] for item in batch]),
            'masks': torch.stack([item['masks'] for item in batch]),
            'bboxes': torch.stack([item['bboxes'] for item in batch]),
            'case_names': names,
        }
    else:
        collated = {
            'images': torch.stack([item['image'] for item in batch]),
            'masks': torch.stack([item['mask'] for item in batch]),
            'bboxes': torch.stack([item['bbox'] for item in batch]),
            'case_names': names,
        }

    return collated
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
|
|
|
data_root = "/data/yty/brats2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData" |
|
|
|
|
|
print("Testing BraTSVideoDataset...") |
|
|
video_ds = BraTSVideoDataset(data_root, split='train', num_frames=8) |
|
|
print(f" Total samples: {len(video_ds)}") |
|
|
|
|
|
sample = video_ds[0] |
|
|
print(f" frames shape: {sample['frames'].shape}") |
|
|
print(f" masks shape: {sample['masks'].shape}") |
|
|
print(f" bboxes shape: {sample['bboxes'].shape}") |
|
|
|
|
|
print("\nTesting BraTSImageDataset...") |
|
|
image_ds = BraTSImageDataset(data_root, split='train') |
|
|
print(f" Total slices: {len(image_ds)}") |
|
|
|
|
|
sample = image_ds[0] |
|
|
print(f" image shape: {sample['image'].shape}") |
|
|
print(f" mask shape: {sample['mask'].shape}") |
|
|
print(f" bbox: {sample['bbox']}") |
|
|
|