Delete .virtual_documents
Browse files
.virtual_documents/data/Untitled.ipynb
DELETED
|
@@ -1,11 +0,0 @@
|
|
| 1 |
-
import pandas as pd

# Load the participant metadata and collect the IDs of the FCD-group subjects.
df = pd.read_csv("participants.tsv", sep="\t")
is_fcd = df["group"] == "fcd"
fcd_ids = df.loc[is_fcd, "participant_id"]
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.virtual_documents/data/data_tests.ipynb
DELETED
|
@@ -1,255 +0,0 @@
|
|
| 1 |
-
import pandas as pd

# Load participant metadata.
df = pd.read_csv("participants.tsv", sep="\t")

# IDs of all FCD-group subjects.
fcd_ids = df[df["group"] == "fcd"]["participant_id"]

# Current number of FCD training subjects.
train = df[(df["group"] == "fcd") & (df["split"] == "train")]
print(len(train))

# Re-split the FCD subjects 90/10 into train/val.
# BUGFIX: the original shuffled with .reset_index(drop=True), which renumbered
# fcd_df's index to 0..n-1; the later `df.loc[fcd_df.index, 'split'] = ...`
# then wrote the new labels onto the FIRST n rows of `df` (including non-FCD
# participants) instead of onto the FCD rows. Keeping the original index makes
# the assignment land on the intended rows.
fcd_df = df[df["group"] == "fcd"].sample(frac=1, random_state=42)
n_train = int(len(fcd_df) * 0.9)
df.loc[fcd_df.index[:n_train], "split"] = "train"
df.loc[fcd_df.index[n_train:], "split"] = "val"

# New train amount.
train = df[(df["group"] == "fcd") & (df["split"] == "train")]
print(len(train))

# Train ids.
train_ids = df[(df["group"] == "fcd") & (df["split"] == "train")]["participant_id"]
train_ids.head()

val = df[(df["group"] == "fcd") & (df["split"] == "val")]
print(len(val))

# Val ids.
val_ids = df[(df["group"] == "fcd") & (df["split"] == "val")]["participant_id"]
val_ids.head()
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
from pathlib import Path
import re


def _count_subjects_with(ids, name_pattern):
    """Count subject IDs whose <id>/anat/ folder has a file fully matching *name_pattern*."""
    pattern = re.compile(name_pattern)
    count = 0
    for subject_id in ids:  # renamed from `id`, which shadowed the builtin
        folder = Path(f"{subject_id}/anat/")
        if any(pattern.fullmatch(p.name) for p in folder.iterdir()):
            count += 1
    return count


# How many FCD subjects have a lesion mask / T1w / FLAIR volume.
# (The original had three copy-pasted loops whose comments all claimed
# "_roi.nii.gz"; the trailing `$` was also redundant with fullmatch.)
masks = _count_subjects_with(fcd_ids, r".*_roi\.nii\.gz")
t1w = _count_subjects_with(fcd_ids, r".*_T1w\.nii\.gz")
flair = _count_subjects_with(fcd_ids, r".*_FLAIR\.nii\.gz")

print(masks, t1w, flair)
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
import os, nibabel, torch, numpy as np
|
| 80 |
-
import torch.nn as nn
|
| 81 |
-
from torch.utils.data import Dataset
|
| 82 |
-
import pandas as pd
|
| 83 |
-
import re
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
class InpaintVolumes(Dataset):
    """
    Dataset of multi-modal MRI volumes for lesion in-painting.

    Each item yields:
        Y      : float32 [C, D, H, W] full multi-modal MRI stack
        M      : float32 [1, D, H, W] binary in-painting mask (shared by all mods)
        Y_void : Y * (1 - M), the context with the lesion region blanked
        name   : participant identifier string
        affine : voxel-to-world affine of the first modality's NIfTI
    """

    # ------------------------------------------------------------
    def __init__(self,
                 root_dir: str,
                 subset: str = 'train',         # 'train' | 'val'
                 img_size: int = 256,           # 128 or 256 cube
                 modalities: tuple = ('T1w',),  # order defines channel order
                 normalize=None):
        super().__init__()
        self.root_dir = os.path.expanduser(root_dir)
        self.subset = subset
        self.img_size = img_size
        self.modalities = modalities
        # Optional extra per-volume transform; identity when not supplied.
        self.normalize = normalize or (lambda x: x)
        self.cases = self._index_cases()  # list[dict]

    # ------------------------------------------------------------
    def _index_cases(self):
        """
        Build a list like:
            {'img': {'T1w': path, 'FLAIR': path, ...},
             'mask': path,
             'name': case_id}
        Edit only this block to suit your folder / filename scheme.
        """
        cases = []

        # metadata
        df = pd.read_csv(f"{self.root_dir}/participants.tsv", sep="\t")

        # Re-split the FCD subjects 90/10 into train/val.
        # BUGFIX: the original shuffled with .reset_index(drop=True), which
        # renumbered fcd_df's index to 0..n-1, so the subsequent
        # df.loc[fcd_df.index, 'split'] = ... wrote the new split labels onto
        # the FIRST n rows of df (possibly non-FCD rows) rather than onto the
        # FCD rows. Keeping the original index fixes the assignment target.
        fcd_df = df[df['group'] == 'fcd'].sample(frac=1, random_state=42)
        n_train = int(len(fcd_df) * 0.9)
        df.loc[fcd_df.index[:n_train], 'split'] = 'train'
        df.loc[fcd_df.index[n_train:], 'split'] = 'val'

        for participant_id in df[(df['split'] == self.subset) & (df['group'] == 'fcd')]['participant_id']:
            case_dir = f"{self.root_dir}/{participant_id}/anat/"
            files = os.listdir(case_dir)

            img_dict = {}
            for mod in self.modalities:
                pattern = re.compile(rf"^{re.escape(participant_id)}.*{re.escape(mod)}\.nii\.gz$")
                matches = [f for f in files if pattern.match(f)]
                assert matches, f"Missing {mod} for {participant_id} in {case_dir}"
                img_dict[mod] = os.path.join(case_dir, matches[0])

            mask_matches = [f for f in files if re.match(rf"^{re.escape(participant_id)}.*roi\.nii\.gz$", f)]
            # BUGFIX: fail with a clear message instead of a bare IndexError
            # when a subject has no ROI mask file (mirrors the modality check).
            assert mask_matches, f"Missing roi mask for {participant_id} in {case_dir}"
            mask_path = os.path.join(case_dir, mask_matches[0])

            cases.append({'img': img_dict, 'mask': mask_path, 'name': participant_id})

        return cases

    # ------------------------------------------------------------
    def _pad_to_cube(self, vol, fill=0.0):
        """Symmetric 3-D pad to [img_size³]. `vol` is [*, D, H, W]."""
        D, H, W = vol.shape[-3:]
        pad_D, pad_H, pad_W = self.img_size - D, self.img_size - H, self.img_size - W
        pad = (pad_W // 2, pad_W - pad_W // 2,
               pad_H // 2, pad_H - pad_H // 2,
               pad_D // 2, pad_D - pad_D // 2)
        return nn.functional.pad(vol, pad, value=fill)

    # ------------------------------------------------------------
    def __getitem__(self, idx):
        """Load, normalize, pad and mask one case; see class docstring for shapes."""
        rec = self.cases[idx]
        name = rec['name']

        # ---------- load C modalities --------------------------
        vols = []
        for mod in self.modalities:
            mod_path = rec['img'][mod]
            arr = nibabel.load(mod_path).get_fdata().astype(np.float32)

            # robust min-max clipping and normalization to ~[0, 1]
            lo, hi = np.quantile(arr, [0.001, 0.999])
            arr = np.clip(arr, lo, hi)
            arr = (arr - lo) / (hi - lo + 1e-6)

            # BUGFIX: the user-supplied `normalize` callable was stored in
            # __init__ but never used; apply it here (the default is the
            # identity, so default behavior is unchanged).
            arr = self.normalize(arr)

            vols.append(torch.from_numpy(arr))

        first_mod = self.modalities[0]
        affine = nibabel.load(rec['img'][first_mod]).affine

        Y = torch.stack(vols, dim=0)  # [C, D, H, W]

        # ---------- load mask ----------------------------------
        M_arr = nibabel.load(rec['mask']).get_fdata().astype(np.uint8)
        M = torch.from_numpy(M_arr).unsqueeze(0)  # [1, D, H, W]
        M = (M > 0).to(Y.dtype)

        # ---------- pad (and optional downsample) --------------
        # NOTE(review): when img_size == 128 the volume is padded to 128³ and
        # then pooled down to 64³ — the intent may have been pad-to-256 then
        # pool-to-128; confirm before relying on the 128 path.
        Y = self._pad_to_cube(Y, fill=0.0)
        M = self._pad_to_cube(M, fill=0.0)
        if self.img_size == 128:
            pool = nn.AvgPool3d(2, 2)
            Y = pool(Y)
            # BUGFIX: AvgPool turns the {0,1} mask into fractional edge
            # values; re-binarize so M keeps the binary contract documented
            # in the class docstring.
            M = (pool(M) > 0).to(Y.dtype)

        # ---------- derive context image -----------------------
        Y_void = Y * (1 - M)

        return Y, M, Y_void, name, affine  # [C,D,H,W], [1,D,H,W], [C,D,H,W], str, ndarray

    # ------------------------------------------------------------
    def __len__(self):
        return len(self.cases)
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
# Test cell ───────────────────────────────────────────────────
from pathlib import Path
from torch.utils.data import DataLoader
import numpy as np

root = Path("")  # ← update to the dataset root

ds = InpaintVolumes(
    root_dir=root,
    subset='train',
    img_size=256,
    modalities=('T1w',))

loader = DataLoader(ds, batch_size=1, shuffle=True, num_workers=0)

print(type(loader))

# grab one batched sample: the DataLoader prepends a batch dimension, so
# Y is [B, C, D, H, W] and M is [B, 1, D, H, W]
Y, M, Y_void, name, affine = next(iter(loader))

print("case:", name[0])
print("Y :", Y.shape, Y.dtype, f"min={Y.min():.3f}", f"max={Y.max():.3f}")
print("M :", M.shape, M.dtype, f"mask voxels={M.sum().item()}")
print("Y_void :", Y_void.shape, Y_void.dtype)

# quick assertions
C = len(ds.modalities)
print(Y.shape[1:], M.shape[1:])
# BUGFIX: the original checks ignored the batch dimension —
# `Y.shape[0] == C` compared the batch size with the channel count (true only
# because both were 1), and `Y.shape[1:] == M.shape[1:]` compared the channel
# axes of Y and M, which differ whenever C > 1. Compare the spatial axes and
# the channel axis explicitly instead.
assert Y.shape[2:] == M.shape[2:], "spatial mismatch"
assert Y.shape[1] == C, "channel count mismatch"
assert torch.allclose(Y * (1 - M), Y_void), "Y_void incorrect"

print(affine)

print("✅ dataloader sanity-check passed")


nibabel.save(nibabel.Nifti1Image(Y[0].numpy(), affine[0]),
             f"{name[0]}_T1w_proc.nii.gz")
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|