# NOTE: removed non-Python export artifacts (file-size banner, git-blame commit
# hashes, and a line-number index) that preceded the actual source.
import datasets
from typing import List
import pandas as pd
import numpy as np
from pathlib import Path
import os
_DESCRIPTION = "Prostate dataset."
class Prostate158Dataset(datasets.GeneratorBasedBuilder):
    """Builder for the Prostate158 prostate-MRI dataset.

    Two configurations are offered:

    * ``"2d"``   — every axial slice of each NIfTI volume is yielded as a
      separate example of five PIL images (three modalities + two reader
      annotations).
    * ``"3d_path"`` (default) — one example per study, containing the string
      paths of the five NIfTI volumes so the caller can load them lazily.

    The archive ``data.zip`` is expected to contain ``train.csv``,
    ``valid.csv`` and ``test.csv``; each CSV row holds the relative paths of
    the five files for one study (columns listed in ``_IMAGE_COLUMNS``).
    """

    VERSION = datasets.Version("1.1.0")

    # NOTE(review): "2d_array" / "3d_array" configs (Array2D/Array3D features)
    # existed as commented-out drafts and were removed as dead code; restore
    # from history if those formats are ever needed.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="2d",
            version=VERSION,
            description="Return all the dataset in a 2d image format",
        ),
        datasets.BuilderConfig(
            name="3d_path",
            version=VERSION,
            description="Return all the dataset in a 3d path format",
        ),
    ]

    DEFAULT_CONFIG_NAME = "3d_path"

    # CSV columns holding the per-study file paths (modalities + annotations).
    _IMAGE_COLUMNS = ["t2", "adc", "dwi", "t2_anatomy_reader1", "adc_tumor_reader1"]

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` for the active config.

        Raises:
            ValueError: if ``self.config.name`` is not a known configuration
                (previously this fell through to a confusing ``NameError``).
        """
        if self.config.name == "2d":
            features = datasets.Features(
                {column: datasets.Image() for column in self._IMAGE_COLUMNS}
            )
        elif self.config.name == "3d_path":
            features = datasets.Features(
                {
                    f"{column}_path": datasets.Value(dtype="string")
                    for column in self._IMAGE_COLUMNS
                }
            )
        else:
            raise ValueError(f"Unsupported config name: {self.config.name!r}")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(
        self,
        dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Download/extract ``data.zip`` and declare the three splits."""
        root = Path(dl_manager.download_and_extract("data.zip"))
        split_names = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "valid"),
            (datasets.Split.TEST, "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"split": csv_stem, "downloaded_files": root},
            )
            for split, csv_stem in split_names
        ]

    def _generate_examples(self, split, downloaded_files):
        """Yield ``(key, example)`` pairs for the requested split.

        Args:
            split: CSV stem for the split (``"train"``/``"valid"``/``"test"``).
            downloaded_files: :class:`pathlib.Path` of the extracted archive.

        Raises:
            ValueError: if the active config name is not handled.
        """
        df = pd.read_csv(downloaded_files / f"{split}.csv")
        rows = df.to_dict(orient="records")
        if self.config.name == "2d":
            # Local imports: keep heavy/optional deps out of the module scope
            # for users of the "3d_path" config.
            import nibabel as nib
            from PIL import Image
            yield_index = -1
            for row in rows:
                volumes = {
                    column: nib.load(downloaded_files / row[column]).get_fdata()
                    for column in self._IMAGE_COLUMNS
                }
                # Assumes all five volumes share the t2 volume's slice count
                # along axis 2 — TODO confirm against the dataset layout.
                for i in range(volumes["t2"].shape[2]):
                    yield_index += 1
                    yield yield_index, {
                        column: Image.fromarray(volume[:, :, i])
                        for column, volume in volumes.items()
                    }
        elif self.config.name == "3d_path":
            for idx, row in enumerate(rows):
                # BUGFIX: cast Path -> str so the values match the declared
                # Value(dtype="string") features instead of yielding Path objects.
                yield idx, {
                    f"{column}_path": str(downloaded_files / row[column])
                    for column in self._IMAGE_COLUMNS
                }
        else:
            raise ValueError(f"Unsupported config name: {self.config.name!r}")