| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| """TODO: Add a description here.""" |
|
|
| |
| import csv |
| import json |
| import os |
| from typing import Dict, List, Mapping, Optional, Set, Sequence, Tuple, Union |
|
|
| import numpy as np |
|
|
| import datasets |
| import SimpleITK as sitk |
|
|
| |
def import_csv_data(filepath: str) -> List[Dict[str, str]]:
    """Read a CSV file and return every data row as a dict keyed by header.

    Args:
        filepath: Path to a CSV file with a header row.

    Returns:
        One dict per data row, mapping column name to its string value.
    """
    # newline='' is required when handing a file object to the csv module,
    # per the csv documentation, so embedded newlines are parsed correctly.
    with open(filepath, encoding='utf-8', newline='') as f:
        return list(csv.DictReader(f))
|
|
| |
| N_PATIENTS = 257 |
|
|
| |
| |
| _CITATION = """\ |
| @InProceedings{huggingface:dataset, |
| title = {A great new dataset}, |
| author={huggingface, Inc. |
| }, |
| year={2020} |
| } |
| """ |
|
|
| |
| |
| _DESCRIPTION = """\ |
| This new dataset is designed to solve this great NLP task and is crafted with a lot of care. |
| """ |
|
|
| _HOMEPAGE = "https://zenodo.org/records/10159290" |
|
|
| _LICENSE = """Creative Commons Attribution 4.0 International License \ |
| (https://creativecommons.org/licenses/by/4.0/legalcode)""" |
|
|
| |
| |
| |
| _URLS = { |
| "images":"https://zenodo.org/records/10159290/files/images.zip", |
| "masks":"https://zenodo.org/records/10159290/files/masks.zip", |
| "overview":"https://zenodo.org/records/10159290/files/overview.csv", |
| "gradings":"https://zenodo.org/records/10159290/files/radiological_gradings.csv", |
| } |
|
|
class CustomBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for SPIDER that adds a sagittal scan-type selection.

    Attributes:
        scan_types: Scan types to include in examples; a subset of
            ['t1', 't2', 't2_SPACE'].
    """

    def __init__(
        self,
        name: str = 'default',
        version: str = '0.0.0',
        data_dir: Optional[str] = None,
        data_files: Optional[Union[str, Sequence, Mapping]] = None,
        description: Optional[str] = None,
        scan_types: Optional[List[str]] = None,
    ):
        """Initialize the config.

        Args:
            name: Config name.
            version: Config version string.
            data_dir: Optional local data directory.
            data_files: Optional explicit data file spec.
            description: Human-readable config description.
            scan_types: Scan types to include; defaults to all three
                ('t1', 't2', 't2_SPACE') when omitted.
        """
        super().__init__(name, version, data_dir, data_files, description)
        # Use a None sentinel instead of a mutable list default: a shared
        # list default would be aliased across every config instance.
        self.scan_types = (
            ['t1', 't2', 't2_SPACE'] if scan_types is None else scan_types
        )
|
|
|
|
class SPIDER(datasets.GeneratorBasedBuilder):
    """SPIDER lumbar spine MRI dataset of sagittal scans with segmentation masks.

    Each example pairs one sagittal MRI scan (t1, t2, or t2 SPACE) with the
    patient/scanner metadata for that scan and the patient's radiological
    gradings, keyed by a unique '<patient>_<scan_type>' scan id.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIG_CLASS = CustomBuilderConfig

    # Configs differ only in which sagittal scan types are included.
    BUILDER_CONFIGS = [
        CustomBuilderConfig(
            name="all_scan_types",
            version=VERSION,
            description="Use images of all scan types (t1, t2, t2 SPACE)",
            scan_types=['t1', 't2', 't2_SPACE'],
        ),
        CustomBuilderConfig(
            name="t1_scan_types",
            version=VERSION,
            description="Use images of t1 scan types only",
            scan_types=['t1'],
        ),
        CustomBuilderConfig(
            name="t2_scan_types",
            version=VERSION,
            description="Use images of t2 scan types only",
            scan_types=['t2'],
        ),
        CustomBuilderConfig(
            name="t2_SPACE_scan_types",
            version=VERSION,
            description="Use images of t2 SPACE scan types only",
            scan_types=['t2_SPACE'],
        ),
    ]

    DEFAULT_CONFIG_NAME = "all_scan_types"

    def _info(self):
        """Return the datasets.DatasetInfo (description, features, license, ...).

        NOTE(review): the feature spec below (sentence/option1/answer) is a
        dataset-script template leftover and does not match the keys yielded
        by `_generate_examples` (patient_id, scan_type, raw_image,
        numeric_array, metadata, rad_gradings). Left unchanged here because
        the correct feature types (in particular for the SimpleITK image and
        the numeric array) need to be confirmed against the intended schema.
        """
        features = datasets.Features(
            {
                "sentence": datasets.Value("string"),
                "option1": datasets.Value("string"),
                "answer": datasets.Value("string")
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the data and define the train/validate/test splits.

        Args:
            dl_manager: datasets download manager used to fetch and extract
                the archives and CSV files listed in _URLS.

        Returns:
            One SplitGenerator per split; each forwards the extracted paths
            and the configured scan types to `_generate_examples`.
        """
        paths_dict = dl_manager.download_and_extract(_URLS)
        # Honor the user's selected builder config. Previously this was
        # hard-coded to ['t1'], silently ignoring the config's scan_types.
        scan_types = self.config.scan_types
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "train",
                    "scan_types": scan_types,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "validate",
                    "scan_types": scan_types,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "test",
                    "scan_types": scan_types,
                },
            ),
        ]

    def _generate_examples(
        self,
        paths_dict: Dict[str, str],
        split: str = 'train',
        scan_types: Optional[List[str]] = None,
        validate_share: float = 0.3,
        test_share: float = 0.2,
        raw_image: bool = True,
        numeric_array: bool = True,
        metadata: bool = True,
        rad_gradings: bool = True,
        random_seed: int = 9999,
    ) -> Tuple[str, Dict]:
        """Yield (key, example) tuples for the requested split.

        The `key` is for legacy reasons (tfds) and is not important in
        itself, but must be unique for each example.

        Args:
            paths_dict: Mapping of data element name to local file location.
            split: One of 'train', 'validate', or 'test'.
            scan_types: Sagittal scan types to include; subset of
                ['t1', 't2', 't2_SPACE']. None means all three.
            validate_share: Share of patients used for validation, in
                (0.0, 1.0); training share is (1 - validate_share - test_share).
            test_share: Share of patients used for testing, in (0.0, 1.0).
            raw_image: Include the SimpleITK .mha image object in the example.
            numeric_array: Include the numpy voxel array of the image.
            metadata: Include patient and scanner metadata for the scan.
            rad_gradings: Include the patient's radiological gradings.
            random_seed: Seed for the patient partition and example shuffle,
                so splits are reproducible across runs.

        Yields:
            Tuple of (unique '<patient>_<scan_type>' scan id, example dict).

        Raises:
            ValueError: On an unknown scan type or split name, or on share
                parameters outside their valid ranges.
            FileNotFoundError: When no .mha image or mask files are found.
        """
        # None sentinel instead of a mutable list default argument.
        if scan_types is None:
            scan_types = ['t1', 't2', 't2_SPACE']

        train_share = (1.0 - validate_share - test_share)
        np.random.seed(int(random_seed))

        # ---- Argument validation -------------------------------------------
        for item in scan_types:
            if item not in ['t1', 't2', 't2_SPACE']:
                # Bug fix: the original message lacked the f-prefix, so
                # "{item}" was never interpolated.
                raise ValueError(
                    f'Scan type "{item}" not recognized as valid scan type.\
                    Verify scan type argument.'
                )
        if split not in ['train', 'validate', 'test']:
            raise ValueError(
                f'Split argument "{split}" is not recognized. \
                Please enter one of ["train", "validate", "test"]'
            )
        if train_share <= 0.0:
            raise ValueError(
                f'Training share is calculated as (1 - validate_share - test_share) \
                and must be greater than 0. Current calculated value is \
                {round(train_share, 3)}. Adjust validate_share and/or \
                test_share parameters.'
            )
        if validate_share > 1.0 or validate_share < 0.0:
            raise ValueError(
                f'Validation share must be between (0, 1). Current value is \
                {validate_share}.'
            )
        if test_share > 1.0 or test_share < 0.0:
            raise ValueError(
                f'Testing share must be between (0, 1). Current value is \
                {test_share}.'
            )

        # ---- Randomly partition patient ids into splits --------------------
        partition = np.random.choice(
            ['train', 'dev', 'test'],
            p=[train_share, validate_share, test_share],
            size=N_PATIENTS,
        )
        patient_ids = (np.arange(N_PATIENTS) + 1)
        train_ids = set(patient_ids[partition == 'train'])
        validate_ids = set(patient_ids[partition == 'dev'])
        test_ids = set(patient_ids[partition == 'test'])
        assert len(train_ids.union(validate_ids, test_ids)) == N_PATIENTS

        # ---- Load metadata and gradings ------------------------------------
        overview_data = import_csv_data(paths_dict['overview'])
        grades_data = import_csv_data(paths_dict['gradings'])

        # Index overview rows by scan file name for O(1) lookup per example.
        overview_dict = {item['new_file_name']: item for item in overview_data}

        # Group radiological gradings by patient id in a single pass
        # (previously an O(patients x rows) scan over grades_data).
        grades_dict: Dict[str, List[Dict[str, str]]] = {}
        for row in grades_data:
            grades_dict.setdefault(row['Patient'], []).append(row)

        # ---- Collect image and mask files ----------------------------------
        images_dir = os.path.join(paths_dict['images'], 'images')
        masks_dir = os.path.join(paths_dict['masks'], 'masks')
        image_files = [
            file for file in os.listdir(images_dir) if file.endswith('.mha')
        ]
        # Raise instead of assert: asserts are stripped under `python -O`.
        if not image_files:
            raise FileNotFoundError(
                "No image files found--check directory path."
            )

        mask_files = [
            file for file in os.listdir(masks_dir) if file.endswith('.mha')
        ]
        if not mask_files:
            raise FileNotFoundError(
                "No mask files found--check directory path."
            )

        # Keep only the requested scan types.
        image_files = [
            file for file in image_files
            if any(scan_type in file for scan_type in scan_types)
        ]
        mask_files = [
            file for file in mask_files
            if any(scan_type in file for scan_type in scan_types)
        ]

        # ---- Subset files to the requested split ---------------------------
        if split == 'train':
            subset_ids = train_ids
        elif split == 'validate':
            subset_ids = validate_ids
        elif split == 'test':
            subset_ids = test_ids

        # Bug fix: compare the patient-id prefix of the file name by exact
        # match. The previous substring test ('1' in '11') leaked files of
        # e.g. patient 11 into patient 1's split.
        subset_id_strs = {str(patient_id) for patient_id in subset_ids}
        image_files = [
            file for file in image_files
            if file.split('_')[0] in subset_id_strs
        ]
        mask_files = [
            file for file in mask_files
            if file.split('_')[0] in subset_id_strs
        ]
        if len(image_files) != len(mask_files):
            raise RuntimeError(
                "The number of image files does not match the number of "
                "mask files--verify subsetting operation."
            )

        # Shuffle so examples are not ordered by patient id.
        np.random.shuffle(image_files)

        # ---- Yield one example per scan ------------------------------------
        for example in image_files:
            # File names look like '<patient>_<scan_type>.mha'.
            scan_id = example.replace('.mha', '')
            patient_id = scan_id.split('_')[0]
            scan_type = '_'.join(scan_id.split('_')[1:])

            # Load the image and its numeric voxel array.
            image_path = os.path.join(images_dir, example)
            image = sitk.ReadImage(image_path)
            image_array = sitk.GetArrayFromImage(image)

            # Patient/scanner metadata for this scan.
            image_overview = overview_dict[scan_id]

            # Radiological gradings keyed by intervertebral disc label.
            # Bug fix: patients with no grading rows now produce an empty
            # dict instead of raising KeyError.
            patient_grades_dict = {}
            for item in grades_dict.get(patient_id, []):
                patient_grades_dict[f'IVD{item["IVD label"]}'] = {
                    k: v for k, v in item.items()
                    if k not in ['Patient', 'IVD label']
                }

            # Assemble the example according to the inclusion flags.
            return_dict = {'patient_id': patient_id, 'scan_type': scan_type}
            if raw_image:
                return_dict['raw_image'] = image
            if numeric_array:
                return_dict['numeric_array'] = image_array
            if metadata:
                return_dict['metadata'] = image_overview
            if rad_gradings:
                return_dict['rad_gradings'] = patient_grades_dict

            yield (scan_id, return_dict)
|
|