code stringlengths 17 6.64M |
|---|
def mean_merge_fn(planes: list):
    """Merges per-plane predictions by element-wise averaging.

    Args:
        planes (list): Equally-shaped numpy arrays, one per plane.

    Returns:
        numpy.ndarray: The element-wise mean over all planes.
    """
    # np.mean over a list of equally-shaped arrays stacks them along a new
    # leading axis and averages it away again.
    return np.mean(np.stack(planes), axis=0)
|
class AssembleInteractionFn():
    """Function interface enabling interaction with the `index_expression` and the `data` before it gets added to the
    assembled `prediction` in :class:`.SubjectAssembler`.

    .. automethod:: __call__
    """

    def __call__(self, key, data, index_expr, **kwargs):
        """Modifies the data and/or the index expression prior to assembling.

        Args:
            key (str): The identifier or key of the data.
            data (numpy.ndarray): The data.
            index_expr (.IndexExpression): The current index_expression that might be modified.
            **kwargs (dict): Any other arguments

        Returns:
            tuple: Modified `data` and modified `index_expression`
        """
        # Interface method: concrete behavior is supplied by subclasses
        # (e.g. ApplyTransformInteractionFn).
        raise NotImplementedError()
|
class ApplyTransformInteractionFn(AssembleInteractionFn):

    def __init__(self, transform: tfm.Transform) -> None:
        """Assemble interaction that applies a transform to the data before it is assembled.

        Args:
            transform (tfm.Transform): The transform applied to each (data, index expression) pair.
        """
        self.transform = transform

    def __call__(self, key, data, index_expr, **kwargs):
        """Applies the transform to the data and index expression.

        Args:
            key (str): The identifier or key of the data.
            data (numpy.ndarray): The data.
            index_expr (.IndexExpression): The current index expression.
            **kwargs (dict): Ignored.

        Returns:
            tuple: Transformed `data` and (possibly modified) `index_expression`.
        """
        # Temporarily disable the "entry not extracted" error so the transform can run on
        # a sample containing only this key and the index expression.
        # BUGFIX: the original saved `tfm.raise_error_if_entry_not_extracted` but then
        # set/restored a differently-named attribute (`tfm.raise_error_entry_not_extracted`),
        # so the real flag was never toggled nor restored. Use one consistent name, and
        # restore it in a finally block so an exception in the transform cannot leak the
        # disabled state.
        temp = tfm.raise_error_if_entry_not_extracted
        tfm.raise_error_if_entry_not_extracted = False
        try:
            ret = self.transform({key: data, defs.KEY_INDEX_EXPR: index_expr})
        finally:
            tfm.raise_error_if_entry_not_extracted = temp
        return (ret[key], ret[defs.KEY_INDEX_EXPR])
|
class PlaneSubjectAssembler(Assembler):

    def __init__(self, datasource: extr.PymiaDatasource, merge_fn=mean_merge_fn, zero_fn=numpy_zeros):
        """Assembles predictions of one or multiple subjects where predictions are made in all three planes.

        This class assembles the prediction from all planes (axial, coronal, sagittal) and merges the prediction
        according to :code:`merge_fn`.

        Assumes that the network output, i.e. to_assemble, is of shape (B, ..., C)
        where B is the batch size and C is the numbers of channels (must be at least 1) and ... refers to an
        arbitrary image dimension.

        Args:
            datasource (.PymiaDatasource): The datasource
            merge_fn: A function that processes a sample.
                Args: planes: list with the assembled prediction for all planes.
                Returns: Merged numpy.ndarray
            zero_fn: A function that initializes the numpy array to hold the predictions.
                Args: shape: tuple with the shape of the subject's labels, id: str identifying the subject.
                Returns: A np.ndarray
        """
        self.datasource = datasource
        # Maps plane dimension (int) -> SubjectAssembler collecting that plane's predictions.
        self.planes = {}
        self._subjects_ready = set()
        self.zero_fn = zero_fn
        self.merge_fn = merge_fn

    @property
    def subjects_ready(self):
        """see :meth:`Assembler.subjects_ready`"""
        # Return a copy so callers cannot mutate the internal bookkeeping set.
        return self._subjects_ready.copy()

    def add_batch(self, to_assemble: typing.Union[(np.ndarray, typing.Dict[(str, np.ndarray)])], sample_indices: np.ndarray, last_batch=False, **kwargs):
        """see :meth:`Assembler.add_batch`"""
        # Normalize the bare-array case to the dict form used internally.
        if (not isinstance(to_assemble, dict)):
            to_assemble = {'__prediction': to_assemble}
        sample_indices = sample_indices.tolist()
        for (batch_idx, sample_idx) in enumerate(sample_indices):
            (subject_index, index_expression) = self.datasource.indices[sample_idx]
            # One SubjectAssembler per plane, created lazily on first occurrence.
            plane_dimension = self._get_plane_dimension(index_expression)
            if (plane_dimension not in self.planes):
                self.planes[plane_dimension] = SubjectAssembler(self.datasource, self.zero_fn)
            indexing = index_expression.get_indexing()
            if (not isinstance(indexing, list)):
                indexing = [indexing]
            # Determine the spatial shape the prediction must be corrected to for this plane.
            extractor = extr.ImagePropertyShapeExtractor(numpy_format=True)
            required_plane_shape = self.datasource.direct_extract(extractor, subject_index)[defs.KEY_SHAPE]
            index_at_plane = indexing[plane_dimension]
            if isinstance(index_at_plane, tuple):
                # Indexed by a (start, stop) slice: keep the plane dimension at the sliced size.
                required_off_plane_size = (index_at_plane[1] - index_at_plane[0])
                required_plane_shape[plane_dimension] = required_off_plane_size
            else:
                # Indexed by an int: the plane dimension vanishes from the required shape.
                required_plane_shape.pop(plane_dimension)
            transform = tfm.SizeCorrection(tuple(required_plane_shape), entries=tuple(to_assemble.keys()))
            self.planes[plane_dimension].assemble_interaction_fn = ApplyTransformInteractionFn(transform)
            self.planes[plane_dimension].add_sample(to_assemble, batch_idx, sample_idx)
        # A subject is ready only when ALL plane assemblers report it ready (set intersection).
        ready = None
        for plane_assembler in self.planes.values():
            if last_batch:
                plane_assembler.end()
            if (ready is None):
                ready = set(plane_assembler.subjects_ready)
            else:
                ready.intersection_update(plane_assembler.subjects_ready)
        self._subjects_ready = ready

    def get_assembled_subject(self, subject_index: int):
        """see :meth:`Assembler.get_assembled_subject`"""
        try:
            self._subjects_ready.remove(subject_index)
        except KeyError:
            # Not marked ready: only an error if no plane holds a prediction for the subject.
            if (subject_index not in self.planes[0].predictions):
                raise ValueError('Subject with index {} not in assembler'.format(subject_index))
        # Collect every plane's assembled prediction per key, then merge with merge_fn.
        assembled = {}
        for plane in self.planes.values():
            ret_val = plane.get_assembled_subject(subject_index)
            if (not isinstance(ret_val, dict)):
                ret_val = {'__prediction': ret_val}
            for (key, value) in ret_val.items():
                assembled.setdefault(key, []).append(value)
        for key in assembled:
            assembled[key] = self.merge_fn(assembled[key])
        # Unwrap the dict if the caller originally supplied a bare array.
        if ('__prediction' in assembled):
            return assembled['__prediction']
        return assembled

    @staticmethod
    def _get_plane_dimension(index_expr):
        # The plane dimension is the first axis indexed by an int or a non-full slice.
        # NOTE(review): implicitly returns None if every entry is a full slice — presumably
        # index expressions here always restrict at least one axis; verify against callers.
        for (i, entry) in enumerate(index_expr.expression):
            if isinstance(entry, int):
                return i
            if (isinstance(entry, slice) and (entry != slice(None))):
                return i
|
class Subject2dAssembler(Assembler):

    def __init__(self, datasource: extr.PymiaDatasource) -> None:
        """Assembles predictions of two-dimensional images.

        Two-dimensional images do not specifically require assembling. For pipeline compatibility reasons this
        class provides, nevertheless, a implementation for the two-dimensional case.

        Args:
            datasource (.PymiaDatasource): The datasource
        """
        super().__init__()
        self.datasource = datasource
        self._subjects_ready = set()
        # Maps subject_index -> {key: prediction array}.
        self.predictions = {}

    @property
    def subjects_ready(self):
        """see :meth:`Assembler.subjects_ready`"""
        # Hand out a copy to protect the internal set from mutation.
        return set(self._subjects_ready)

    def add_batch(self, to_assemble: typing.Union[(np.ndarray, typing.Dict[(str, np.ndarray)])], sample_indices: np.ndarray, last_batch=False, **kwargs):
        """see :meth:`Assembler.add_batch`"""
        # Normalize the bare-array case to the dict form used internally.
        if not isinstance(to_assemble, dict):
            to_assemble = {'__prediction': to_assemble}
        for batch_idx, sample_idx in enumerate(sample_indices.tolist()):
            subject_index, _ = self.datasource.indices[sample_idx]
            subject_predictions = self.predictions.setdefault(subject_index, {})
            # A 2-D sample is already a complete subject prediction; no assembling needed.
            for key, batch_data in to_assemble.items():
                subject_predictions[key] = batch_data[batch_idx]
            self._subjects_ready.add(subject_index)

    def get_assembled_subject(self, subject_index):
        """see :meth:`Assembler.get_assembled_subject`"""
        try:
            self._subjects_ready.remove(subject_index)
        except KeyError:
            # Not marked ready: only an error if no prediction exists for the subject.
            if subject_index not in self.predictions:
                raise ValueError('Subject with index {} not in assembler'.format(subject_index))
        assembled = self.predictions.pop(subject_index)
        # Unwrap the dict if the caller originally supplied a bare array.
        if '__prediction' in assembled:
            return assembled['__prediction']
        return assembled
|
class RandomCrop(tfm.Transform):

    def __init__(self, shape: typing.Union[(int, tuple)], axis: typing.Union[(int, tuple)] = None, p: float = 1.0, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)):
        """Randomly crops the sample to the specified shape.

        The sample shape must not be smaller than the crop shape.

        Notes:
            A probability lower than 1.0 might make not much sense because it results in inconsistent output
            dimensions.

        Args:
            shape (int, tuple): The shape of the sample after the cropping.
                If axis is not defined, the cropping will be applied from the first dimension onwards of the sample.
                Use None to exclude an axis or define axis to specify the axis/axes to crop.
                E.g.:

                - shape=256 with the default axis parameter results in a shape of 256 x ...
                - shape=(256, 128) with the default axis parameter results in a shape of 256 x 128 x ...
                - shape=(None, 256) with the default axis parameter results in a shape of <as before> x 256 x ...
                - shape=(256, 128) with axis=(1, 0) results in a shape of 128 x 256 x ...
                - shape=(None, 128, 256) with axis=(1, 2, 0) results in a shape of 256 x <as before> x 256 x ...
            axis (int, tuple): Axis or axes to which the shape int or tuple correspond(s) to.
                If defined, must have the same length as shape.
            p (float): The probability of the cropping to be applied.
            entries (tuple): The sample's entries to apply the cropping to.
        """
        super().__init__()
        if isinstance(shape, int):
            shape = (shape,)
        if axis is None:
            axis = tuple(range(len(shape)))
        if isinstance(axis, int):
            axis = (axis,)
        if len(axis) != len(shape):
            raise ValueError('If specified, the axis parameter must be of the same length as the shape')
        # Drop axes whose target size is None (axes excluded from cropping).
        self.axis = tuple(a for (a, s) in zip(axis, shape) if s is not None)
        self.shape = tuple(s for s in shape if s is not None)
        self.p = p
        self.entries = entries

    def __call__(self, sample: dict) -> dict:
        """Crops all configured entries of the sample to the configured shape (with probability p)."""
        if self.p < np.random.random():
            return sample
        for entry in self.entries:
            if entry not in sample:
                raise ValueError(tfm.ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
        # Sample one crop anchor per cropped axis. Valid anchors are 0 .. size - crop_size
        # (inclusive). BUGFIX: the original used randint(0, size - crop_size), which never
        # sampled the last valid anchor and raised ValueError when size == crop_size.
        reference_shape = sample[self.entries[0]].shape
        anchors = [np.random.randint(0, (reference_shape[a] - s) + 1) for (a, s) in zip(self.axis, self.shape)]
        # Apply the same anchors to every entry so they stay spatially aligned.
        for entry in self.entries:
            for (crop_axis, new_axis_size, anchor) in zip(self.axis, self.shape, anchors):
                sample[entry] = np.take(sample[entry], range(anchor, anchor + new_axis_size), crop_axis)
        return sample
|
class RandomElasticDeformation(tfm.Transform):

    def __init__(self, num_control_points: int = 4, deformation_sigma: float = 5.0, interpolators: tuple = (sitk.sitkBSpline, sitk.sitkNearestNeighbor), spatial_rank: int = 2, fill_value: float = 0.0, p: float = 0.5, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)):
        """Randomly transforms the sample elastically.

        Notes:
            The code bases on NiftyNet's RandomElasticDeformationLayer class (version 0.3.0).

        Warnings:
            Always inspect the results of this transform on some samples (especially for 3-D data).

        Args:
            num_control_points (int): The number of control points for the b-spline mesh.
            deformation_sigma (float): The maximum deformation along the deformation mesh.
            interpolators (tuple): The SimpleITK interpolators to use for each entry in entries.
            spatial_rank (int): The spatial rank (dimension) of the sample.
            fill_value (float): The fill value for the resampling.
            p (float): The probability of the elastic transformation to be applied.
            entries (tuple): The sample's entries to apply the elastic transformation to.
        """
        super().__init__()
        if len(interpolators) != len(entries):
            raise ValueError('interpolators must have the same length as entries')
        # A b-spline mesh needs at least two control points; sigma below 1 is clamped.
        self.num_control_points = max(num_control_points, 2)
        self.deformation_sigma = max(deformation_sigma, 1)
        self.spatial_rank = spatial_rank
        self.interpolators = interpolators
        self.fill_value = fill_value
        self.p = p
        self.entries = entries

    def __call__(self, sample: dict) -> dict:
        """Elastically deforms all configured entries of the sample (with probability p)."""
        if self.p < np.random.random():
            return sample
        for entry in self.entries:
            if entry not in sample:
                raise ValueError(tfm.ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
        # Build one random b-spline transformation on a zero image with the sample's
        # spatial shape, shared by all entries so they deform consistently.
        shape = sample[self.entries[0]].shape[:self.spatial_rank]
        img = sitk.GetImageFromArray(np.zeros(shape))
        transform_mesh_size = [self.num_control_points] * img.GetDimension()
        bspline_transformation = sitk.BSplineTransformInitializer(img, transform_mesh_size)
        params = bspline_transformation.GetParameters()
        # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
        # float yields the same float64 dtype.
        params = np.asarray(params, dtype=float)
        params += np.random.randn(params.shape[0]) * self.deformation_sigma
        bspline_transformation.SetParameters(tuple(params))
        # Resample each channel of each entry with the entry-specific interpolator
        # (e.g. b-spline for images, nearest neighbor for label maps).
        for (interpolator_idx, entry) in enumerate(self.entries):
            data = sample[entry]
            for channel in range(data.shape[-1]):
                img = sitk.GetImageFromArray(data[..., channel])
                resampler = sitk.ResampleImageFilter()
                resampler.SetReferenceImage(img)
                resampler.SetInterpolator(self.interpolators[interpolator_idx])
                resampler.SetDefaultPixelValue(self.fill_value)
                resampler.SetTransform(bspline_transformation)
                img_deformed = resampler.Execute(img)
                sample[entry][..., channel] = sitk.GetArrayFromImage(img_deformed)
        return sample
|
class RandomMirror(tfm.Transform):

    def __init__(self, axis: int = -2, p: float = 1.0, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)):
        """Randomly mirrors the sample along a given axis.

        Args:
            p (float): The probability of the mirroring to be applied.
            axis (int): The axis to apply the mirroring.
            entries (tuple): The sample's entries to apply the mirroring to.
        """
        super().__init__()
        self.axis = axis
        self.p = p
        self.entries = entries

    def __call__(self, sample: dict) -> dict:
        """Mirrors all configured entries of the sample (with probability p)."""
        # Skip the augmentation with probability (1 - p).
        if np.random.random() > self.p:
            return sample
        for entry in self.entries:
            if entry not in sample:
                raise ValueError(tfm.ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
            # copy() because np.flip returns a view with negative strides.
            sample[entry] = np.flip(sample[entry], self.axis).copy()
        return sample
|
class RandomRotation90(tfm.Transform):

    def __init__(self, axes: typing.Tuple[int] = (-3, -2), p: float = 1.0, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)):
        """Randomly rotates the sample 90, 180, or 270 degrees in the plane specified by axes.

        Raises:
            UserWarning: If the plane to rotate is not rectangular.

        Args:
            axes (tuple): The sample is rotated in the plane defined by the axes.
                Axes must be of length two and different.
            p (float): The probability of the rotation to be applied.
            entries (tuple): The sample's entries to apply the rotation to.
        """
        super().__init__()
        if len(axes) != 2:
            raise ValueError('axes must be of length two')
        self.axes = axes
        self.p = p
        self.entries = entries

    def __call__(self, sample: dict) -> dict:
        """Rotates all configured entries of the sample by the same random multiple of 90 degrees."""
        # Skip the augmentation with probability (1 - p).
        if np.random.random() > self.p:
            return sample
        k = np.random.randint(1, 4)  # number of 90-degree rotations: 1, 2, or 3
        for entry in self.entries:
            if entry not in sample:
                raise ValueError(tfm.ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
            # Warn on non-square planes: rotating by 90/270 degrees swaps the two axes' sizes.
            if sample[entry].shape[self.axes[0]] != sample[entry].shape[self.axes[1]]:
                warnings.warn(f'entry "{entry}" has unequal in-plane dimensions ({sample[entry].shape[self.axes[0]]}, {sample[entry].shape[self.axes[1]]}). Random 90 degree rotation might produce undesired results. Verify the output!', RuntimeWarning)
            # copy() to return a contiguous array instead of the rot90 view.
            sample[entry] = np.rot90(sample[entry], k, axes=self.axes).copy()
        return sample
|
class RandomShift(tfm.Transform):

    def __init__(self, shift: typing.Union[(int, tuple)], axis: typing.Union[(int, tuple)] = None, p: float = 1.0, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)):
        """Randomly shifts the sample along axes by a value from the interval [-p * size(axis), +p * size(axis)],
        where p is the percentage of shifting and size(axis) is the size along an axis.

        Args:
            shift (int, tuple): The percentage of shifting of the axis' size.
                If axis is not defined, the shifting will be applied from the first dimension onwards of the sample.
                Use None to exclude an axis or define axis to specify the axis/axes to crop.
                E.g.:

                - shift=0.2 with the default axis parameter shifts the sample along the 1st axis.
                - shift=(0.2, 0.1) with the default axis parameter shifts the sample along the 1st and 2nd axes.
                - shift=(None, 0.2) with the default axis parameter shifts the sample along the 2st axis.
                - shift=(0.2, 0.1) with axis=(1, 0) shifts the sample along the 1st and 2nd axes.
                - shift=(None, 0.1, 0.2) with axis=(1, 2, 0) shifts the sample along the 1st and 3rd axes.
            axis (int, tuple): Axis or axes to which the shift int or tuple correspond(s) to.
                If defined, must have the same length as shape.
            p (float): The probability of the shift to be applied.
            entries (tuple): The sample's entries to apply the shifting to.
        """
        super().__init__()
        # BUGFIX: the documented usage is a float percentage (e.g. shift=0.2), but the
        # original only wrapped ints into a tuple, so a bare float crashed on len(shift).
        if isinstance(shift, (int, float)):
            shift = (shift,)
        if axis is None:
            axis = tuple(range(len(shift)))
        if isinstance(axis, int):
            axis = (axis,)
        if len(axis) != len(shift):
            raise ValueError('If specified, the axis parameter must be of the same length as the shift')
        # Drop axes whose shift is None (axes excluded from shifting).
        self.axis = tuple(a for (a, s) in zip(axis, shift) if s is not None)
        self.shift = tuple(s for s in shift if s is not None)
        self.p = p
        self.entries = entries

    def __call__(self, sample: dict) -> dict:
        """Circularly shifts all configured entries of the sample by the same random offsets."""
        if self.p < np.random.random():
            return sample
        for entry in self.entries:
            if entry not in sample:
                raise ValueError(tfm.ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
        # Maximum shift per axis as a fraction of that axis' size.
        shifts_maximums = [int(s * sample[self.entries[0]].shape[a]) for (a, s) in zip(self.axis, self.shift)]
        # BUGFIX: randint's upper bound is exclusive; +1 makes +maximum reachable, matching
        # the documented interval [-p * size, +p * size].
        shifts = [np.random.randint(-s_max, s_max + 1) if s_max != 0 else 0 for s_max in shifts_maximums]
        # Apply the same shifts to every entry so they stay spatially aligned.
        for entry in self.entries:
            for (shift_axis, shift_amount) in zip(self.axis, shifts):
                sample[entry] = np.roll(sample[entry], shift_amount, shift_axis)
        return sample
|
class PytorchDatasetAdapter(torch_data.Dataset):

    def __init__(self, datasource: extr.PymiaDatasource) -> None:
        """A wrapper class for :class:`.PymiaDatasource` to fit the
        `torch.utils.data.Dataset <https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset>`_ interface.

        Args:
            datasource (.PymiaDatasource): The pymia datasource instance.
        """
        super().__init__()
        self.datasource = datasource

    def __len__(self) -> int:
        # Delegate the length straight to the wrapped datasource.
        return len(self.datasource)

    def __getitem__(self, index: int):
        # Delegate item access straight to the wrapped datasource.
        return self.datasource[index]
|
class SubsetSequentialSampler(smplr.Sampler):

    def __init__(self, indices):
        """Samples elements sequential from a given list of indices, without replacement.

        The class adopts the `torch.utils.data.Sampler
        <https://pytorch.org/docs/1.3.0/data.html#torch.utils.data.Sampler>`_ interface.

        Args:
            indices list: list of indices that define the subset to be used for the sampling.
        """
        super().__init__(None)
        self.indices = indices

    def __iter__(self):
        # Yield the indices in their given order (sequential, no shuffling).
        return iter(self.indices)

    def __len__(self):
        return len(self.indices)
|
def get_tf_generator(data_source: extr.PymiaDatasource):
    """Returns a generator that wraps :class:`.PymiaDatasource` for the TensorFlow data handling.

    The returned generator can be used with `tf.data.Dataset.from_generator
    <https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_generator>`_ in order to build a TensorFlow
    dataset.

    Args:
        data_source (.PymiaDatasource): the datasource to be wrapped.

    Returns:
        generator: Function that loops over the entire datasource and yields all entries.
    """
    def generator():
        # Yield every sample of the datasource in index order.
        for index in range(len(data_source)):
            yield data_source[index]
    return generator
|
class ImageProperties():

    def __init__(self, image: sitk.Image):
        """Represents ITK image properties.

        Holds common ITK image meta-data such as the size, origin, spacing, and direction.

        See Also:
            SimpleITK provides `itk::simple::Image::CopyInformation`_ to copy image information.

        .. _itk::simple::Image::CopyInformation:
            https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1Image.html#afa8a4757400c414e809d1767ee616bd0

        Args:
            image (sitk.Image): The image whose properties to hold.
        """
        self.size = image.GetSize()
        self.origin = image.GetOrigin()
        self.spacing = image.GetSpacing()
        self.direction = image.GetDirection()
        self.dimensions = image.GetDimension()
        self.number_of_components_per_pixel = image.GetNumberOfComponentsPerPixel()
        self.pixel_id = image.GetPixelID()

    def is_two_dimensional(self) -> bool:
        """Determines whether the image is two-dimensional.

        Returns:
            bool: True if the image is two-dimensional; otherwise, False.
        """
        return self.dimensions == 2

    def is_three_dimensional(self) -> bool:
        """Determines whether the image is three-dimensional.

        Returns:
            bool: True if the image is three-dimensional; otherwise, False.
        """
        return self.dimensions == 3

    def is_vector_image(self) -> bool:
        """Determines whether the image is a vector image.

        Returns:
            bool: True for vector images; False for scalar images.
        """
        return self.number_of_components_per_pixel > 1

    def __str__(self):
        """Gets a printable string representation.

        Returns:
            str: String representation.
        """
        return 'ImageProperties:\n size: {self.size}\n origin: {self.origin}\n spacing: {self.spacing}\n direction: {self.direction}\n dimensions: {self.dimensions}\n number_of_components_per_pixel: {self.number_of_components_per_pixel}\n pixel_id: {self.pixel_id}\n'.format(self=self)

    def __eq__(self, other):
        """Determines the equality of two ImageProperties classes.

        Notes
            The equality does not include the number_of_components_per_pixel and pixel_id.

        Args:
            other (object): An ImageProperties instance or any other object.

        Returns:
            bool: True if the ImageProperties are equal; otherwise, False.
        """
        if isinstance(other, self.__class__):
            return ((self.size == other.size) and (self.origin == other.origin) and (self.spacing == other.spacing) and (self.direction == other.direction) and (self.dimensions == other.dimensions))
        return NotImplemented

    def __ne__(self, other):
        """Determines the non-equality of two ImageProperties classes.

        Notes
            The non-equality does not include the number_of_components_per_pixel and pixel_id.

        Args:
            other (object): An ImageProperties instance or any other object.

        Returns:
            bool: True if the ImageProperties are non-equal; otherwise, False.
        """
        if isinstance(other, self.__class__):
            return not self.__eq__(other)
        return NotImplemented

    def __hash__(self):
        """Gets the hash.

        Notes
            Consistent with :meth:`__eq__`: number_of_components_per_pixel and pixel_id are excluded.

        Returns:
            int: The hash of the object.
        """
        # BUGFIX: hash exactly the fields compared by __eq__. The previous implementation
        # hashed all of __dict__, which broke the __eq__/__hash__ contract for images
        # differing only in pixel_id or number_of_components_per_pixel (equal objects
        # could hash differently).
        return hash((self.size, self.origin, self.spacing, self.direction, self.dimensions))
|
class NumpySimpleITKImageBridge():
    """A numpy to SimpleITK bridge, which provides static methods to convert between numpy array and SimpleITK image."""

    @staticmethod
    def convert(array: np.ndarray, properties: ImageProperties) -> sitk.Image:
        """Converts a numpy array to a SimpleITK image.

        Args:
            array (np.ndarray): The image as numpy array. The shape can be either:

                - shape=(n,), where n = total number of voxels
                - shape=(n,v), where n = total number of voxels and v = number of components per pixel (vector image)
                - shape=(<reversed image size>), what you get from sitk.GetArrayFromImage()
                - shape=(<reversed image size>,v), what you get from sitk.GetArrayFromImage()
                  and v = number of components per pixel (vector image)

            properties (ImageProperties): The image properties.

        Returns:
            sitk.Image: The SimpleITK image.
        """
        # SimpleITK arrays are indexed in reverse order (z, y, x) w.r.t. the image size.
        reversed_size = properties.size[::-1]
        is_vector = False
        if array.shape != reversed_size:
            if array.ndim == 1:
                # Flattened scalar image: restore the spatial shape.
                array = array.reshape(reversed_size)
            elif array.ndim == 2:
                # Flattened vector image: restore the spatial shape, keep the component axis last.
                is_vector = True
                array = array.reshape(reversed_size + (array.shape[1],))
            elif array.ndim == len(properties.size) + 1:
                # Already spatially shaped with a trailing component axis.
                is_vector = True
            else:
                raise ValueError('array shape {} not supported'.format(array.shape))
        image = sitk.GetImageFromArray(array, is_vector)
        image.SetOrigin(properties.origin)
        image.SetSpacing(properties.spacing)
        image.SetDirection(properties.direction)
        return image
|
class SimpleITKNumpyImageBridge():
    """A SimpleITK to numpy bridge.

    Converts SimpleITK images to numpy arrays. Use the ``NumpySimpleITKImageBridge`` to convert back.
    """

    @staticmethod
    def convert(image: sitk.Image) -> typing.Tuple[(np.ndarray, ImageProperties)]:
        """Converts an image to a numpy array and an ImageProperties class.

        Args:
            image (SimpleITK.Image): The image.

        Returns:
            A Tuple[np.ndarray, ImageProperties]: The image as numpy array and the image properties.

        Raises:
            ValueError: If `image` is `None`.
        """
        # Guard explicitly: sitk.GetArrayFromImage on None would fail with a less helpful error.
        if image is None:
            raise ValueError('Parameter image can not be None')
        return sitk.GetArrayFromImage(image), ImageProperties(image)
|
class Callback():
    """Base class for the interaction with the dataset creation.

    Implementations of the :class:`.Callback` class can be provided to :meth:`.Traverser.traverse` in order to
    write/process specific information of the original data.
    """

    def on_start(self, params: dict):
        """Called at the beginning of :meth:`.Traverser.traverse`.

        Args:
            params (dict): Parameters provided by the :class:`.Traverser`. The provided parameters will differ from
                :meth:`.Callback.on_subject`.
        """
        pass

    def on_end(self, params: dict):
        """Called at the end of :meth:`.Traverser.traverse`.

        Args:
            params (dict): Parameters provided by the :class:`.Traverser`. The provided parameters will differ from
                :meth:`.Callback.on_subject`.
        """
        pass

    def on_subject(self, params: dict):
        """Called for each subject of :meth:`.Traverser.traverse`.

        Args:
            params (dict): Parameters provided by the :class:`.Traverser` containing subject specific information
                and data.
        """
        pass
|
class ComposeCallback(Callback):

    def __init__(self, callbacks: typing.List[Callback]) -> None:
        """Composes many :class:`.Callback` instances and behaves like an single :class:`.Callback` instance.

        This class allows passing multiple :class:`.Callback` to :meth:`.Traverser.traverse`.

        Args:
            callbacks (list): A list of :class:`.Callback` instances.
        """
        self.callbacks = callbacks

    def on_start(self, params: dict):
        # Fan the event out to every composed callback, in order.
        for callback in self.callbacks:
            callback.on_start(params)

    def on_end(self, params: dict):
        for callback in self.callbacks:
            callback.on_end(params)

    def on_subject(self, params: dict):
        for callback in self.callbacks:
            callback.on_subject(params)
|
class MonitoringCallback(Callback):
    """Callback that monitors the dataset creation process by logging the progress to the console."""

    def on_start(self, params: dict):
        print('start dataset creation')

    def on_subject(self, params: dict):
        subject_files = params[defs.KEY_SUBJECT_FILES]
        index = params[defs.KEY_SUBJECT_INDEX]
        # e.g. "[3/12] subject_03"
        print('[{}/{}] {}'.format(index + 1, len(subject_files), subject_files[index].subject))

    def on_end(self, params: dict):
        print('dataset creation finished')
|
class WriteDataCallback(Callback):

    def __init__(self, writer: wr.Writer) -> None:
        """Callback that writes the raw data to the dataset.

        Args:
            writer (.creation.writer.Writer): The writer used to write the data.
        """
        self.writer = writer

    def on_subject(self, params: dict):
        subject_index = params[defs.KEY_SUBJECT_INDEX]
        subject_files = params[defs.KEY_SUBJECT_FILES]
        index_str = defs.subject_index_to_str(subject_index, len(subject_files))
        # Write each category's data under a per-subject location.
        for category in params[defs.KEY_CATEGORIES]:
            data = params[category]
            location = '{}/{}'.format(defs.LOC_DATA_PLACEHOLDER.format(category), index_str)
            self.writer.write(location, data, dtype=data.dtype)
|
class WriteEssentialCallback(Callback):

    def __init__(self, writer: wr.Writer) -> None:
        """Callback that writes the essential information to the dataset.

        Args:
            writer (.creation.writer.Writer): The writer used to write the data.
        """
        self.writer = writer
        # Shape datasets are reserved lazily on the first subject (see on_subject).
        self.reserved_for_shape = False

    def on_start(self, params: dict):
        subject_count = len(params[defs.KEY_SUBJECT_FILES])
        self.writer.reserve(defs.LOC_SUBJECT, (subject_count,), str)
        self.reserved_for_shape = False

    def on_subject(self, params: dict):
        subject_files = params[defs.KEY_SUBJECT_FILES]
        subject_index = params[defs.KEY_SUBJECT_INDEX]
        self.writer.fill(defs.LOC_SUBJECT, subject_files[subject_index].subject, expr.IndexExpression(subject_index))
        if not self.reserved_for_shape:
            # The dimensionality per category is only known once the first subject's data is loaded.
            for category in params[defs.KEY_CATEGORIES]:
                self.writer.reserve(defs.LOC_SHAPE_PLACEHOLDER.format(category),
                                    (len(subject_files), params[category].ndim), dtype=np.uint16)
            self.reserved_for_shape = True
        for category in params[defs.KEY_CATEGORIES]:
            self.writer.fill(defs.LOC_SHAPE_PLACEHOLDER.format(category), params[category].shape,
                             expr.IndexExpression(subject_index))
|
class WriteImageInformationCallback(Callback):

    def __init__(self, writer: wr.Writer, category=defs.KEY_IMAGES) -> None:
        """Callback that writes the image information (shape, origin, direction, spacing) to the dataset.

        Args:
            writer (.creation.writer.Writer): The writer used to write the data.
            category (str): The category from which to extract the information from.
        """
        self.writer = writer
        self.category = category
        self.new_subject = False  # NOTE(review): unused within this class; kept for backward compatibility

    def on_start(self, params: dict):
        subject_count = len(params[defs.KEY_SUBJECT_FILES])
        # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
        # float is the equivalent float64 dtype.
        self.writer.reserve(defs.LOC_IMGPROP_SHAPE, (subject_count, 3), dtype=np.uint16)
        self.writer.reserve(defs.LOC_IMGPROP_ORIGIN, (subject_count, 3), dtype=float)
        self.writer.reserve(defs.LOC_IMGPROP_DIRECTION, (subject_count, 9), dtype=float)
        self.writer.reserve(defs.LOC_IMGPROP_SPACING, (subject_count, 3), dtype=float)

    def on_subject(self, params: dict):
        subject_index = params[defs.KEY_SUBJECT_INDEX]
        # ImageProperties of the configured category (e.g. the images' meta-data).
        properties = params[defs.KEY_PLACEHOLDER_PROPERTIES.format(self.category)]
        self.writer.fill(defs.LOC_IMGPROP_SHAPE, properties.size, expr.IndexExpression(subject_index))
        self.writer.fill(defs.LOC_IMGPROP_ORIGIN, properties.origin, expr.IndexExpression(subject_index))
        self.writer.fill(defs.LOC_IMGPROP_DIRECTION, properties.direction, expr.IndexExpression(subject_index))
        self.writer.fill(defs.LOC_IMGPROP_SPACING, properties.spacing, expr.IndexExpression(subject_index))
|
class WriteNamesCallback(Callback):

    def __init__(self, writer: wr.Writer) -> None:
        """Callback that writes the names of the category entries to the dataset.

        Args:
            writer (.creation.writer.Writer): The writer used to write the data.
        """
        self.writer = writer

    def on_start(self, params: dict):
        # The entry names per category are known up front, so one write at start suffices.
        for category in params[defs.KEY_CATEGORIES]:
            names = params[defs.KEY_PLACEHOLDER_NAMES.format(category)]
            self.writer.write(defs.LOC_NAMES_PLACEHOLDER.format(category), names, dtype='str')
|
class WriteFilesCallback(Callback):

    def __init__(self, writer: wr.Writer) -> None:
        """Callback that writes the file names to the dataset.

        Args:
            writer (.creation.writer.Writer): The writer used to write the data.
        """
        self.writer = writer
        self.file_root = None  # common root path; file names are stored relative to it

    @staticmethod
    def _get_common_path(subject_files):
        # Common path over all files of all subjects (via each subject's own common path).
        per_subject = [os.path.commonpath(list(sf.get_all_files().values())) for sf in subject_files]
        return os.path.commonpath(per_subject)

    def on_start(self, params: dict):
        subject_files = params[defs.KEY_SUBJECT_FILES]
        self.file_root = self._get_common_path(subject_files)
        # commonpath may yield a file path (e.g. a single file in total); use its directory then.
        if os.path.isfile(self.file_root):
            self.file_root = os.path.dirname(self.file_root)
        self.writer.write(defs.LOC_FILES_ROOT, self.file_root, dtype='str')
        for category in params[defs.KEY_CATEGORIES]:
            entry_count = len(params[defs.KEY_PLACEHOLDER_NAMES.format(category)])
            self.writer.reserve(defs.LOC_FILES_PLACEHOLDER.format(category),
                                (len(subject_files), entry_count), dtype='str')

    def on_subject(self, params: dict):
        subject_index = params[defs.KEY_SUBJECT_INDEX]
        subject_file = params[defs.KEY_SUBJECT_FILES][subject_index]
        for category in params[defs.KEY_CATEGORIES]:
            for index, file_name in enumerate(subject_file.categories[category].entries.values()):
                relative_path = os.path.relpath(file_name, self.file_root)
                location = expr.IndexExpression(indexing=[subject_index, index], axis=(0, 1))
                self.writer.fill(defs.LOC_FILES_PLACEHOLDER.format(category), relative_path, location)
|
def get_default_callbacks(writer: wr.Writer, meta_only=False) -> ComposeCallback:
    """Provides a selection of commonly used callbacks to write the most important information to the dataset.

    Args:
        writer (.creation.writer.Writer): The writer used to write the data.
        meta_only (bool): Whether only callbacks for a metadata dataset creation should be returned.

    Returns:
        Callback: The composed selection of common callbacks.
    """
    # All metadata callbacks are always included.
    callbacks = [MonitoringCallback(), WriteFilesCallback(writer), WriteImageInformationCallback(writer),
                 WriteNamesCallback(writer), WriteEssentialCallback(writer)]
    if not meta_only:
        # BUGFIX: the raw data is what a metadata-only dataset omits. The original always
        # added WriteDataCallback and instead made WriteNamesCallback conditional, i.e.
        # meta_only=True still wrote the raw data but dropped the (meta) entry names.
        callbacks.append(WriteDataCallback(writer))
    return ComposeCallback(callbacks)
|
class Load(abc.ABC):
    """Interface for loading the data during the dataset creation in :meth:`.Traverser.traverse`

    .. automethod:: __call__
    """

    @abc.abstractmethod
    def __call__(self, file_name: str, id_: str, category: str, subject_id: str) -> typing.Tuple[(np.ndarray, typing.Union[(conv.ImageProperties, None)])]:
        """Loads the data from the file system according to the implementation.

        Args:
            file_name (str): Path to the corresponding data.
            id_ (str): Identifier for the entry of the category, e.g., "Flair".
            category (str): Name of the category, e.g., 'images'.
            subject_id (str): Identifier of the current subject.

        Returns:
            tuple: A numpy array containing the loaded data and :class:`ImageProperties` describing the data.
                :class:`.ImageProperties` is :code:`None` if the loaded data does not contain further properties.
        """
        pass
|
class LoadDefault(Load):
    """The default loader.

    It loads every data item (id/entry, category) for each subject as :code:`sitk.Image`
    and the corresponding :class:`.ImageProperties`.
    """

    def __call__(self, file_name: str, id_: str, category: str, subject_id: str) -> typing.Tuple[(np.ndarray, typing.Union[(conv.ImageProperties, None)])]:
        # Read via SimpleITK and hand back the voxel data together with its meta-data.
        image = sitk.ReadImage(file_name)
        return sitk.GetArrayFromImage(image), conv.ImageProperties(image)
|
def default_concat(data: typing.List[np.ndarray]) -> np.ndarray:
    """Default concatenation function used to combine all entries from a category (e.g. T1, T2 data from "images"
    category) in :meth:`.Traverser.traverse`.

    Args:
        data (list): List of numpy.ndarray entries to be concatenated.

    Returns:
        numpy.ndarray: Concatenated entry, stacked along a new trailing (channel) axis.
    """
    return np.stack(data, axis=-1)
|
class Traverser():
    def __init__(self, categories: typing.Union[str, typing.Tuple[str, ...]] = None):
        """Class managing the dataset creation process.

        Args:
            categories (str or tuple of str): The categories to traverse. If None, then all categories of a
                :class:`.SubjectFile` will be traversed.
        """
        if isinstance(categories, str):
            categories = (categories,)
        self.categories = categories

    def traverse(self, subject_files: typing.List[subj.SubjectFile], load=load.LoadDefault(), callback: cb.Callback = None,
                 transform: tfm.Transform = None, concat_fn=default_concat):
        """Controls the actual dataset creation. It goes through the file list, loads the files,
        applies transformation to the data, and calls the callbacks to do the storing (or other stuff).

        Args:
            subject_files (list): list of :class:`SubjectFile` to be processed.
            load (callable): A load function or :class:`.Load` instance that performs the data loading.
            callback (.Callback): A callback or composed (:class:`.ComposeCallback`) callback performing the
                storage of the loaded data (and other things such as logging).
            transform (.Transform): Transformation to be applied to the data after loading
                and before :meth:`Callback.on_subject` is called.
            concat_fn (callable): Function that concatenates all the entries of a category
                (e.g. T1, T2 data from "images" category). Default is :func:`default_concat`.

        Raises:
            ValueError: If no files are given, the files are of a wrong type, or no callback is provided.
        """
        if len(subject_files) == 0:
            raise ValueError('No files')
        if not isinstance(subject_files[0], subj.SubjectFile):
            # bug fix: subj.SubjectFile.__class__.__name__ is 'type' (the metaclass name), which produced
            # the misleading message "files must be of type type"; __name__ yields 'SubjectFile'.
            raise ValueError('files must be of type {}'.format(subj.SubjectFile.__name__))
        if callback is None:
            raise ValueError('callback can not be None')
        if self.categories is None:
            self.categories = subject_files[0].categories

        # announce the subjects, categories, and per-category entry names to the callbacks
        callback_params = {defs.KEY_SUBJECT_FILES: subject_files}
        for category in self.categories:
            callback_params.setdefault(defs.KEY_CATEGORIES, []).append(category)
            callback_params[defs.KEY_PLACEHOLDER_NAMES.format(category)] = self._get_names(subject_files, category)
        callback.on_start(callback_params)

        for subject_index, subject_file in enumerate(subject_files):
            transform_params = {defs.KEY_SUBJECT_INDEX: subject_index}
            for category in self.categories:
                category_list = []
                category_property = None  # the properties of the first entry represent the whole category
                for id_, file_path in subject_file.categories[category].entries.items():
                    np_data, data_property = load(file_path, id_, category, subject_file.subject)
                    category_list.append(np_data)
                    if category_property is None:
                        category_property = data_property
                category_data = concat_fn(category_list)
                transform_params[category] = category_data
                transform_params[defs.KEY_PLACEHOLDER_PROPERTIES.format(category)] = category_property
            if transform:
                transform_params = transform(transform_params)
            callback.on_subject({**transform_params, **callback_params})
        callback.on_end(callback_params)

    @staticmethod
    def _get_names(subject_files: typing.List[subj.SubjectFile], category: str) -> list:
        """Collects the entry identifiers of a category and verifies they are identical across all subjects.

        Raises:
            ValueError: If the subjects do not share the same entry identifiers for the category.
        """
        names = subject_files[0].categories[category].entries.keys()
        if not all(s.categories[category].entries.keys() == names for s in subject_files):
            raise ValueError('Inconsistent {} identifiers in the subject list'.format(category))
        return list(names)
|
class Writer(abc.ABC):
    """Represents the abstract dataset writer defining an interface for the writing process."""

    def __enter__(self):
        # context-manager entry: open the underlying resource and return the writer itself
        self.open()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # context-manager exit: always close, regardless of whether an exception occurred
        self.close()

    def __del__(self):
        # safety net: release the resource on garbage collection
        # NOTE(review): assumes close() is idempotent and safe to call before open() — confirm in subclasses
        self.close()

    @abc.abstractmethod
    def close(self):
        """Close the writer."""
        pass

    @abc.abstractmethod
    def open(self):
        """Open the writer."""
        pass

    @abc.abstractmethod
    def reserve(self, entry: str, shape: tuple, dtype=None):
        """Reserve space in the dataset for later writing.

        Args:
            entry (str): The dataset entry to be created.
            shape (tuple): The shape to be reserved.
            dtype: The dtype.
        """
        pass

    @abc.abstractmethod
    def fill(self, entry: str, data, index: expr.IndexExpression=None):
        """Fill parts of a reserved dataset entry.

        Args:
            entry (str): The dataset entry to be filled.
            data (object): The data to write.
            index (.IndexExpression): The slicing expression.
        """
        pass

    @abc.abstractmethod
    def write(self, entry: str, data, dtype=None):
        """Create and write entry.

        Args:
            entry (str): The dataset entry to be written.
            data (object): The data to write.
            dtype: The dtype.
        """
        pass
|
class Hdf5Writer(Writer):
    """Writer implementation for HDF5 dataset files."""

    # variable-length string dtype used to store str data in HDF5
    str_type = h5py.special_dtype(vlen=str)

    def __init__(self, file_path: str) -> None:
        """Writer class for HDF5 file type.

        Args:
            file_path (str): The path to the dataset file to write.
        """
        self.h5 = None  # h5py.File handle; None while the writer is closed
        self.file_path = file_path

    def close(self):
        """see :meth:`.Writer.close`"""
        if (self.h5 is not None):
            self.h5.close()
            self.h5 = None

    def open(self):
        """see :meth:`.Writer.open`"""
        # mode 'a': append to an existing file or create a new one
        self.h5 = h5py.File(self.file_path, mode='a', libver='latest')

    def reserve(self, entry: str, shape: tuple, dtype=None):
        """see :meth:`.Writer.reserve`"""
        # map Python/NumPy string dtypes onto the HDF5 variable-length string type
        if ((dtype is str) or (dtype == 'str') or (isinstance(dtype, np.dtype) and (dtype.type == np.str_))):
            dtype = self.str_type
        self.h5.create_dataset(entry, shape, dtype=dtype)

    def fill(self, entry: str, data, index: expr.IndexExpression=None):
        """see :meth:`.Writer.fill`"""
        # NOTE(review): identity comparison ('is') of the stored dataset's dtype against str_type may be
        # False even for an equal dtype read back from the file; '==' might be intended — confirm.
        if (self.h5[entry].dtype is self.str_type):
            data = np.asarray(data, dtype=object)
        if (index is None):
            index = expr.IndexExpression()
        self.h5[entry][index.expression] = data

    def write(self, entry: str, data, dtype=None):
        """see :meth:`.Writer.write`"""
        if ((dtype is str) or (dtype == 'str') or (isinstance(dtype, np.dtype) and (dtype.type == np.str_))):
            dtype = self.str_type
            data = np.asarray(data, dtype=object)
        # overwrite an already existing entry
        if (entry in self.h5):
            del self.h5[entry]
        self.h5.create_dataset(entry, dtype=dtype, data=data)
|
def get_writer(file_path: str) -> Writer:
    """Get the dataset writer corresponding to the file extension.

    Args:
        file_path (str): The path of the dataset file to be written.

    Returns:
        .creation.writer.Writer: Writer corresponding to dataset file extension.

    Raises:
        ValueError: If no writer is registered for the file extension.
    """
    extension = os.path.splitext(file_path)[1]
    if extension in writer_registry:
        return writer_registry[extension](file_path)
    raise ValueError('unknown dataset file extension "{}"'.format(extension))
|
def subject_index_to_str(subject_index, nb_subjects):
    """Formats a subject index zero-padded to the width of the total subject count.

    Args:
        subject_index (int): The index of the subject.
        nb_subjects (int): The total number of subjects (determines the padding width).

    Returns:
        str: The zero-padded index string, e.g. ``'007'`` for index 7 of 100 subjects.
    """
    width = len(str(nb_subjects))
    return str(subject_index).zfill(width)
|
def convert_to_string(data):
    """Converts extracted string data from bytes to string, as strings are handled as bytes since h5py >= 3.0.

    The function has been introduced as part of an `issue <https://github.com/rundherum/pymia/issues/40>`_.

    Args:
        data: The data to be converted; either :obj:`bytes` or list of :obj:`bytes`.

    Returns:
        The converted data as :obj:`str` or list of :obj:`str`.
    """
    if isinstance(data, list):
        # recurse into (possibly nested) lists
        return [convert_to_string(item) for item in data]
    if isinstance(data, bytes):
        return data.decode('utf-8')
    # anything else is passed through unchanged
    return data
|
class PymiaDatasource():
    def __init__(self, dataset_path: str, indexing_strategy: idx.IndexingStrategy=None, extractor: extr.Extractor=None, transform: tfm.Transform=None, subject_subset: list=None, init_reader_once: bool=True) -> None:
        """Provides convenient and adaptable reading of the data from a created dataset.

        Args:
            dataset_path (str): The path to the dataset to be read from.
            indexing_strategy (.IndexingStrategy): Strategy defining how the data is indexed for reading.
            extractor (.Extractor): Extractor or multiple extractors (:class:`.ComposeExtractor`) extracting the
                desired data from the dataset.
            transform (.Transform): Transformation(s) to be applied to the extracted data.
            subject_subset (list): A list of subject identifiers defining a subset of subject to be processed.
            init_reader_once (bool): Whether the reader is initialized once or for every retrieval
                (default: :code:`True`)

        Examples:
            The class mainly allows two modes of operation. The first mode is by extracting the data by index.

            >>> ds = PymiaDatasource(...)
            >>> for i in range(len(ds)):
            >>>     sample = ds[i]

            The second mode of operation is by directly extracting data.

            >>> ds = PymiaDatasource(...)
            >>> # Different from ds[index] since the extractor and transform override the ones in ds
            >>> sample = ds.direct_extract(extractor, index, transform=transform)

            Typically, the first mode is used to loop over the entire dataset as fast as possible, extracting just
            the necessary information, such as data chunks (e.g., slice, patch, sub-volume). Less critical
            information (e.g. image shape, orientation) not required with every chunk of data can independently be
            extracted with the second mode of operation.
        """
        self.dataset_path = dataset_path
        self.indexing_strategy = None  # assigned via set_indexing_strategy below
        self.extractor = extractor
        self.transform = transform
        self.subject_subset = subject_subset
        self.init_reader_once = init_reader_once
        self.indices = []
        'list: A list containing all sample indices. This is a mapping from item `i` to tuple \n        `(subject_index, index_expression)`.'
        self.reader = None  # lazily opened reader; kept open across calls when init_reader_once is True
        if (indexing_strategy is None):
            indexing_strategy = idx.EmptyIndexing()
        self.set_indexing_strategy(indexing_strategy, subject_subset)

    def close_reader(self):
        """Close the reader."""
        if (self.reader is not None):
            self.reader.close()
            self.reader = None

    def set_extractor(self, extractor: extr.Extractor):
        """Set the extractor(s).

        Args:
            extractor (.Extractor): Extractor or multiple extractors (:class:`.ComposeExtractor`) extracting the
                desired data from the dataset.
        """
        self.extractor = extractor

    def set_indexing_strategy(self, indexing_strategy: idx.IndexingStrategy, subject_subset: list=None):
        """Set (or modify) the indexing strategy.

        Rebuilds :code:`self.indices` as (subject_index, index_expression) tuples for all selected subjects.

        Args:
            indexing_strategy (.IndexingStrategy): Strategy defining how the data is indexed for reading.
            subject_subset (list): A list of subject identifiers defining a subset of subject to be processed.
        """
        self.indices.clear()
        self.indexing_strategy = indexing_strategy
        with rd.get_reader(self.dataset_path) as reader:
            all_subjects = reader.get_subjects()
            last_shape = None  # cache: re-use the strategy's indices when consecutive subjects share a shape
            for subject_idx in range(len(all_subjects)):
                if ((subject_subset is None) or (all_subjects[subject_idx] in subject_subset)):
                    current_shape = reader.get_shape(subject_idx)
                    if (not (last_shape == current_shape)):
                        subject_indices = self.indexing_strategy(current_shape)
                        last_shape = current_shape
                    # pair every index expression with the subject it belongs to
                    subject_and_indices = zip((len(subject_indices) * [subject_idx]), subject_indices)
                    self.indices.extend(subject_and_indices)

    def set_transform(self, transform: tfm.Transform):
        """Set the transform.

        Args:
            transform (.Transform): Transformation(s) to be applied to the extracted data.
        """
        self.transform = transform

    def get_subjects(self):
        """Get all the subjects in the dataset.

        Returns:
            list: All subject identifiers in the dataset.
        """
        with rd.get_reader(self.dataset_path) as reader:
            return reader.get_subjects()

    def direct_extract(self, extractor: extr.Extractor, subject_index: int, index_expr: expr.IndexExpression=None, transform: tfm.Transform=None):
        """Extract data directly, bypassing the extractors and transforms of the instance.

        The purpose of this method is to enable extraction of data that is not required for every data chunk
        (e.g., slice, patch, sub-volume) but only from time to time e.g., image shape, origin.

        Args:
            extractor (.Extractor): Extractor or multiple extractors (:class:`.ComposeExtractor`) extracting the
                desired data from the dataset.
            subject_index (int): Index of the subject to be extracted.
            index_expr (.IndexExpression): The indexing to extract a chunk of data only.
                Not required if only image related information (e.g., image shape, origin) should be extracted.
                Needed when desiring a chunk of data (e.g., slice, patch, sub-volume).
            transform (.Transform): Transformation(s) to be applied to the extracted data.

        Returns:
            dict: Extracted data in a dictionary. Keys are defined by the used :class:`.Extractor`.
        """
        if (index_expr is None):
            index_expr = expr.IndexExpression()
        params = {defs.KEY_SUBJECT_INDEX: subject_index, defs.KEY_INDEX_EXPR: index_expr}
        extracted = {}
        if (not self.init_reader_once):
            # open and close the reader for this single extraction
            with rd.get_reader(self.dataset_path) as reader:
                extractor.extract(reader, params, extracted)
        else:
            # open the reader once and keep it open for subsequent extractions
            if (self.reader is None):
                self.reader = rd.get_reader(self.dataset_path, direct_open=True)
            extractor.extract(self.reader, params, extracted)
        if transform:
            extracted = transform(extracted)
        return extracted

    def __len__(self):
        # number of indexed samples (not subjects)
        return len(self.indices)

    def __getitem__(self, item):
        (subject_index, index_expr) = self.indices[item]
        extracted = self.direct_extract(self.extractor, subject_index, index_expr, self.transform)
        extracted[defs.KEY_SAMPLE_INDEX] = item
        return extracted

    def __del__(self):
        # make sure a kept-open reader is released on garbage collection
        self.close_reader()
|
class Reader(abc.ABC):
    """Abstract dataset reader defining the interface for the reading process."""

    def __init__(self, file_path: str) -> None:
        """Abstract dataset reader.

        Args:
            file_path (str): The path to the dataset file.
        """
        super().__init__()
        self.file_path = file_path

    def __enter__(self):
        # context-manager entry: open the underlying resource and return the reader itself
        self.open()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # context-manager exit: always close, regardless of whether an exception occurred
        self.close()

    def __del__(self):
        # safety net: release the resource on garbage collection
        # NOTE(review): assumes close() is idempotent and safe to call before open() — confirm in subclasses
        self.close()

    @abc.abstractmethod
    def get_subject_entries(self) -> list:
        """Get the dataset entries holding the subject's data.

        Returns:
            list: The list of subject entry strings.
        """
        pass

    @abc.abstractmethod
    def get_shape(self, subject_index: int) -> list:
        """Get the shape from an entry.

        Args:
            subject_index (int): The index of the subject.

        Returns:
            list: The shape of each dimension.
        """
        pass

    @abc.abstractmethod
    def get_subjects(self) -> list:
        """Get the subject names in the dataset.

        Returns:
            list: The list of subject names.
        """
        pass

    @abc.abstractmethod
    def read(self, entry: str, index: expr.IndexExpression=None):
        """Read a dataset entry.

        Args:
            entry (str): The dataset entry.
            index (expr.IndexExpression): The slicing expression.

        Returns:
            The read data.
        """
        pass

    @abc.abstractmethod
    def has(self, entry: str) -> bool:
        """Check whether a dataset entry exists.

        Args:
            entry (str): The dataset entry.

        Returns:
            bool: Whether the entry exists.
        """
        pass

    @abc.abstractmethod
    def open(self):
        """Open the reader."""
        pass

    @abc.abstractmethod
    def close(self):
        """Close the reader."""
        pass
|
class Hdf5Reader(Reader):
    """Represents the dataset reader for HDF5 files."""

    def __init__(self, file_path: str, category=defs.KEY_IMAGES) -> None:
        """Initializes a new instance.

        Args:
            file_path (str): The path to the dataset file.
            category (str): The category of an entry that defines the shape request.
        """
        super().__init__(file_path)
        self.h5 = None  # h5py.File handle; None while the reader is closed
        self.category = category

    def get_subject_entries(self) -> list:
        """see :meth:`.Reader.get_subject_entries`"""
        nb_subjects = len(self.get_subjects())
        return [defs.subject_index_to_str(i, nb_subjects) for i in range(nb_subjects)]

    def get_shape(self, subject_index: int) -> list:
        """see :meth:`.Reader.get_shape`"""
        return self.read(defs.LOC_SHAPE_PLACEHOLDER.format(self.category), expr.IndexExpression(subject_index)).tolist()

    def get_subjects(self) -> list:
        """see :meth:`.Reader.get_subjects`"""
        # h5py >= 3.0 returns strings as bytes; convert them back to str
        return byte_converter.convert_to_string(self.read(defs.LOC_SUBJECT))

    def read(self, entry: str, index: expr.IndexExpression = None):
        """see :meth:`.Reader.read`"""
        if index is None:
            data = self.h5[entry][()]  # read the entire dataset
        else:
            data = self.h5[entry][index.expression]
        # bug fix: np.object was removed in NumPy >= 1.24; the builtin `object` is the canonical
        # spelling and compares equally for object dtypes.
        if isinstance(data, np.ndarray) and (data.dtype == object):
            return data.tolist()
        return data

    def has(self, entry: str) -> bool:
        """see :meth:`.Reader.has`"""
        return entry in self.h5

    def open(self):
        """see :meth:`.Reader.open`"""
        self.h5 = h5py.File(self.file_path, mode='r', libver='latest')

    def close(self):
        """see :meth:`.Reader.close`"""
        if self.h5 is not None:
            self.h5.close()
            self.h5 = None
|
def get_reader(file_path: str, direct_open: bool = False) -> Reader:
    """Get the dataset reader corresponding to the file extension.

    Args:
        file_path (str): The path to the dataset file.
        direct_open (bool): Whether the file should directly be opened.

    Returns:
        Reader: Reader corresponding to dataset file extension.

    Raises:
        ValueError: If no reader is registered for the file extension.
    """
    extension = os.path.splitext(file_path)[1]
    if extension not in reader_registry:
        raise ValueError('unknown dataset file extension "{}"'.format(extension))
    selected_reader = reader_registry[extension](file_path)
    if direct_open:
        selected_reader.open()
    return selected_reader
|
class SelectionStrategy(abc.ABC):
    """Interface for selecting indices according some rule.

    .. automethod:: __call__
    .. automethod:: __repr__
    """

    @abc.abstractmethod
    def __call__(self, sample: dict) -> bool:
        """
        Args:
            sample (dict): A sample extracted from :class:`.PymiaDatasource`.

        Returns:
            bool: Whether or not the sample should be considered.
        """
        pass

    def __repr__(self) -> str:
        """
        Returns:
            str: Representation of the strategy. Should include attributes such that it uniquely defines
            the strategy.
        """
        return self.__class__.__name__
|
class NonConstantSelection(SelectionStrategy):
    """Selects samples whose image data is not constant (optionally checked slice-wise along one axis)."""

    def __init__(self, loop_axis=None) -> None:
        """
        Args:
            loop_axis (int): If given, the check is performed per slice along this axis and the sample is
                selected as soon as any slice is non-constant; if None, the whole array is checked at once.
        """
        super().__init__()
        self.loop_axis = loop_axis

    def __call__(self, sample) -> bool:
        image_data = sample[defs.KEY_IMAGES]
        if self.loop_axis is None:
            return not self._all_equal(image_data)
        slicing = [slice(None)] * image_data.ndim
        for i in range(image_data.shape[self.loop_axis]):
            slicing[self.loop_axis] = i
            # bug fix: NumPy requires a tuple for multi-axis indexing; indexing with a list is
            # deprecated/removed (it is interpreted as fancy indexing).
            slice_data = image_data[tuple(slicing)]
            if not self._all_equal(slice_data):
                return True
        return False

    @staticmethod
    def _all_equal(image_data):
        # constant iff every element equals the first element
        return np.all(image_data == image_data.ravel()[0])

    def __repr__(self) -> str:
        return '{}({})'.format(self.__class__.__name__, self.loop_axis)
|
class NonBlackSelection(SelectionStrategy):
    """Selects only samples containing at least one image value above a "black" threshold."""

    def __init__(self, black_value: float = 0.0) -> None:
        self.black_value = black_value  # values strictly above this threshold count as non-black

    def __call__(self, sample) -> bool:
        above_threshold = sample[defs.KEY_IMAGES] > self.black_value
        return above_threshold.any()

    def __repr__(self) -> str:
        return '{}({})'.format(self.__class__.__name__, self.black_value)
|
class PercentileSelection(SelectionStrategy):
    """Selects samples based on a percentile of their own intensity values."""

    def __init__(self, percentile: float) -> None:
        # percentile in [0, 100] as accepted by np.percentile
        self.percentile = percentile

    def __call__(self, sample) -> bool:
        image_data = sample[defs.KEY_IMAGES]
        # the percentile is computed on this sample itself, not on a global statistic
        percentile_value = np.percentile(image_data, self.percentile)
        # NOTE(review): '.all()' only holds when every value reaches the sample's own percentile
        # (i.e., essentially constant data for percentile > 0); '.any()' may be the intended
        # semantics — confirm before relying on this strategy.
        return (image_data >= percentile_value).all()

    def __repr__(self) -> str:
        # NOTE(review): format '{} ({})' (with space) differs from other strategies' '{}({})'
        return '{} ({})'.format(self.__class__.__name__, self.percentile)
|
class WithForegroundSelection(SelectionStrategy):
    """Selects only samples whose label data contains at least one non-zero (foreground) value."""

    def __call__(self, sample) -> bool:
        return sample[defs.KEY_LABELS].any()
|
class SubjectSelection(SelectionStrategy):
    """Select subjects by their name or index."""

    def __init__(self, subjects) -> None:
        """
        Args:
            subjects: A subject name (str), a subject index (int), or a sequence thereof.
        """
        # normalize a single name/index to a tuple
        if isinstance(subjects, (int, str)):
            subjects = (subjects,)
        self.subjects = subjects

    def __call__(self, sample) -> bool:
        return (sample[defs.KEY_SUBJECT] in self.subjects) or (sample[defs.KEY_SUBJECT_INDEX] in self.subjects)

    def __repr__(self) -> str:
        # bug fix: str.join raises TypeError on int subject indices, which __init__ explicitly
        # accepts; stringify each entry first.
        return '{} ({})'.format(self.__class__.__name__, ','.join(str(s) for s in self.subjects))
|
class ComposeSelection(SelectionStrategy):
    """Combines multiple selection strategies with a logical AND (all must accept the sample)."""

    def __init__(self, strategies) -> None:
        self.strategies = strategies

    def __call__(self, sample) -> bool:
        # short-circuit: reject as soon as one strategy rejects
        for strategy in self.strategies:
            if not strategy(sample):
                return False
        return True

    def __repr__(self) -> str:
        return '|'.join(repr(strategy) for strategy in self.strategies)
|
def select_indices(data_source: ds.PymiaDatasource, selection_strategy: SelectionStrategy):
    """Returns the indices of the samples in data_source accepted by selection_strategy.

    Args:
        data_source (.PymiaDatasource): The data source to iterate.
        selection_strategy (SelectionStrategy): The strategy deciding which samples to keep.

    Returns:
        list: The indices of the selected samples.
    """
    return [index for index, sample in enumerate(data_source) if selection_strategy(sample)]
|
class IndexExpression():
    def __init__(self, indexing: t.Union[int, tuple, t.List[int], t.List[tuple], t.List[list]] = None, axis: t.Union[int, tuple] = None) -> None:
        """Defines the indexing of a chunk of raw data in the dataset.

        Args:
            indexing (int, tuple, list): The indexing. If :obj:`int` or list of :obj:`int`, individual entries of
                an axis are indexed. If :obj:`tuple` or list of :obj:`tuple`, the axis should be sliced.
            axis (int, tuple): The axis/axes to the corresponding indexing. If :obj:`tuple`, the length has to be
                equal to the list length of :obj:`indexing`.
        """
        self.expression = None
        'tuple of :obj:`slice`/:obj:`int` objects defining the indexing of each axis'
        self.set_indexing(indexing, axis)

    def set_indexing(self, indexing: t.Union[int, tuple, slice, t.List[int], t.List[tuple], t.List[list]], axis: t.Union[int, tuple] = None):
        """Sets (or replaces) the indexing.

        Args:
            indexing (int, tuple, slice, list): A single index/slice/(start, stop) tuple or a list thereof;
                None selects everything.
            axis (int, tuple): The axis/axes the indexing applies to; defaults to the leading axes.

        Raises:
            ValueError: If an index is of an unsupported type.
        """
        if indexing is None:
            self.expression = slice(None)
            return
        # fix: also wrap a bare slice (allowed per the signature); previously a bare slice
        # reached len(indexing) below and raised a TypeError
        if isinstance(indexing, (int, tuple, slice)):
            indexing = [indexing]
        if axis is None:
            axis = tuple(range(len(indexing)))
        if isinstance(axis, int):
            axis = (axis,)
        # pre-fill all axes up to the highest referenced one with full slices
        expression = [slice(None) for _ in range(max(axis) + 1)]
        for a, index in zip(axis, indexing):
            if isinstance(index, int):
                expression[a] = index
            elif isinstance(index, (tuple, list)):
                start, stop = index
                expression[a] = slice(start, stop)
            elif isinstance(index, slice):
                expression[a] = index
            else:
                raise ValueError('Unknown type "{}" of index'.format(type(index)))
        self.expression = tuple(expression)

    def get_indexing(self):
        """
        Returns:
            list: a list of entries defining the indexing (i.e., None, index, (start, stop)) at each axis.
            Can be used to generate a new index expression.
        """
        indexing = []
        for index in self.expression:
            if index is None:
                indexing.append(None)
            elif isinstance(index, slice):
                indexing.append((index.start, index.stop))
            elif isinstance(index, int):
                indexing.append(index)
            else:
                raise ValueError("only 'int', 'slice', and 'None' types possible in expression")
        return indexing
|
class FileCategory():
    """Container holding the file path entries of a single file category."""

    def __init__(self, entries=None) -> None:
        # create a fresh dict per instance to avoid a shared mutable default
        self.entries = {} if entries is None else entries
|
class SubjectFile():
    def __init__(self, subject: str, **file_groups) -> None:
        """Holds the file information of a subject.

        Args:
            subject (str): The subject identifier.
            **file_groups (dict): The groups of file types containing the file path entries.
        """
        self.subject = subject
        self.categories = {name: FileCategory(files) for name, files in file_groups.items()}
        self._check_validity()

    def _check_validity(self):
        """Raises a ValueError if an entry identifier appears in more than one category."""
        all_file_ids = []
        for category in self.categories.values():
            all_file_ids.extend(category.entries.keys())
        if len(all_file_ids) != len(set(all_file_ids)):
            raise ValueError('Identifiers must be unique')

    def get_all_files(self):
        """
        Returns:
            dict: All file path entries of a subject `flattened` (without groups/category).
        """
        flattened = {}
        for category in self.categories.values():
            flattened.update(category.entries)
        return flattened
|
class Transform(abc.ABC):
    """Interface for transformations applied to an extracted sample dictionary.

    .. automethod:: __call__
    """

    @abc.abstractmethod
    def __call__(self, sample: dict) -> dict:
        """Applies the transformation to the sample and returns the (possibly modified) sample."""
        pass
|
class ComposeTransform(Transform):
    """Applies a sequence of transforms one after another to a sample."""

    def __init__(self, transforms: typing.Iterable[Transform]) -> None:
        self.transforms = transforms

    def __call__(self, sample: dict) -> dict:
        # feed the output of each transform into the next one
        for transform in self.transforms:
            sample = transform(sample)
        return sample
|
class LoopEntryTransform(Transform, abc.ABC):
    """Base class for transforms that are applied entry-wise, optionally looping over one axis."""

    def __init__(self, loop_axis=None, entries=()) -> None:
        super().__init__()
        self.loop_axis = loop_axis  # axis to loop over; None applies the transform to the whole array
        self.entries = entries  # sample keys the transform is applied to

    @staticmethod
    def loop_entries(sample: dict, fn, entries, loop_axis=None):
        # Applies fn to each of the given entries; missing entries either raise (depending on the
        # module-level raise_error_if_entry_not_extracted flag) or are silently skipped.
        for entry in entries:
            if (entry not in sample):
                if raise_error_if_entry_not_extracted:
                    raise ValueError(ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
                continue
            np_entry = check_and_return(sample[entry], np.ndarray)
            if (loop_axis is None):
                np_entry = fn(np_entry, entry)
            else:
                # apply fn slice-wise along loop_axis, writing results back into the array in place
                slicing = [slice(None) for _ in range(np_entry.ndim)]
                for i in range(np_entry.shape[loop_axis]):
                    slicing[loop_axis] = i
                    np_entry[tuple(slicing)] = fn(np_entry[tuple(slicing)], entry, i)
            sample[entry] = np_entry
        return sample

    def __call__(self, sample: dict) -> dict:
        return self.loop_entries(sample, self.transform_entry, self.entries, self.loop_axis)

    @abc.abstractmethod
    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        """Transforms a single entry (or a single slice of it when looping over an axis)."""
        pass
|
class IntensityRescale(LoopEntryTransform):
    """Linearly rescales the intensities of the entries to the interval [lower, upper]."""

    def __init__(self, lower, upper, loop_axis=None, entries=(defs.KEY_IMAGES,)) -> None:
        super().__init__(loop_axis=loop_axis, entries=entries)
        self.lower = lower
        self.upper = upper

    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        return self._normalize(np_entry, self.lower, self.upper)

    @staticmethod
    def _normalize(arr: np.ndarray, lower, upper):
        """Rescales arr to [lower, upper]; raises ValueError for constant input."""
        dtype = arr.dtype
        min_, max_ = arr.min(), arr.max()
        if min_ == max_:
            raise ValueError('cannot normalize when min == max')
        unit_scaled = (arr - min_) / (max_ - min_)  # map to [0, 1]
        rescaled = unit_scaled * (upper - lower) + lower  # map to [lower, upper]
        return rescaled.astype(dtype)  # keep the original dtype
|
class IntensityNormalization(LoopEntryTransform):
    """Normalizes the entries to zero mean and unit variance (z-score)."""

    def __init__(self, loop_axis=None, entries=(defs.KEY_IMAGES,)) -> None:
        super().__init__(loop_axis=loop_axis, entries=entries)
        # the normalization function; defaults to z-score normalization
        self.normalize_fn = self._normalize

    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        if not np.issubdtype(np_entry.dtype, np.floating):
            raise ValueError('Array must be floating type')
        # fix: use the configurable normalize_fn attribute (it was assigned in __init__ but never
        # used; the code always called the static _normalize directly). Default behavior unchanged.
        return self.normalize_fn(np_entry)

    @staticmethod
    def _normalize(arr: np.ndarray):
        return (arr - arr.mean()) / arr.std()
|
class LambdaTransform(LoopEntryTransform):
    """Applies a user-provided function to the entries."""

    def __init__(self, lambda_fn, loop_axis=None, entries=(defs.KEY_IMAGES,)) -> None:
        """
        Args:
            lambda_fn (callable): Function applied to each entry; takes and returns a numpy.ndarray.
            loop_axis (int): Axis to loop over; None applies the function to the whole array.
            entries (tuple): The sample entries the function is applied to.
        """
        super().__init__(loop_axis=loop_axis, entries=entries)
        self.lambda_fn = lambda_fn

    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        return self.lambda_fn(np_entry)
|
class ClipPercentile(LoopEntryTransform):
    """Clips the entry values at an upper and a lower percentile of the data itself."""

    def __init__(self, upper_percentile: float, lower_percentile: float = None, loop_axis=None, entries=(defs.KEY_IMAGES,)) -> None:
        super().__init__(loop_axis=loop_axis, entries=entries)
        self.upper_percentile = upper_percentile
        # default to the symmetric lower percentile, e.g. upper=95 -> lower=5
        self.lower_percentile = (100 - upper_percentile) if lower_percentile is None else lower_percentile

    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        return self._clip(np_entry)

    def _clip(self, arr: np.ndarray):
        # NOTE: the lower bound is computed on the already upper-clipped data, matching the
        # original sequential behavior; arr is modified in place.
        upper_bound = np.percentile(arr, self.upper_percentile)
        arr[arr > upper_bound] = upper_bound
        lower_bound = np.percentile(arr, self.lower_percentile)
        arr[arr < lower_bound] = lower_bound
        return arr
|
class Relabel(LoopEntryTransform):
    """Changes label values in the entries according to a mapping."""

    def __init__(self, label_changes: typing.Dict[int, int], entries=(defs.KEY_LABELS,)) -> None:
        """
        Args:
            label_changes (dict): Mapping with the new label as key and the old label as value
                (per the loop variable naming below) — confirm against callers.
            entries (tuple): The sample entries the relabeling is applied to.
        """
        super().__init__(loop_axis=None, entries=entries)
        self.label_changes = label_changes

    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        # NOTE(review): changes are applied sequentially in dict order; a value written by an earlier
        # change can be re-matched by a later one (chained relabeling) — confirm this is intended.
        for (new_label, old_label) in self.label_changes.items():
            np_entry[(np_entry == old_label)] = new_label
        return np_entry
|
class Reshape(LoopEntryTransform):
    """Reshapes given entries to new shapes."""

    def __init__(self, shapes: dict) -> None:
        """Initializes a new instance of the Reshape class.

        Args:
            shapes (dict): A dict with keys being the entries and the values the new shapes of the entries.
                E.g. shapes = {defs.KEY_IMAGES: (-1, 4), defs.KEY_LABELS: (-1, 1)}
        """
        super().__init__(loop_axis=None, entries=tuple(shapes.keys()))
        self.shapes = shapes

    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        target_shape = self.shapes[entry]
        return np.reshape(np_entry, target_shape)
|
class Permute(LoopEntryTransform):
    """Permutes the axes of the entries according to a fixed permutation."""

    def __init__(self, permutation: tuple, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)) -> None:
        super().__init__(loop_axis=None, entries=entries)
        self.permutation = permutation

    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        # ndarray.transpose(perm) is equivalent to np.transpose(arr, perm)
        return np_entry.transpose(self.permutation)
|
class Squeeze(LoopEntryTransform):
    """Removes axes of length one from the entries."""

    def __init__(self, entries=(defs.KEY_IMAGES, defs.KEY_LABELS), squeeze_axis=None) -> None:
        super().__init__(loop_axis=None, entries=entries)
        self.squeeze_axis = squeeze_axis  # None squeezes all singleton axes

    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        # np.squeeze(arr, axis) is equivalent to arr.squeeze(axis)
        return np.squeeze(np_entry, self.squeeze_axis)
|
class UnSqueeze(LoopEntryTransform):
    """Inserts a new axis of length one into the entries."""

    def __init__(self, axis=-1, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)) -> None:
        super().__init__(loop_axis=None, entries=entries)
        self.axis = axis  # position of the inserted axis; -1 appends a trailing axis

    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        expanded = np.expand_dims(np_entry, self.axis)
        return expanded
|
class SizeCorrection(Transform):
    """Size correction transformation.

    Corrects the size, i.e. shape, of an array to a given reference shape.
    """

    def __init__(self, shape: typing.Tuple[typing.Union[None, int], ...], pad_value: int=0, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)) -> None:
        """Initializes a new instance of the SizeCorrection class.

        Args:
            shape (tuple of ints): The reference shape in NumPy format, i.e. z-, y-, x-order. To not correct an
                axis dimension, set the axis value to None.
            pad_value (int): The value to set the padded values of the array.
            entries (tuple): The sample entries the size correction is applied to.
        """
        super().__init__()
        self.entries = entries
        self.shape = shape
        self.pad_value = pad_value

    def __call__(self, sample: dict) -> dict:
        for entry in self.entries:
            if (entry not in sample):
                # missing entries either raise (module-level flag) or are silently skipped
                if raise_error_if_entry_not_extracted:
                    raise ValueError(ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
                continue
            np_entry = check_and_return(sample[entry], np.ndarray)
            if (not (len(self.shape) <= np_entry.ndim)):
                raise ValueError('Shape dimension needs to be less or equal to {}'.format(np_entry.ndim))
            for (idx, size) in enumerate(self.shape):
                if ((size is not None) and (size < np_entry.shape[idx])):
                    # axis larger than the reference: crop centered — remove half the excess at the
                    # front, and the remainder (including the odd element) at the end
                    before = ((np_entry.shape[idx] - size) // 2)
                    after = ((np_entry.shape[idx] - ((np_entry.shape[idx] - size) // 2)) - ((np_entry.shape[idx] - size) % 2))
                    slicing = ([slice(None)] * np_entry.ndim)
                    slicing[idx] = slice(before, after)
                    np_entry = np_entry[tuple(slicing)]
                elif ((size is not None) and (size > np_entry.shape[idx])):
                    # axis smaller than the reference: pad centered with pad_value — the extra element
                    # (for an odd difference) goes to the end
                    before = ((size - np_entry.shape[idx]) // 2)
                    after = (((size - np_entry.shape[idx]) // 2) + ((size - np_entry.shape[idx]) % 2))
                    pad_width = ([(0, 0)] * np_entry.ndim)
                    pad_width[idx] = (before, after)
                    np_entry = np.pad(np_entry, pad_width, mode='constant', constant_values=self.pad_value)
            sample[entry] = np_entry
        return sample
|
class Mask(Transform):
    """Masks the entries: entry values where the mask equals mask_value are set to masking_value."""

    def __init__(self, mask_key: str, mask_value: int=0, masking_value: float=0.0, loop_axis=None, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)) -> None:
        """
        Args:
            mask_key (str): The sample key holding the mask array.
            mask_value (int): The mask value marking the positions to be masked out.
            masking_value (float): The value written to the masked positions of the entries.
            loop_axis (int): The axis along which the mask is repeated when an entry has one more
                dimension than the mask (e.g., a channel dimension).
            entries (tuple): The sample entries the masking is applied to.
        """
        super().__init__()
        self.mask_key = mask_key
        self.mask_value = mask_value
        self.masking_value = masking_value
        self.loop_axis = loop_axis
        self.entries = entries

    def __call__(self, sample: dict) -> dict:
        np_mask = check_and_return(sample[self.mask_key], np.ndarray)
        for entry in self.entries:
            if (entry not in sample):
                # missing entries either raise (module-level flag) or are silently skipped
                if raise_error_if_entry_not_extracted:
                    raise ValueError(ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
                continue
            np_entry = check_and_return(sample[entry], np.ndarray)
            if (np_mask.shape == np_entry.shape):
                # shapes match: apply the mask directly (in place)
                np_entry[(np_mask == self.mask_value)] = self.masking_value
            else:
                # shapes differ: replicate the mask along loop_axis to match the entry
                # (e.g., one mask shared by all channels)
                mask_for_np_entry = np.repeat(np.expand_dims(np_mask, self.loop_axis), np_entry.shape[self.loop_axis], axis=self.loop_axis)
                np_entry[(mask_for_np_entry == self.mask_value)] = self.masking_value
            sample[entry] = np_entry
        return sample
|
class RandomCrop(LoopEntryTransform):
    """Randomly crops the trailing dimensions of the entries to a given size.

    The random offset is drawn once per sample (when the first entry is transformed) and re-used
    for all other entries, so images and labels are cropped consistently.
    """

    def __init__(self, size: tuple, loop_axis=None, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)) -> None:
        super().__init__(loop_axis, entries)
        self.size = size
        self.slices = None  # crop slices of the current sample; set when the first entry is transformed

    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        current_size = np_entry.shape[-len(self.size):]
        dim_diff = np_entry.ndim - len(self.size)  # leading axes (e.g., channels) are kept untouched
        if entry == self.entries[0]:
            # fix: np.int was removed in NumPy >= 1.20, and the hard-coded length 3 broke crop sizes
            # of other dimensionality; use len(self.size) and the builtin int instead.
            mins = np.zeros(len(self.size), dtype=int)
            maxs = np.subtract(current_size, self.size)
            if (maxs < 0).any():
                raise ValueError('current size is not large enough for crop')
            offset = np.random.randint(mins, maxs + 1, len(self.size))
            self.slices = tuple(slice(offset[i], offset[i] + self.size[i]) for i in range(len(offset)))
        slices = (dim_diff * (slice(None),)) + self.slices
        return np_entry[slices]
|
def check_and_return(obj, type_):
    """Returns obj unchanged if it is an instance of type_, otherwise raises a ValueError."""
    if isinstance(obj, type_):
        return obj
    raise ValueError("entry must be '{}'".format(type_.__name__))
|
class Result():
    def __init__(self, id_: str, label: str, metric: str, value):
        """Represents a result.

        Args:
            id_ (str): The identification of the result (e.g., the subject's name).
            label (str): The label of the result (e.g., the foreground).
            metric (str): The metric.
            value (int, float): The value of the metric.
        """
        self.id_, self.label, self.metric, self.value = id_, label, metric, value
|
class Evaluator(abc.ABC):
    """Base class for evaluators that run metrics and collect :class:`Result` objects."""

    def __init__(self, metrics: typing.List[pymia_metric.Metric]):
        """Initializes the evaluator.

        Args:
            metrics (list of pymia_metric.Metric): A list of metrics.
        """
        self.metrics = metrics
        self.results = []

    @abc.abstractmethod
    def evaluate(self, prediction: typing.Union[(sitk.Image, np.ndarray)], reference: typing.Union[(sitk.Image, np.ndarray)], id_: str, **kwargs):
        """Evaluates the metrics on the provided prediction and reference.

        Args:
            prediction (typing.Union[sitk.Image, np.ndarray]): The prediction.
            reference (typing.Union[sitk.Image, np.ndarray]): The reference.
            id_ (str): The identification of the case to evaluate.
        """
        raise NotImplementedError

    def clear(self):
        """Discards all collected results."""
        # rebind (rather than mutate) so callers holding the old list are unaffected
        self.results = []
|
class SegmentationEvaluator(Evaluator):
    def __init__(self, metrics: typing.List[pymia_metric.Metric], labels: dict):
        """Represents a segmentation evaluator, evaluating metrics on predictions against references.

        Args:
            metrics (list of pymia_metric.Metric): A list of metrics.
            labels (dict): A dictionary with labels (key of type int) and label descriptions (value of type string).
        """
        super().__init__(metrics)
        self.labels = labels

    def add_label(self, label: typing.Union[(tuple, int)], description: str):
        """Adds a label with its description to the evaluation.

        Args:
            label (Union[tuple, int]): The label or a tuple of labels that should be merged.
            description (str): The label's description.
        """
        self.labels[label] = description

    def evaluate(self, prediction: typing.Union[(sitk.Image, np.ndarray)], reference: typing.Union[(sitk.Image, np.ndarray)], id_: str, **kwargs):
        """Evaluates the metrics on the provided prediction and reference image.

        Args:
            prediction (typing.Union[sitk.Image, np.ndarray]): The predicted image.
            reference (typing.Union[sitk.Image, np.ndarray]): The reference image.
            id_ (str): The identification of the case to evaluate.

        Raises:
            ValueError: If no labels are defined (see add_label), or if an image has more
                than one component per pixel.
        """
        if not self.labels:
            raise ValueError('No labels to evaluate defined')
        if isinstance(prediction, sitk.Image) and (prediction.GetNumberOfComponentsPerPixel() > 1):
            raise ValueError('Image has more than one component per pixel')
        if isinstance(reference, sitk.Image) and (reference.GetNumberOfComponentsPerPixel() > 1):
            raise ValueError('Image has more than one component per pixel')
        prediction_array = sitk.GetArrayFromImage(prediction) if isinstance(prediction, sitk.Image) else prediction
        reference_array = sitk.GetArrayFromImage(reference) if isinstance(reference, sitk.Image) else reference
        # spacing is constant for the whole call: compute it once instead of re-deriving it
        # per label and per metric as before
        if isinstance(prediction, sitk.Image):
            spacing = prediction.GetSpacing()[::(- 1)]  # sitk spacing is (x, y, z); arrays are (z, y, x)
        else:
            spacing = (1.0,) * reference_array.ndim
        for (label, label_str) in self.labels.items():
            # binarize both images for the current label (label may be a tuple of merged labels);
            # np.isin replaces the deprecated np.in1d + ravel/reshape round trip
            prediction_of_label = np.isin(prediction_array, label, assume_unique=True).astype(np.uint8)
            reference_of_label = np.isin(reference_array, label, assume_unique=True).astype(np.uint8)
            confusion_matrix = pymia_metric.ConfusionMatrix(prediction_of_label, reference_of_label)
            distances = None  # computed lazily and shared by all distance metrics of this label
            for metric in self.metrics:
                if isinstance(metric, pymia_metric.ConfusionMatrixMetric):
                    metric.confusion_matrix = confusion_matrix
                elif isinstance(metric, pymia_metric.SpacingMetric):
                    metric.reference = reference_of_label
                    metric.prediction = prediction_of_label
                    metric.spacing = spacing
                elif isinstance(metric, pymia_metric.NumpyArrayMetric):
                    metric.reference = reference_of_label
                    metric.prediction = prediction_of_label
                elif isinstance(metric, pymia_metric.DistanceMetric):
                    if distances is None:
                        distances = pymia_metric.Distances(prediction_of_label, reference_of_label, spacing)
                    metric.distances = distances
                self.results.append(Result(id_, label_str, metric.metric, metric.calculate()))
|
class AreaMetric(SpacingMetric, abc.ABC):
    def __init__(self, metric: str='AREA'):
        """Represents an area metric base class.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def _calculate_area(self, image: np.ndarray, slice_number: int=-1) -> float:
        """Calculates the area of a slice in a binary image.

        Args:
            image (np.ndarray): The binary 2-D or 3-D image. 3-D images must be of shape
                (Z, Y, X), meaning image slices as the first dimension.
            slice_number (int): The slice number to calculate the area.
                Defaults to -1, which will calculate the area on the intermediate slice.

        Returns:
            float: The area in mm2 (pixel count times in-plane spacing).
        """
        if image.ndim == 2:
            return image.sum() * self.spacing[0] * self.spacing[1]
        # 3-D: measure a single slice; -1 selects the middle slice
        if slice_number == -1:
            slice_number = image.shape[0] // 2
        return image[slice_number, ...].sum() * self.spacing[1] * self.spacing[2]
|
class VolumeMetric(SpacingMetric, abc.ABC):
    def __init__(self, metric: str='VOL'):
        """Represents a volume metric base class.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def _calculate_volume(self, image: np.ndarray) -> float:
        """Calculates the volume of a label image.

        Args:
            image (np.ndarray): The binary 3-D label image.

        Returns:
            float: The volume in mm3 (voxel count times voxel volume).
        """
        # voxel volume is the product of the per-axis spacings
        return image.sum() * np.prod(self.spacing)
|
class Accuracy(ConfusionMatrixMetric):
    def __init__(self, metric: str='ACURCY'):
        """Represents an accuracy metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the accuracy (correct decisions over all decisions); 0 if the matrix is empty."""
        cm = self.confusion_matrix
        total = cm.tp + cm.tn + cm.fp + cm.fn
        if total == 0:
            return 0
        return (cm.tp + cm.tn) / total
|
class AdjustedRandIndex(ConfusionMatrixMetric):
    def __init__(self, metric: str='ADJRIND'):
        """Represents an adjusted rand index metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the adjusted rand index.

        Returns 0 when the index is undefined (zero denominator).
        """
        tp = self.confusion_matrix.tp
        tn = self.confusion_matrix.tn
        fp = self.confusion_matrix.fp
        fn = self.confusion_matrix.fn
        n = self.confusion_matrix.n
        # marginal sums of the 2x2 contingency table
        fp_tn = (tn + fp)
        tp_fn = (fn + tp)
        tn_fn = (tn + fn)
        tp_fp = (fp + tp)
        # sums of squared marginals (row-wise and column-wise)
        nis = ((tn_fn * tn_fn) + (tp_fp * tp_fp))
        njs = ((fp_tn * fp_tn) + (tp_fn * tp_fn))
        sum_of_squares = ((((tp * tp) + (tn * tn)) + (fp * fp)) + (fn * fn))
        # a, b, c, d: pair counts over the n*(n-1)/2 element pairs
        # (agreements and disagreements between the two partitions)
        a = (((((tp * (tp - 1)) + (fp * (fp - 1))) + (tn * (tn - 1))) + (fn * (fn - 1))) / 2.0)
        b = ((njs - sum_of_squares) / 2.0)
        c = ((nis - sum_of_squares) / 2.0)
        d = (((((n * n) + sum_of_squares) - nis) - njs) / 2.0)
        # adjusted index = (index - expected index) / (max index - expected index)
        x1 = (a - (((a + c) * (a + b)) / (((a + b) + c) + d)))
        x2 = (((a + c) + (a + b)) / 2.0)
        x3 = (((a + c) * (a + b)) / (((a + b) + c) + d))
        denominator = (x2 - x3)
        if (denominator != 0):
            return (x1 / denominator)
        else:
            return 0
|
class AreaUnderCurve(ConfusionMatrixMetric):
    def __init__(self, metric: str='AUC'):
        """Represents an area under the curve metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the area under the curve from a single operating point.

        Returns:
            float: (TPR - FPR + 1) / 2, or -inf (with a NotComputableMetricWarning)
            when either rate is undefined due to division by zero.
        """
        # guard BOTH denominators before dividing; the original computed specificity
        # before any check, dividing by zero when tn + fp == 0
        if (self.confusion_matrix.tn + self.confusion_matrix.fp) == 0:
            warnings.warn('Unable to compute area under the curve due to division by zero, returning -inf',
                          NotComputableMetricWarning)
            return float('-inf')
        if (self.confusion_matrix.tp + self.confusion_matrix.fn) == 0:
            warnings.warn('Unable to compute area under the curve due to division by zero, returning -inf',
                          NotComputableMetricWarning)
            return float('-inf')
        specificity = self.confusion_matrix.tn / (self.confusion_matrix.tn + self.confusion_matrix.fp)
        false_positive_rate = 1 - specificity
        true_positive_rate = self.confusion_matrix.tp / (self.confusion_matrix.tp + self.confusion_matrix.fn)
        return ((true_positive_rate - false_positive_rate) + 1) / 2
|
class AverageDistance(SpacingMetric):
    def __init__(self, metric: str='AVGDIST'):
        """Represents an average (Hausdorff) distance metric.

        Calculates the distance between the set of non-zero pixels of two images using the following equation:

        .. math:: AVD(A,B) = max(d(A,B), d(B,A)),

        where

        .. math:: d(A,B) = \\frac{1}{N} \\sum_{a \\in A} \\min_{b \\in B} \\lVert a - b \\rVert

        is the directed Hausdorff distance and :math:`A` and :math:`B` are the set of non-zero pixels in the images.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the average (Hausdorff) distance; inf if either mask is empty."""
        if not np.count_nonzero(self.reference):
            warnings.warn('Unable to compute average distance due to empty reference mask, returning inf',
                          NotComputableMetricWarning)
            return float('inf')
        if not np.count_nonzero(self.prediction):
            warnings.warn('Unable to compute average distance due to empty prediction mask, returning inf',
                          NotComputableMetricWarning)
            return float('inf')

        def to_image(mask):
            # sitk spacing is (x, y, z) while arrays are (z, y, x), hence the reversal
            img = sitk.GetImageFromArray(mask)
            img.SetSpacing(self.spacing[::-1])
            return img

        distance_filter = sitk.HausdorffDistanceImageFilter()
        distance_filter.Execute(to_image(self.prediction), to_image(self.reference))
        return distance_filter.GetAverageHausdorffDistance()
|
class CohenKappaCoefficient(ConfusionMatrixMetric):
    def __init__(self, metric: str='KAPPA'):
        """Represents a Cohen's kappa coefficient metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the Cohen's kappa coefficient; -inf (with warning) when undefined."""
        tp = self.confusion_matrix.tp
        tn = self.confusion_matrix.tn
        fp = self.confusion_matrix.fp
        fn = self.confusion_matrix.fn
        total = tn + fn + fp + tp
        # chance agreement expected from the marginals of the confusion matrix
        chance = ((tn + fn) * (tn + fp) + (fp + tp) * (fn + tp)) / total
        if total - chance == 0:
            warnings.warn("Unable to compute Cohen's kappa coefficient due to division by zero, returning -inf",
                          NotComputableMetricWarning)
            return float('-inf')
        observed = tp + tn  # observed agreement
        return (observed - chance) / (total - chance)
|
class DiceCoefficient(ConfusionMatrixMetric):
    def __init__(self, metric: str='DICE'):
        """Represents a Dice coefficient metric with empty target handling, defined as:

        .. math:: \\begin{cases} 1 & \\left\\vert{y}\\right\\vert = \\left\\vert{\\hat y}\\right\\vert = 0 \\\\ Dice(y,\\hat y) & \\left\\vert{y}\\right\\vert > 0 \\\\ \\end{cases}

        where :math:`\\hat y` is the prediction and :math:`y` the target.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the Dice coefficient; 1.0 if prediction and target are both empty."""
        tp = self.confusion_matrix.tp
        fp = self.confusion_matrix.fp
        fn = self.confusion_matrix.fn
        if tp == 0 and tp + fp + fn == 0:
            # both masks are empty: perfect agreement by convention
            return 1.0
        return 2 * tp / (2 * tp + fp + fn)
|
class FalseNegative(ConfusionMatrixMetric):
    def __init__(self, metric: str='FN'):
        """Represents a false negative metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Returns the number of false negatives from the confusion matrix."""
        return self.confusion_matrix.fn
|
class FalsePositive(ConfusionMatrixMetric):
    def __init__(self, metric: str='FP'):
        """Represents a false positive metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Returns the number of false positives from the confusion matrix."""
        return self.confusion_matrix.fp
|
class Fallout(ConfusionMatrixMetric):
    def __init__(self, metric: str='FALLOUT'):
        """Represents a fallout (false positive rate) metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the fallout (false positive rate).

        Returns:
            float: fp / (tn + fp), or -inf (with a NotComputableMetricWarning) when
            tn + fp == 0 (the original divided by zero here), consistent with the
            guard in Sensitivity.
        """
        negatives = self.confusion_matrix.tn + self.confusion_matrix.fp
        if negatives == 0:
            warnings.warn('Unable to compute fallout due to division by zero, returning -inf',
                          NotComputableMetricWarning)
            return float('-inf')
        # fp / (tn + fp) is algebraically 1 - specificity, computed directly
        return self.confusion_matrix.fp / negatives
|
class FalseNegativeRate(ConfusionMatrixMetric):
    def __init__(self, metric: str='FNR'):
        """Represents a false negative rate metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the false negative rate.

        Returns:
            float: fn / (tp + fn), or -inf (with a NotComputableMetricWarning) when
            tp + fn == 0 (the original divided by zero here), consistent with the
            guard in Sensitivity.
        """
        positives = self.confusion_matrix.tp + self.confusion_matrix.fn
        if positives == 0:
            warnings.warn('Unable to compute false negative rate due to division by zero, returning -inf',
                          NotComputableMetricWarning)
            return float('-inf')
        # fn / (tp + fn) is algebraically 1 - sensitivity, computed directly
        return self.confusion_matrix.fn / positives
|
class FMeasure(ConfusionMatrixMetric):
    def __init__(self, beta: float=1.0, metric: str='FMEASR'):
        """Represents a F-measure metric.

        Args:
            beta (float): The beta to trade-off precision and recall.
                Use 0.5 or 2 to calculate the F0.5 and F2 measure, respectively.
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)
        self.beta = beta

    def calculate(self):
        """Calculates the F-beta measure; 0 when the denominator is zero."""
        beta_sq = self.beta * self.beta
        # delegate to the Precision and Sensitivity metrics on the same confusion matrix
        precision_metric = Precision()
        precision_metric.confusion_matrix = self.confusion_matrix
        precision = precision_metric.calculate()
        recall_metric = Sensitivity()
        recall_metric.confusion_matrix = self.confusion_matrix
        recall = recall_metric.calculate()
        denominator = (beta_sq * precision) + recall
        if denominator == 0:
            return 0
        return (1 + beta_sq) * ((precision * recall) / denominator)
|
class GlobalConsistencyError(ConfusionMatrixMetric):
    def __init__(self, metric: str='GCOERR'):
        """Represents a global consistency error metric.

        Implementation based on Martin 2001. todo(fabianbalsiger): add entire reference

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the global consistency error.

        Returns inf (with a NotComputableMetricWarning) when any marginal of the
        confusion matrix is zero.
        """
        tp = self.confusion_matrix.tp
        tn = self.confusion_matrix.tn
        fp = self.confusion_matrix.fp
        fn = self.confusion_matrix.fn
        # all four marginals appear as denominators in e1/e2 below, so each must be non-zero
        if (((tp + fn) == 0) or ((tn + fp) == 0) or ((tp + fp) == 0) or ((tn + fn) == 0)):
            warnings.warn('Unable to compute global consistency error due to division by zero, returning inf', NotComputableMetricWarning)
            return float('inf')
        n = (((tp + tn) + fp) + fn)
        # e1/e2 are the two directed refinement errors; the GCE takes the smaller
        e1 = ((((fn * (fn + (2 * tp))) / (tp + fn)) + ((fp * (fp + (2 * tn))) / (tn + fp))) / n)
        e2 = ((((fp * (fp + (2 * tp))) / (tp + fp)) + ((fn * (fn + (2 * tn))) / (tn + fn))) / n)
        return min(e1, e2)
|
class HausdorffDistance(DistanceMetric):
    def __init__(self, percentile: float=100.0, metric: str='HDRFDST'):
        """Represents a Hausdorff distance metric.

        Calculates the distance between the set of non-zero pixels of two images using the following equation:

        .. math:: H(A,B) = max(h(A,B), h(B,A)),

        where

        .. math:: h(A,B) = \\max_{a \\in A} \\min_{b \\in B} \\lVert a - b \\rVert

        is the directed Hausdorff distance and :math:`A` and :math:`B` are the set of non-zero pixels in the images.

        Args:
            percentile (float): The percentile (0, 100] to compute, i.e. 100 computes the Hausdorff distance and
                95 computes the 95th Hausdorff distance.
            metric (str): The identification string of the metric.

        See Also:
            - Nikolov, S., Blackwell, S., Mendes, R., De Fauw, J., Meyer, C., Hughes, C., … Ronneberger, O. (2018). Deep learning to achieve clinically applicable segmentation of head and neck anatomy for radiotherapy. http://arxiv.org/abs/1809.04430
            - `Original implementation <https://github.com/deepmind/surface-distance>`_
        """
        super().__init__(metric)
        self.percentile = percentile

    def calculate(self):
        """Calculates the (percentile) Hausdorff distance; inf if either mask is empty."""
        d = self.distances

        def percentile_distance(distances, surfel_areas):
            # surface-element-area-weighted percentile of the sorted surface distances
            cum_areas = np.cumsum(surfel_areas) / np.sum(surfel_areas)
            idx = np.searchsorted(cum_areas, self.percentile / 100.0)
            return distances[min(idx, len(distances) - 1)]

        if d.distances_gt_to_pred is None or len(d.distances_gt_to_pred) == 0:
            warnings.warn('Unable to compute Hausdorff distance due to empty reference mask, returning inf',
                          NotComputableMetricWarning)
            return float('inf')
        if d.distances_pred_to_gt is None or len(d.distances_pred_to_gt) == 0:
            warnings.warn('Unable to compute Hausdorff distance due to empty prediction mask, returning inf',
                          NotComputableMetricWarning)
            return float('inf')
        gt_to_pred = percentile_distance(d.distances_gt_to_pred, d.surfel_areas_gt)
        pred_to_gt = percentile_distance(d.distances_pred_to_gt, d.surfel_areas_pred)
        return max(gt_to_pred, pred_to_gt)
|
class InterclassCorrelation(NumpyArrayMetric):
    def __init__(self, metric: str='ICCORR'):
        """Represents an interclass correlation metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the interclass correlation.

        Returns -inf (with a NotComputableMetricWarning) when the total variance is zero.
        """
        gt = self.reference.flatten()
        seg = self.prediction.flatten()
        n = gt.size
        mean_gt = gt.mean()
        mean_seg = seg.mean()
        mean = ((mean_gt + mean_seg) / 2)
        # per-element mean of the two raters (reference and prediction)
        m = ((gt + seg) / 2)
        # within-subject and between-subject sums of squares
        ssw = (np.power((gt - m), 2).sum() + np.power((seg - m), 2).sum())
        ssb = np.power((m - mean), 2).sum()
        ssw /= n
        # NOTE(review): factor 2 presumably accounts for the two raters — confirm against
        # the intended ICC formulation
        ssb = ((ssb / (n - 1)) * 2)
        if ((ssb + ssw) == 0):
            warnings.warn('Unable to compute interclass correlation due to division by zero, returning -inf', NotComputableMetricWarning)
            return float('-inf')
        return ((ssb - ssw) / (ssb + ssw))
|
class JaccardCoefficient(ConfusionMatrixMetric):
    def __init__(self, metric: str='JACRD'):
        """Represents a Jaccard coefficient metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the Jaccard coefficient (intersection over union); -inf when undefined."""
        cm = self.confusion_matrix
        union = cm.tp + cm.fp + cm.fn
        if union == 0:
            warnings.warn('Unable to compute Jaccard coefficient due to division by zero, returning -inf',
                          NotComputableMetricWarning)
            return float('-inf')
        return cm.tp / union
|
class MahalanobisDistance(NumpyArrayMetric):
    def __init__(self, metric: str='MAHLNBS'):
        """Represents a Mahalanobis distance metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the Mahalanobis distance between the reference and prediction masks.

        The distance is taken between the centroids of the two masks, under their
        pooled covariance. Returns inf (with a NotComputableMetricWarning) when either
        mask is empty.
        """
        gt_n = np.count_nonzero(self.reference)
        seg_n = np.count_nonzero(self.prediction)
        if (gt_n == 0):
            warnings.warn('Unable to compute Mahalanobis distance due to empty reference mask, returning inf', NotComputableMetricWarning)
            return float('inf')
        if (seg_n == 0):
            warnings.warn('Unable to compute Mahalanobis distance due to empty prediction mask, returning inf', NotComputableMetricWarning)
            return float('inf')
        # np.where yields one index array per axis; np.flip reverses the axis order
        # (presumably converting (z, y, x) indices to (x, y, z) — TODO confirm)
        gt_indices = np.flip(np.where((self.reference == 1)), axis=0)
        gt_mean = gt_indices.mean(axis=1)
        gt_cov = np.cov(gt_indices)
        seg_indices = np.flip(np.where((self.prediction == 1)), axis=0)
        seg_mean = seg_indices.mean(axis=1)
        seg_cov = np.cov(seg_indices)
        # pooled covariance, weighted by the number of voxels in each mask
        common_cov = (((gt_n * gt_cov) + (seg_n * seg_cov)) / (gt_n + seg_n))
        # NOTE(review): np.linalg.inv raises LinAlgError for singular covariance
        # (e.g. degenerate, coplanar, or single-voxel masks) — not handled here
        common_cov_inv = np.linalg.inv(common_cov)
        mean = (gt_mean - seg_mean)
        return math.sqrt(mean.dot(common_cov_inv).dot(mean.T))
|
class MutualInformation(ConfusionMatrixMetric):
    def __init__(self, metric: str='MUTINF'):
        """Represents a mutual information metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the mutual information MI = H1 + H2 - H12 from the confusion matrix.

        Returns -inf (with a NotComputableMetricWarning) when a marginal probability is
        0 or 1, which would require log2 of 0.
        """
        tp = self.confusion_matrix.tp
        tn = self.confusion_matrix.tn
        fp = self.confusion_matrix.fp
        fn = self.confusion_matrix.fn
        n = self.confusion_matrix.n
        # marginal counts: positives in the reference (fn_tp) and in the prediction (fp_tp)
        fn_tp = (fn + tp)
        fp_tp = (fp + tp)
        if ((fn_tp == 0) or ((fn_tp / n) == 1) or (fp_tp == 0) or ((fp_tp / n) == 1)):
            warnings.warn('Unable to compute mutual information due to log2 of 0, returning -inf', NotComputableMetricWarning)
            return float('-inf')
        # marginal entropies of reference (h1) and prediction (h2)
        h1 = (- (((fn_tp / n) * math.log2((fn_tp / n))) + ((1 - (fn_tp / n)) * math.log2((1 - (fn_tp / n))))))
        h2 = (- (((fp_tp / n) * math.log2((fp_tp / n))) + ((1 - (fp_tp / n)) * math.log2((1 - (fp_tp / n))))))
        # joint probabilities; zero cells are replaced by 1 so their term becomes
        # (count/n) * log2(1) = 0, matching the 0*log(0) = 0 convention
        p00 = (1 if (tn == 0) else (tn / n))
        p01 = (1 if (fn == 0) else (fn / n))
        p10 = (1 if (fp == 0) else (fp / n))
        p11 = (1 if (tp == 0) else (tp / n))
        # joint entropy of the two segmentations
        h12 = (- (((((tn / n) * math.log2(p00)) + ((fn / n) * math.log2(p01))) + ((fp / n) * math.log2(p10))) + ((tp / n) * math.log2(p11))))
        mi = ((h1 + h2) - h12)
        return mi
|
class Precision(ConfusionMatrixMetric):
    def __init__(self, metric: str='PRCISON'):
        """Represents a precision metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the precision (positive predictive value); 0 when nothing was predicted positive."""
        predicted_positives = self.confusion_matrix.tp + self.confusion_matrix.fp
        if predicted_positives == 0:
            return 0
        return self.confusion_matrix.tp / predicted_positives
|
class PredictionArea(AreaMetric):
    def __init__(self, slice_number: int=-1, metric: str='PREDAREA'):
        """Represents a prediction area metric.

        Args:
            slice_number (int): The slice number to calculate the area.
                Defaults to -1, which will calculate the area on the intermediate slice.
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)
        self.slice_number = slice_number

    def calculate(self):
        """Calculates the predicted area on the configured slice in mm2."""
        # delegates to the shared AreaMetric helper on the prediction mask
        return self._calculate_area(self.prediction, self.slice_number)
|
class PredictionVolume(VolumeMetric):
    def __init__(self, metric: str='PREDVOL'):
        """Represents a prediction volume metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the predicted volume in mm3."""
        # delegates to the shared VolumeMetric helper on the prediction mask
        return self._calculate_volume(self.prediction)
|
class ProbabilisticDistance(NumpyArrayMetric):
    def __init__(self, metric: str='PROBDST'):
        """Represents a probabilistic distance metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the probabilistic distance.

        Returns -1 when reference and prediction share no joint mass.
        """
        # NOTE(review): the int8 cast truncates fractional values to integers, so
        # despite the metric's name the inputs are effectively expected to be binary
        # {0, 1} label maps — confirm this is intended for probabilistic inputs
        gt = self.reference.flatten().astype(np.int8)
        seg = self.prediction.flatten().astype(np.int8)
        probability_difference = np.absolute((gt - seg)).sum()
        probability_joint = (gt * seg).sum()
        if (probability_joint != 0):
            return (probability_difference / (2.0 * probability_joint))
        else:
            return (- 1)
|
class RandIndex(ConfusionMatrixMetric):
    def __init__(self, metric: str='RNDIND'):
        """Represents a rand index metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the rand index (a + d) / (a + b + c + d) over element pairs."""
        tp = self.confusion_matrix.tp
        tn = self.confusion_matrix.tn
        fp = self.confusion_matrix.fp
        fn = self.confusion_matrix.fn
        n = self.confusion_matrix.n
        # marginal sums of the 2x2 contingency table
        fp_tn = (tn + fp)
        tp_fn = (fn + tp)
        tn_fn = (tn + fn)
        tp_fp = (fp + tp)
        # sums of squared marginals (row-wise and column-wise)
        nis = ((tn_fn * tn_fn) + (tp_fp * tp_fp))
        njs = ((fp_tn * fp_tn) + (tp_fn * tp_fn))
        sum_of_squares = ((((tp * tp) + (tn * tn)) + (fp * fp)) + (fn * fn))
        # a, d: pairs on which both partitions agree; b, c: pairs on which they disagree
        a = (((((tp * (tp - 1)) + (fp * (fp - 1))) + (tn * (tn - 1))) + (fn * (fn - 1))) / 2.0)
        b = ((njs - sum_of_squares) / 2.0)
        c = ((nis - sum_of_squares) / 2.0)
        d = (((((n * n) + sum_of_squares) - nis) - njs) / 2.0)
        return ((a + d) / (((a + b) + c) + d))
|
class ReferenceArea(AreaMetric):
    def __init__(self, slice_number: int=-1, metric: str='REFAREA'):
        """Represents a reference area metric.

        Args:
            slice_number (int): The slice number to calculate the area.
                Defaults to -1, which will calculate the area on the intermediate slice.
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)
        self.slice_number = slice_number

    def calculate(self):
        """Calculates the reference area on the configured slice in mm2."""
        # delegates to the shared AreaMetric helper on the reference mask
        return self._calculate_area(self.reference, self.slice_number)
|
class ReferenceVolume(VolumeMetric):
    def __init__(self, metric: str='REFVOL'):
        """Represents a reference volume metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the reference volume in mm3."""
        # delegates to the shared VolumeMetric helper on the reference mask
        return self._calculate_volume(self.reference)
|
class Sensitivity(ConfusionMatrixMetric):
    def __init__(self, metric: str='SNSVTY'):
        """Represents a sensitivity (true positive rate or recall) metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the sensitivity (true positive rate); -inf (with warning) when undefined."""
        positives = self.confusion_matrix.tp + self.confusion_matrix.fn
        if positives == 0:
            warnings.warn('Unable to compute sensitivity due to division by zero, returning -inf',
                          NotComputableMetricWarning)
            return float('-inf')
        return self.confusion_matrix.tp / positives
|
class Specificity(ConfusionMatrixMetric):
    def __init__(self, metric: str='SPCFTY'):
        """Represents a specificity metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the specificity (true negative rate).

        Returns:
            float: tn / (tn + fp), or -inf (with a NotComputableMetricWarning) when
            tn + fp == 0 (the original divided by zero here), consistent with the
            guard in Sensitivity.
        """
        negatives = self.confusion_matrix.tn + self.confusion_matrix.fp
        if negatives == 0:
            warnings.warn('Unable to compute specificity due to division by zero, returning -inf',
                          NotComputableMetricWarning)
            return float('-inf')
        return self.confusion_matrix.tn / negatives
|
class SurfaceDiceOverlap(DistanceMetric):
    def __init__(self, tolerance: float=1, metric: str='SURFDICE'):
        """Represents a surface Dice coefficient overlap metric.

        Args:
            tolerance (float): The tolerance of the surface distance in mm.
            metric (str): The identification string of the metric.

        See Also:
            - Nikolov, S., Blackwell, S., Mendes, R., De Fauw, J., Meyer, C., Hughes, C., … Ronneberger, O. (2018). Deep learning to achieve clinically applicable segmentation of head and neck anatomy for radiotherapy. http://arxiv.org/abs/1809.04430
            - `Original implementation <https://github.com/deepmind/surface-distance>`_
        """
        super().__init__(metric)
        self.tolerance = tolerance

    def calculate(self):
        """Calculates the surface Dice coefficient overlap; -inf if either mask is empty."""
        d = self.distances
        if d.surfel_areas_pred is None:
            warnings.warn('Unable to compute surface Dice coefficient overlap due to empty prediction mask, returning -inf', NotComputableMetricWarning)
            return float('-inf')
        if d.surfel_areas_gt is None:
            warnings.warn('Unable to compute surface Dice coefficient overlap due to empty reference mask, returning -inf', NotComputableMetricWarning)
            return float('-inf')
        # surface area (per direction) whose nearest-surface distance is within tolerance
        overlap_gt = np.sum(d.surfel_areas_gt[d.distances_gt_to_pred <= self.tolerance])
        overlap_pred = np.sum(d.surfel_areas_pred[d.distances_pred_to_gt <= self.tolerance])
        surface_dice = (overlap_gt + overlap_pred) / (np.sum(d.surfel_areas_gt) + np.sum(d.surfel_areas_pred))
        return float(surface_dice)
|
class SurfaceOverlap(DistanceMetric):
    def __init__(self, tolerance: float=1.0, prediction_to_reference: bool=True, metric: str='SURFOVLP'):
        """Represents a surface overlap metric.

        Computes the overlap of the reference surface with the predicted surface and vice versa allowing a
        specified tolerance (maximum surface-to-surface distance that is regarded as overlapping).
        The overlapping fraction is computed by correctly taking the area of each surface element into account.

        Args:
            tolerance (float): The tolerance of the surface distance in mm.
            prediction_to_reference (bool): Computes the prediction to reference if `True`, otherwise the reference to prediction.
            metric (str): The identification string of the metric.

        See Also:
            - Nikolov, S., Blackwell, S., Mendes, R., De Fauw, J., Meyer, C., Hughes, C., … Ronneberger, O. (2018). Deep learning to achieve clinically applicable segmentation of head and neck anatomy for radiotherapy. http://arxiv.org/abs/1809.04430
            - `Original implementation <https://github.com/deepmind/surface-distance>`_
        """
        super().__init__(metric)
        self.tolerance = tolerance
        self.prediction_to_reference = prediction_to_reference

    def calculate(self):
        """Calculates the surface overlap in the configured direction; -inf for an empty mask."""
        # select the direction once, then run a single shared computation
        if self.prediction_to_reference:
            areas = self.distances.surfel_areas_pred
            dists = self.distances.distances_pred_to_gt
            empty_msg = 'Unable to compute surface overlap due to empty prediction mask, returning -inf'
        else:
            areas = self.distances.surfel_areas_gt
            dists = self.distances.distances_gt_to_pred
            empty_msg = 'Unable to compute surface overlap due to empty reference mask, returning -inf'
        if areas is None or not np.sum(areas) > 0:
            warnings.warn(empty_msg, NotComputableMetricWarning)
            return float('-inf')
        # area-weighted fraction of the surface lying within the tolerance
        return float(np.sum(areas[dists <= self.tolerance]) / np.sum(areas))
|
class TrueNegative(ConfusionMatrixMetric):
    def __init__(self, metric: str='TN'):
        """Represents a true negative metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Returns the number of true negatives from the confusion matrix."""
        return self.confusion_matrix.tn
|
class TruePositive(ConfusionMatrixMetric):
    def __init__(self, metric: str='TP'):
        """Represents a true positive metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Returns the number of true positives from the confusion matrix."""
        return self.confusion_matrix.tp
|
# NOTE(review): removed non-code residue ("Subsets and Splits / No community queries yet /
# The top public SQL queries ...") — website text left over from a scrape; it was not valid
# Python and not part of this module.