| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | """ |
| | A collection of generic interfaces for MONAI transforms. |
| | """ |
| |
|
| | import warnings |
| | from abc import ABC, abstractmethod |
| | from typing import Any, Callable, Hashable, Optional, Sequence, Tuple, Union |
| |
|
| | import numpy as np |
| |
|
| | from monai.config import KeysCollection |
| | from monai.transforms.utils import apply_transform |
| | from monai.utils import ensure_tuple, get_seed |
| |
|
| |
|
class Transform(ABC):
    """
    Abstract base class for all MONAI transforms.

    A transform is a callable that consumes ``data`` and returns an updated
    version of it.  Implementations may keep internal state and may modify
    ``data`` in place; authors should keep in mind:

    #. thread safety when mutating internal state -- when used from a
       multi-process context, a transform instance's variables are read-only.
    #. fields of ``data`` that this transform ignores may still be consumed
       by subsequent transforms in a composed pipeline.
    #. storing too much information in ``data`` may not scale.

    See Also

        :py:class:`monai.transforms.Compose`
    """

    @abstractmethod
    def __call__(self, data: Any):
        """
        Apply the transform to ``data`` and return the updated value.

        ``data`` is typically one element drawn from an iterable such as
        :py:class:`torch.utils.data.Dataset`.  To simplify input validation,
        most transforms assume that

        - ``data`` is a Numpy ndarray, PyTorch Tensor or string
        - the data shape can be:

          #. string data without shape, `LoadNifti` and `LoadPNG` transforms expect file paths
          #. most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``,
             except that `AddChannel` expects (spatial_dim_1[, spatial_dim_2, ...]) and
             `AsChannelFirst` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels)
          #. most of the post-processing transforms expect
             ``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])``

        - the channel dimension is never omitted, even when there is only one channel

        Subclasses may accept extra arguments to parameterize the operation.

        Raises:
            NotImplementedError: When the subclass does not override this method.

        """
        raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
| |
|
| |
|
class Randomizable(ABC):
    """
    An interface for handling random state locally, currently based on a class
    variable `R`, which is an instance of `np.random.RandomState`.
    This is mainly for randomized data augmentation transforms. For example::

        class RandShiftIntensity(Randomizable):
            def randomize(self):
                self._offset = self.R.uniform(low=0, high=100)
            def __call__(self, img):
                self.randomize()
                return img + self._offset

        transform = RandShiftIntensity()
        transform.set_random_state(seed=0)

    """

    # Class-level fallback generator; ``set_random_state`` replaces it with an
    # instance-level one so instances can be seeded independently.
    R: np.random.RandomState = np.random.RandomState()

    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "Randomizable":
        """
        Set the random state locally, to control the randomness, the derived
        classes should use :py:attr:`self.R` instead of `np.random` to introduce random
        factors.

        Args:
            seed: set the random state with an integer seed.
                A non-integer seed is converted via ``id(seed)``; any value is
                reduced modulo ``2**32`` to fit RandomState's legal seed range.
            state: set the random state with a `np.random.RandomState` object.

        Raises:
            TypeError: When ``state`` is not an ``Optional[np.random.RandomState]``.

        Returns:
            a Randomizable instance.

        """
        if seed is not None:
            # ``id(seed)`` maps an arbitrary (e.g. string) seed to an int, but that
            # value -- like a negative, numpy-integer or >32-bit user seed -- can
            # fall outside RandomState's legal range [0, 2**32 - 1].  Reduce modulo
            # 2**32 so seeding never raises ValueError.
            _seed = seed if isinstance(seed, (int, np.integer)) else id(seed)
            self.R = np.random.RandomState(_seed % (2 ** 32))
            return self

        if state is not None:
            if not isinstance(state, np.random.RandomState):
                raise TypeError(f"state must be None or a np.random.RandomState but is {type(state).__name__}.")
            self.R = state
            return self

        # Neither seed nor state given: fall back to a freshly-seeded generator.
        self.R = np.random.RandomState()
        return self

    @abstractmethod
    def randomize(self, data: Any) -> None:
        """
        Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors.

        all :py:attr:`self.R` calls happen here so that we have a better chance to
        identify errors of sync the random state.

        This method can generate the random factors based on properties of the input data.

        Raises:
            NotImplementedError: When the subclass does not override this method.

        """
        raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
| |
|
| |
|
class Compose(Randomizable):
    """
    ``Compose`` chains a series of callables so that they run one after
    another, each transform receiving the previous transform's return value.

    Two usage patterns are supported:

    #. every transform accepts and returns a single
       ndarray / tensor / tensor-like value.
    #. every transform accepts and returns a dictionary holding one or more
       named values.  Such transforms must have pass-through semantics:
       entries they do not touch must be copied into the returned dictionary,
       and the dictionary must be copied (not mutated) between the input and
       output of each transform.

    If a transform in the chain produces a list batch of data, every item in
    the list is still a dictionary and all following transforms apply to each
    list item individually, for example:

    #. transformA normalizes the intensity of 'img' field in the dict data.
    #. transformB crops out a list batch of images on 'img' and 'seg' field.
       And constructs a list of dict data, other fields are copied::

            {                          [{                   {
                'img': [1, 2],              'img': [1],          'img': [2],
                'seg': [1, 2],              'seg': [1],          'seg': [2],
                'extra': 123,    -->        'extra': 123,        'extra': 123,
                'shape': 'CHWD'             'shape': 'CHWD'      'shape': 'CHWD'
            }                           },                   }]

    #. transformC then randomly rotates or flips 'img' and 'seg' fields of
       every dictionary item in the list.

    All composed transforms receive the same global random seed when the user
    has called `set_determinism()`.

    For the pass-through dictionary pattern,
    :class:`monai.transforms.adaptors.adaptor` can wrap transforms that do not
    conform to the convention, allowing otherwise incompatible libraries to be
    used with minimal extra work.

    Note:

        ``Compose`` is not always the best way to build a pre-processing
        pipeline: pre-processing is often not strictly sequential, and much of
        the complexity arises when a non-sequential set of functions must be
        called as if it were a sequence.

        Example: images and labels
        Images typically need some normalisation that labels do not, while
        both are augmented with the same random rotations, flips and
        deformations.  Either use dictionary-based transforms operating on
        'image' and 'label' entries (possibly wrapping `torchvision`
        transforms first), or write a class whose ``__call__`` invokes the
        pre-processing functions itself, applying only the appropriate ones
        to the labels.
    """

    def __init__(self, transforms: Optional[Union[Sequence[Callable], Callable]] = None) -> None:
        # ``None`` means "no transforms": an empty pipeline acts as the identity.
        self.transforms = ensure_tuple([] if transforms is None else transforms)
        self.set_random_state(seed=get_seed())

    def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None) -> "Compose":
        """Propagate ``seed``/``state`` to every randomizable transform in the chain."""
        for xform in self.transforms:
            if isinstance(xform, Randomizable):
                xform.set_random_state(seed, state)
        return self

    def randomize(self, data: Optional[Any] = None) -> None:
        """Call ``randomize`` on every randomizable transform, warning when a signature mismatch prevents it."""
        for xform in self.transforms:
            if isinstance(xform, Randomizable):
                try:
                    xform.randomize(data)
                except TypeError as type_error:
                    tfm_name: str = type(xform).__name__
                    warnings.warn(
                        f'Transform "{tfm_name}" in Compose not randomized\n{tfm_name}.{type_error}.', RuntimeWarning
                    )

    def __call__(self, input_):
        """Apply each transform in order, feeding each one the previous output."""
        result = input_
        for xform in self.transforms:
            result = apply_transform(xform, result)
        return result
| |
|
| |
|
class MapTransform(Transform):
    """
    A subclass of :py:class:`monai.transforms.Transform` with an assumption
    that the ``data`` input of ``self.__call__`` is a MutableMapping such as ``dict``.

    The ``keys`` parameter will be used to get and set the actual data
    item to transform. That is, the callable of this transform should
    follow the pattern:

    .. code-block:: python

        def __call__(self, data):
            for key in self.keys:
                if key in data:
                    # update output data with some_transform_function(data[key]).
                else:
                    # do nothing or some exceptions handling.
            return data

    Raises:
        ValueError: When ``keys`` is an empty iterable.
        TypeError: When ``keys`` type is not in ``Union[Hashable, Iterable[Hashable]]``.

    """

    def __init__(self, keys: KeysCollection) -> None:
        self.keys: Tuple[Hashable, ...] = ensure_tuple(keys)
        if not self.keys:
            raise ValueError("keys must be non empty.")
        for key in self.keys:
            if not isinstance(key, Hashable):
                # Report the offending element's type: reporting the container's type
                # (e.g. "list" or "tuple") would not tell the user which entry is invalid.
                raise TypeError(f"keys must be one of (Hashable, Iterable[Hashable]) but is {type(key).__name__}.")

    @abstractmethod
    def __call__(self, data):
        """
        ``data`` often comes from an iteration over an iterable,
        such as :py:class:`torch.utils.data.Dataset`.

        To simplify the input validations, this method assumes:

        - ``data`` is a Python dictionary
        - ``data[key]`` is a Numpy ndarray, PyTorch Tensor or string, where ``key`` is an element
          of ``self.keys``, the data shape can be:

          #. string data without shape, `LoadNiftid` and `LoadPNGd` transforms expect file paths
          #. most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``,
             except that `AddChanneld` expects (spatial_dim_1[, spatial_dim_2, ...]) and
             `AsChannelFirstd` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels)
          #. most of the post-processing transforms expect
             ``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])``

        - the channel dimension is not omitted even if number of channels is one

        Raises:
            NotImplementedError: When the subclass does not override this method.

        Returns:
            An updated dictionary version of ``data`` by applying the transform.

        """
        raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
| |
|