code
stringlengths
17
6.64M
class MonoDepthCfg(TypedDict):
    """Monocular depth trainer config. See each sub-class for details.

    Top-level schema for the trainer configuration: each field maps one
    section of the config to its own TypedDict schema (all declared
    elsewhere in the project).
    """
    net: NetCfg              # presumably network/architecture settings -- see NetCfg
    loss: LossCfg            # presumably loss-function settings -- see LossCfg
    dataset: DataCfg         # presumably dataset selection/paths -- see DataCfg
    loader: LoaderCfg        # presumably DataLoader settings (batch size, workers) -- see LoaderCfg
    optimizer: OptCfg        # presumably optimizer settings -- see OptCfg
    scheduler: SchedCfg      # presumably LR-scheduler settings -- see SchedCfg
    trainer: TrainCfg        # presumably Lightning-style trainer settings -- see TrainCfg
def _apply_op(img: Tensor, op_name: str, magnitude: float, interpolation: InterpolationMode, fill: Optional[List[float]]):
    """Apply a single photometric augmentation op to `img`.

    Geometric transformations (shear/translate/rotate) are deliberately
    rejected -- this variant only supports appearance-changing ops, so the
    `interpolation` and `fill` arguments are kept purely for API
    compatibility with the torchvision original.

    :param img: (Tensor) Image to transform.
    :param op_name: (str) Name of the operation to apply.
    :param magnitude: (float) Strength of the operation.
    :param interpolation: (InterpolationMode) Unused (no geometric ops).
    :param fill: (None|list[float]) Unused (no geometric ops).
    :return: (Tensor) Transformed image.
    """
    if op_name in ('ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate'):
        # Geometry must not change; refuse all spatial warps.
        raise ValueError(f'Attempted geometric transformation "{op_name}"')
    if op_name == 'Brightness':
        return F.adjust_brightness(img, 1.0 + magnitude)
    if op_name == 'Color':
        return F.adjust_saturation(img, 1.0 + magnitude)
    if op_name == 'Contrast':
        return F.adjust_contrast(img, 1.0 + magnitude)
    if op_name == 'Sharpness':
        return F.adjust_sharpness(img, 1.0 + magnitude)
    if op_name == 'Posterize':
        return F.posterize(img, int(magnitude))
    if op_name == 'Solarize':
        return F.solarize(img, magnitude)
    if op_name == 'AutoContrast':
        return F.autocontrast(img)
    if op_name == 'Equalize':
        return F.equalize(img)
    if op_name == 'Invert':
        return F.invert(img)
    if op_name == 'Identity':
        return img
    raise ValueError('The provided operator {} is not recognized.'.format(op_name))
class AutoAugmentPolicy(Enum):
    """AutoAugment policies learned on different datasets.

    Available policies: IMAGENET, CIFAR10 and SVHN.
    """
    IMAGENET = 'imagenet'
    CIFAR10 = 'cifar10'
    SVHN = 'svhn'
class AutoAugment(torch.nn.Module):
    """AutoAugment data augmentation, based on
    `"AutoAugment: Learning Augmentation Strategies from Data" <https://arxiv.org/pdf/1805.09501.pdf>`_.

    Tensor images should be `torch.uint8` with shape [..., 1 or 3, H, W];
    PIL images should be mode "L" or "RGB".

    NOTE(review): this variant's `_apply_op` rejects geometric ops, and
    `_augmentation_space` below contains no entries for them -- yet the learned
    policies still list `Rotate`/`TranslateX`/`TranslateY`/`ShearY`. When such
    a sub-policy op is sampled (prob check passes), `op_meta[op_name]` in
    `forward` will raise `KeyError`. Presumably these entries should have been
    stripped for depth estimation -- confirm with the maintainers.

    :param policy: (AutoAugmentPolicy) Policy set to sample sub-policies from.
    :param interpolation: (InterpolationMode) Kept for API compatibility; unused now that geometric ops are rejected.
    :param fill: (sequence|number|None) Fill value outside transformed image; unused for photometric ops.
    """

    def __init__(self, policy: AutoAugmentPolicy=AutoAugmentPolicy.IMAGENET, interpolation: InterpolationMode=InterpolationMode.NEAREST, fill: Optional[List[float]]=None) -> None:
        super().__init__()
        self.policy = policy
        self.interpolation = interpolation
        self.fill = fill
        # Pre-resolve the policy enum into its list of (op, prob, magnitude_id) pairs.
        self.policies = self._get_policies(policy)

    def _get_policies(self, policy: AutoAugmentPolicy) -> List[Tuple[(Tuple[(str, float, Optional[int])], Tuple[(str, float, Optional[int])])]]:
        """Return the learned sub-policies for `policy`.

        Each entry is a pair of (op_name, probability, magnitude_id) tuples;
        `magnitude_id` is None for ops without a magnitude.
        """
        if (policy == AutoAugmentPolicy.IMAGENET):
            return [(('Posterize', 0.4, 8), ('Rotate', 0.6, 9)),
                    (('Solarize', 0.6, 5), ('AutoContrast', 0.6, None)),
                    (('Equalize', 0.8, None), ('Equalize', 0.6, None)),
                    (('Posterize', 0.6, 7), ('Posterize', 0.6, 6)),
                    (('Equalize', 0.4, None), ('Solarize', 0.2, 4)),
                    (('Equalize', 0.4, None), ('Rotate', 0.8, 8)),
                    (('Solarize', 0.6, 3), ('Equalize', 0.6, None)),
                    (('Posterize', 0.8, 5), ('Equalize', 1.0, None)),
                    (('Equalize', 0.6, None), ('Posterize', 0.4, 6)),
                    (('Equalize', 0.0, None), ('Equalize', 0.8, None)),
                    (('Invert', 0.6, None), ('Equalize', 1.0, None)),
                    (('Color', 0.6, 4), ('Contrast', 1.0, 8)),
                    (('Color', 0.8, 8), ('Solarize', 0.8, 7)),
                    (('Sharpness', 0.4, 7), ('Invert', 0.6, None)),
                    (('Color', 0.4, 0), ('Equalize', 0.6, None)),
                    (('Equalize', 0.4, None), ('Solarize', 0.2, 4)),
                    (('Solarize', 0.6, 5), ('AutoContrast', 0.6, None)),
                    (('Invert', 0.6, None), ('Equalize', 1.0, None)),
                    (('Color', 0.6, 4), ('Contrast', 1.0, 8)),
                    (('Equalize', 0.8, None), ('Equalize', 0.6, None))]
        elif (policy == AutoAugmentPolicy.CIFAR10):
            return [(('Invert', 0.1, None), ('Contrast', 0.2, 6)),
                    (('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)),
                    (('AutoContrast', 0.5, None), ('Equalize', 0.9, None)),
                    (('Color', 0.4, 3), ('Brightness', 0.6, 7)),
                    (('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)),
                    (('Equalize', 0.6, None), ('Equalize', 0.5, None)),
                    (('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)),
                    (('Color', 0.7, 7), ('TranslateX', 0.5, 8)),
                    (('Equalize', 0.3, None), ('AutoContrast', 0.4, None)),
                    (('Brightness', 0.9, 6), ('Color', 0.2, 8)),
                    (('Solarize', 0.5, 2), ('Invert', 0.0, None)),
                    (('Equalize', 0.2, None), ('AutoContrast', 0.6, None)),
                    (('Equalize', 0.2, None), ('Equalize', 0.6, None)),
                    (('Color', 0.9, 9), ('Equalize', 0.6, None)),
                    (('AutoContrast', 0.8, None), ('Solarize', 0.2, 8)),
                    (('Brightness', 0.1, 3), ('Color', 0.7, 0)),
                    (('Solarize', 0.4, 5), ('AutoContrast', 0.9, None)),
                    (('AutoContrast', 0.9, None), ('Solarize', 0.8, 3)),
                    (('Equalize', 0.8, None), ('Invert', 0.1, None))]
        elif (policy == AutoAugmentPolicy.SVHN):
            return [(('Equalize', 0.6, None), ('Solarize', 0.6, 6)),
                    (('Invert', 0.9, None), ('Equalize', 0.6, None)),
                    (('Equalize', 0.6, None), ('Rotate', 0.9, 3)),
                    (('Invert', 0.9, None), ('AutoContrast', 0.8, None)),
                    (('Equalize', 0.6, None), ('Rotate', 0.9, 3)),
                    (('Equalize', 0.9, None), ('TranslateY', 0.6, 6)),
                    (('Invert', 0.9, None), ('Equalize', 0.6, None)),
                    (('Contrast', 0.3, 3), ('Rotate', 0.8, 4)),
                    (('Invert', 0.8, None), ('TranslateY', 0.0, 2)),
                    (('ShearY', 0.7, 6), ('Solarize', 0.4, 8)),
                    (('Invert', 0.6, None), ('Rotate', 0.8, 4)),
                    (('Solarize', 0.7, 2), ('TranslateY', 0.6, 7))]
        else:
            raise ValueError('The provided policy {} is not recognized.'.format(policy))

    def _augmentation_space(self, num_bins: int, image_size: List[int]) -> Dict[(str, Tuple[(Tensor, bool)])]:
        """Map op name -> (magnitude bins, is_signed).

        `image_size` is unused here (it was only needed by the removed
        geometric ops) but kept for signature compatibility.
        Only photometric ops appear -- see class NOTE about geometric entries.
        """
        return {'Brightness': (torch.linspace(0.0, 0.9, num_bins), True),
                'Color': (torch.linspace(0.0, 0.9, num_bins), True),
                'Contrast': (torch.linspace(0.0, 0.9, num_bins), True),
                'Sharpness': (torch.linspace(0.0, 0.9, num_bins), True),
                # Posterize bins go 8 -> 4 bits, rounded to ints.
                'Posterize': ((8 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int()), False),
                'Solarize': (torch.linspace(255.0, 0.0, num_bins), False),
                'AutoContrast': (torch.tensor(0.0), False),
                'Equalize': (torch.tensor(0.0), False),
                'Invert': (torch.tensor(0.0), False)}

    @staticmethod
    def get_params(transform_num: int) -> Tuple[(int, Tensor, Tensor)]:
        """Sample the random state for one forward pass.

        :param transform_num: (int) Number of sub-policies to choose from.
        :return: (tuple) (policy index, per-op apply probabilities (2,), per-op sign bits (2,)).
        """
        policy_id = int(torch.randint(transform_num, (1,)).item())
        probs = torch.rand((2,))
        signs = torch.randint(2, (2,))
        return (policy_id, probs, signs)

    def forward(self, img: Tensor) -> Tensor:
        """Apply one randomly chosen sub-policy (two ops) to `img`.

        :param img: (PIL Image or Tensor) Image to be transformed.
        :return: (PIL Image or Tensor) AutoAugmented image.
        """
        fill = self.fill
        if isinstance(img, Tensor):
            # Tensor backend needs one float fill value per channel.
            if isinstance(fill, (int, float)):
                fill = ([float(fill)] * F.get_image_num_channels(img))
            elif (fill is not None):
                fill = [float(f) for f in fill]
        (transform_id, probs, signs) = self.get_params(len(self.policies))
        for (i, (op_name, p, magnitude_id)) in enumerate(self.policies[transform_id]):
            if (probs[i] <= p):
                # 10 magnitude bins, per the AutoAugment paper.
                op_meta = self._augmentation_space(10, F.get_image_size(img))
                (magnitudes, signed) = op_meta[op_name]
                magnitude = (float(magnitudes[magnitude_id].item()) if (magnitude_id is not None) else 0.0)
                if (signed and (signs[i] == 0)):
                    magnitude *= (- 1.0)
                img = _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)
        return img

    def __repr__(self) -> str:
        return (self.__class__.__name__ + '(policy={}, fill={})'.format(self.policy, self.fill))
class RandAugment(torch.nn.Module):
    """RandAugment data augmentation, based on
    `"RandAugment: Practical automated data augmentation with a reduced search space"
    <https://arxiv.org/abs/1909.13719>`_, restricted to the photometric ops
    supported by `_apply_op`.

    Tensor images should be `torch.uint8` with shape [..., 1 or 3, H, W];
    PIL images should be mode "L" or "RGB".

    :param num_ops: (int) Number of augmentations applied sequentially.
    :param magnitude: (int) Magnitude bin used for every transformation.
    :param num_magnitude_bins: (int) Number of discrete magnitude values.
    :param interpolation: (InterpolationMode) Kept for API compatibility; unused (no geometric ops).
    :param fill: (sequence|number|None) Fill value outside the transformed image; unused for photometric ops.
    """

    def __init__(self, num_ops: int=2, magnitude: int=9, num_magnitude_bins: int=31, interpolation: InterpolationMode=InterpolationMode.NEAREST, fill: Optional[List[float]]=None) -> None:
        super().__init__()
        self.num_ops = num_ops
        self.magnitude = magnitude
        self.num_magnitude_bins = num_magnitude_bins
        self.interpolation = interpolation
        self.fill = fill

    def _augmentation_space(self, num_bins: int, image_size: List[int]) -> Dict[(str, Tuple[(Tensor, bool)])]:
        """Map op name -> (magnitude bins, is_signed).

        NOTE: insertion order matters -- `forward` samples ops by index.
        `image_size` is unused (only geometric ops needed it) but kept for
        signature compatibility.
        """
        signed_bins = torch.linspace(0.0, 0.9, num_bins)
        space = {'Identity': (torch.tensor(0.0), False)}
        for name in ('Brightness', 'Color', 'Contrast', 'Sharpness'):
            space[name] = (signed_bins, True)
        # Posterize bins go 8 -> 4 bits, rounded to ints.
        space['Posterize'] = (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int(), False)
        space['Solarize'] = (torch.linspace(255.0, 0.0, num_bins), False)
        space['AutoContrast'] = (torch.tensor(0.0), False)
        space['Equalize'] = (torch.tensor(0.0), False)
        return space

    def forward(self, img: Tensor) -> Tensor:
        """Apply `num_ops` randomly chosen photometric ops to `img`.

        :param img: (PIL Image or Tensor) Image to be transformed.
        :return: (PIL Image or Tensor) Transformed image.
        """
        fill = self.fill
        if isinstance(img, Tensor):
            # Tensor backend needs one float fill value per channel.
            if isinstance(fill, (int, float)):
                fill = [float(fill)] * F.get_image_num_channels(img)
            elif fill is not None:
                fill = [float(f) for f in fill]
        for _ in range(self.num_ops):
            space = self._augmentation_space(self.num_magnitude_bins, F.get_image_size(img))
            op_index = int(torch.randint(len(space), (1,)).item())
            op_name = list(space.keys())[op_index]
            magnitudes, signed = space[op_name]
            magnitude = float(magnitudes[self.magnitude].item()) if magnitudes.ndim > 0 else 0.0
            # Random sign flip for symmetric ops (randint only drawn when needed).
            if signed and torch.randint(2, (1,)):
                magnitude *= -1.0
            img = _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)
        return img

    def __repr__(self) -> str:
        return (f'{self.__class__.__name__}(num_ops={self.num_ops}, magnitude={self.magnitude}'
                f', num_magnitude_bins={self.num_magnitude_bins}'
                f', interpolation={self.interpolation}, fill={self.fill})')
class TrivialAugmentWide(torch.nn.Module):
    """Dataset-independent augmentation with TrivialAugment Wide, as described in
    `"TrivialAugment: Tuning-free Yet State-of-the-Art Data Augmentation"
    <https://arxiv.org/abs/2103.10158>`_, restricted to the photometric ops
    supported by `_apply_op`.

    Tensor images should be `torch.uint8` with shape [..., 1 or 3, H, W];
    PIL images should be mode "L" or "RGB".

    :param num_magnitude_bins: (int) Number of discrete magnitude values.
    :param interpolation: (InterpolationMode) Kept for API compatibility; unused (no geometric ops).
    :param fill: (sequence|number|None) Fill value outside the transformed image; unused for photometric ops.
    """

    def __init__(self, num_magnitude_bins: int=31, interpolation: InterpolationMode=InterpolationMode.NEAREST, fill: Optional[List[float]]=None) -> None:
        super().__init__()
        self.num_magnitude_bins = num_magnitude_bins
        self.interpolation = interpolation
        self.fill = fill

    def _augmentation_space(self, num_bins: int) -> Dict[(str, Tuple[(Tensor, bool)])]:
        """Map op name -> (magnitude bins, is_signed).

        NOTE: insertion order matters -- `forward` samples ops by index.
        """
        wide_bins = torch.linspace(0.0, 0.99, num_bins)
        space = {name: (wide_bins, True) for name in ('Brightness', 'Color', 'Contrast', 'Sharpness')}
        # Posterize bins go 8 -> 2 bits (the "wide" range), rounded to ints.
        space['Posterize'] = (8 - (torch.arange(num_bins) / ((num_bins - 1) / 6)).round().int(), False)
        space['Solarize'] = (torch.linspace(255.0, 0.0, num_bins), False)
        space['AutoContrast'] = (torch.tensor(0.0), False)
        space['Equalize'] = (torch.tensor(0.0), False)
        return space

    def forward(self, img: Tensor) -> Tensor:
        """Apply one randomly chosen photometric op with a random magnitude.

        :param img: (PIL Image or Tensor) Image to be transformed.
        :return: (PIL Image or Tensor) Transformed image.
        """
        fill = self.fill
        if isinstance(img, Tensor):
            # Tensor backend needs one float fill value per channel.
            if isinstance(fill, (int, float)):
                fill = [float(fill)] * F.get_image_num_channels(img)
            elif fill is not None:
                fill = [float(f) for f in fill]
        space = self._augmentation_space(self.num_magnitude_bins)
        op_index = int(torch.randint(len(space), (1,)).item())
        op_name = list(space.keys())[op_index]
        magnitudes, signed = space[op_name]
        if magnitudes.ndim > 0:
            # Uniformly sampled magnitude bin (drawn only for ops that have one).
            bin_idx = torch.randint(len(magnitudes), (1,), dtype=torch.long)
            magnitude = float(magnitudes[bin_idx].item())
        else:
            magnitude = 0.0
        if signed and torch.randint(2, (1,)):
            magnitude *= -1.0
        return _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)

    def __repr__(self) -> str:
        return (f'{self.__class__.__name__}(num_magnitude_bins={self.num_magnitude_bins}'
                f', interpolation={self.interpolation}, fill={self.fill})')
class RichProgressBar(plc.RichProgressBar):
    """Rich progress bar that hides every `grad norm` entry from the metrics display."""

    def get_metrics(self, trainer, pl_module):
        metrics = super().get_metrics(trainer, pl_module)
        return {key: val for key, val in metrics.items() if 'grad' not in key}
class TQDMProgressBar(plc.TQDMProgressBar):
    """TQDM progress bar that hides every `grad norm` entry from the metrics display."""

    def get_metrics(self, trainer, pl_module):
        metrics = super().get_metrics(trainer, pl_module)
        return {key: val for key, val in metrics.items() if 'grad' not in key}
class TrainingManager(plc.Callback):
    """Callback that drops marker files indicating training has started/finished.

    Creates `<ckpt_dir>/training` on fit start (deleted on exit/exception) and
    `<ckpt_dir>/finished` on fit end. Refuses to start if either marker already
    exists, preventing two runs from sharing the same checkpoint directory.

    :param ckpt_dir: (Path) Checkpoint directory to place marker files in (created if missing).
    :raises ValueError: If a `training` or `finished` marker already exists.
    """

    def __init__(self, ckpt_dir: Path):
        super().__init__()
        ckpt_dir.mkdir(exist_ok=True, parents=True)
        self.fstart = ckpt_dir / 'training'
        if self.fstart.is_file():
            raise ValueError(f'Training already in progress! ({self.fstart})')
        self.fend = ckpt_dir / 'finished'
        if self.fend.is_file():
            raise ValueError(f'Training already finished! ({self.fend})')
        # Convert SIGTERM into SystemExit so `on_exception` cleanup still runs
        # when the job is killed (e.g. by a cluster scheduler).
        signal.signal(signal.SIGTERM, self._on_sigterm)

    def _cleanup(self):
        """Remove the `training` marker (if present)."""
        print('-> Deleting "training" file...')
        if self.fstart.is_file():
            self.fstart.unlink()
        print('-> Done! Exiting...')

    def _on_sigterm(self, signum, frame):
        """Signature required by `signal.signal`."""
        raise SystemExit

    def on_exception(self, trainer, pl_module, exception):
        self._cleanup()

    def on_fit_start(self, trainer, pl_module):
        print('-> Creating "training" file...')
        self.fstart.touch()

    def on_fit_end(self, trainer, pl_module):
        self._cleanup()
        # BUG FIX: message previously read '"finished""' (doubled closing quote).
        print('-> Creating "finished" file...')
        self.fend.touch()
class DetectAnomaly(plc.Callback):
    """Check for NaN/infinite loss at each core step. Replacement for `detect_anomaly=True`."""

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, unused=0):
        loss = outputs['loss']
        if not loss.isfinite():
            raise ValueError(f'Detected NaN/Infinite loss: "{loss}"')
def default_convert(data):
    """Recursively convert NumPy arrays into :class:`torch.Tensor`.

    `Sequence`, `Collection` and `Mapping` inputs are traversed and each
    element converted; anything that is not a NumPy array is left unchanged.
    This is the default collation when neither `batch_sampler` nor
    `batch_size` is set on :class:`~torch.utils.data.DataLoader`.

    NOTE: plain tuples come back as *lists* (matching upstream torch), while
    namedtuples keep their type.

    :param data: A single data point to be converted.
    :return: The converted data point (same nesting structure).
    """
    elem_type = type(data)
    if isinstance(data, torch.Tensor):
        return data
    if elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' and elem_type.__name__ != 'string_':
        # Object-dtype string arrays cannot be converted -- hand them back as-is.
        if elem_type.__name__ == 'ndarray' and np_str_obj_array_pattern.search(data.dtype.str) is not None:
            return data
        return torch.as_tensor(data)
    if isinstance(data, collections.abc.Mapping):
        converted = {key: default_convert(data[key]) for key in data}
        try:
            return elem_type(converted)
        except TypeError:
            # Mapping subtype whose constructor rejects a dict; return a plain dict.
            return converted
    if isinstance(data, tuple) and hasattr(data, '_fields'):  # namedtuple
        return elem_type(*(default_convert(d) for d in data))
    if isinstance(data, tuple):
        return [default_convert(d) for d in data]
    if isinstance(data, collections.abc.Sequence) and not isinstance(data, string_classes):
        converted = [default_convert(d) for d in data]
        try:
            return elem_type(converted)
        except TypeError:
            # Sequence subtype whose constructor rejects a list; return a plain list.
            return converted
    return data
def default_collate(batch):
    """Collate a batch of samples into tensors with an added batch dimension.

    Type mapping (based on the batch's element type):
    * `torch.Tensor` -> stacked `torch.Tensor` (extra leading batch dim)
    * NumPy array / scalar -> `torch.Tensor`
    * `float` -> float64 tensor; `int` -> int tensor
    * `str` / `bytes` -> returned unchanged
    * `Timer` / `MultiLevelTimer` -> returned unchanged (project-specific extension over upstream torch)
    * `Mapping[K, V]` -> `Mapping[K, default_collate([V_1, V_2, ...])]`
    * NamedTuple / Sequence -> collated field-wise / element-wise

    :param batch: (list) A single batch to be collated.
    :raises TypeError: If the element type is unsupported.
    :raises RuntimeError: If sequence elements have inconsistent lengths.
    """
    elem = batch[0]
    elem_type = type(elem)
    if isinstance(elem, torch.Tensor):
        out = None
        if (torch.utils.data.get_worker_info() is not None):
            # In a worker process: stack directly into shared memory to avoid
            # an extra copy when the batch is sent back to the main process.
            numel = sum((x.numel() for x in batch))
            storage = elem.storage()._new_shared(numel, device=elem.device)
            out = elem.new(storage).resize_(len(batch), *list(elem.size()))
        return torch.stack(batch, 0, out=out)
    elif ((elem_type.__module__ == 'numpy') and (elem_type.__name__ != 'str_') and (elem_type.__name__ != 'string_')):
        if ((elem_type.__name__ == 'ndarray') or (elem_type.__name__ == 'memmap')):
            # Object-dtype string arrays cannot be collated into tensors.
            if (np_str_obj_array_pattern.search(elem.dtype.str) is not None):
                raise TypeError(default_collate_err_msg_format.format(elem.dtype))
            return default_collate([torch.as_tensor(b) for b in batch])
        elif (elem.shape == ()):
            # NumPy scalar.
            return torch.as_tensor(batch)
    elif isinstance(elem, float):
        return torch.tensor(batch, dtype=torch.float64)
    elif isinstance(elem, int):
        return torch.tensor(batch)
    elif isinstance(elem, string_classes):
        return batch
    elif isinstance(elem, (Timer, MultiLevelTimer)):
        # Project-specific: pass per-sample timers through untouched.
        return batch
    elif isinstance(elem, collections.abc.Mapping):
        try:
            return elem_type({key: default_collate([d[key] for d in batch]) for key in elem})
        except TypeError:
            # Mapping subtype whose constructor rejects a dict; fall back to plain dict.
            return {key: default_collate([d[key] for d in batch]) for key in elem}
    elif (isinstance(elem, tuple) and hasattr(elem, '_fields')):  # namedtuple
        return elem_type(*(default_collate(samples) for samples in zip(*batch)))
    elif isinstance(elem, collections.abc.Sequence):
        # Sanity check: all elements must be the same length before transposing.
        it = iter(batch)
        elem_size = len(next(it))
        if (not all(((len(elem) == elem_size) for elem in it))):
            raise RuntimeError('each element in list of batch should be of equal size')
        transposed = list(zip(*batch))
        if isinstance(elem, tuple):
            # Plain tuples come back as lists (matches upstream torch behaviour).
            return [default_collate(samples) for samples in transposed]
        else:
            try:
                return elem_type([default_collate(samples) for samples in transposed])
            except TypeError:
                # Sequence subtype whose constructor rejects a list; fall back to plain list.
                return [default_collate(samples) for samples in transposed]
    raise TypeError(default_collate_err_msg_format.format(elem_type))
def opt_args_deco(deco: Callable) -> Callable:
    """Meta-decorator for writing decorators with optional keyword arguments.

    Lets a decorator written with a flat signature, `deco(func, **opts)`, be
    applied either bare (`@deco`) or configured (`@deco(foo=10)`) without the
    usual extra level of nesting. When called without a function, it returns
    a `partial` of `deco` bound to the given kwargs; that partial is then the
    actual decorator applied to the target function.

    Example:
    ```
    @opt_args_deco
    def stringify(func, prefix='', suffix=''):
        @wraps(func)
        def wrapper(*args, **kwargs):
            return f'{prefix}{func(*args, **kwargs)}{suffix}'
        return wrapper
    ```

    :param deco: (Callable) Decorator function with optional keyword parameters to wrap.
    :return: (Callable) If `func` is provided: decorated func, otherwise: decorator to apply to `func`.
    """
    @wraps(deco)
    def wrapper(f: Optional[Callable]=None, **kwargs) -> Callable:
        if f is not None:
            # Bare usage `@deco`: apply immediately with default/provided kwargs.
            if not isinstance(f, (types.FunctionType, types.MethodType)):
                raise TypeError(f'Positional argument must be a function or method, got {f} of type {type(f)}')
            return deco(f, **kwargs)
        # Configured usage `@deco(...)`: return the decorator to apply later.
        return partial(deco, **kwargs)
    return wrapper
def delegates(to: Optional[Callable]=None, keep: bool=False):
    """From https://www.fast.ai/2019/08/06/delegation/
    Decorator to replace `*args`/`**kwargs` in a signature with the
    corresponding params from `to` (for IDE/help introspection).

    Can decorate a class (delegating `__init__` to the base class)
    ```
    @delegates()
    class Child(Parent): ...
    ```
    or a function
    ```
    @delegates(parent)
    def func(a, **kwargs): ...
    ```

    :param to: (Callable) Callable containing the params to copy (None -> use the base class `__init__`).
    :param keep: (bool) If `True`, keep `*args`/`**kwargs` in the signature.
    :return: (Callable) The decorated class or function with the updated signature.
    """
    def wrapper(f):
        if to is None:
            to_f, from_f = f.__base__.__init__, f.__init__
        else:
            to_f, from_f = to, f
        sig = inspect.signature(from_f)
        params = dict(sig.parameters)
        # `*args` absorbs the delegate's required params, `**kwargs` its optional ones.
        star_args = params.pop('args', None)
        if star_args:
            params.update({name: p for name, p in inspect.signature(to_f).parameters.items()
                           if p.default == inspect.Parameter.empty and name not in params})
        star_kwargs = params.pop('kwargs', None)
        if star_kwargs:
            params.update({name: p for name, p in inspect.signature(to_f).parameters.items()
                           if p.default != inspect.Parameter.empty and name not in params})
        if keep and star_args:
            params['args'] = star_args
        if keep and star_kwargs:
            params['kwargs'] = star_kwargs
        from_f.__signature__ = sig.replace(parameters=list(params.values()))
        return f
    return wrapper
def map_container(f: Callable) -> Callable:
    """Decorator that recursively applies `f` to the leaves of arbitrary
    nestings of `dict`, `list`, `tuple` & `set` (container types preserved).

    NOTE: `f` may have any signature, but its first arg must be the leaf to
    transform; extra `*args`/`**kwargs` are forwarded to every leaf call.

    Example:
    ```
    @map_container
    def square(n, bias=0):
        return (n ** 2) + bias

    square({'a': [1, 2, 3], 'b': 4})  ==>  {'a': [1, 4, 9], 'b': 16}
    square({'a': [1, 2, 3]}, bias=2)  ==>  {'a': [3, 6, 11]}
    ```

    :param f: (Callable) Leaf transformation to lift over containers.
    :return: (Callable) Wrapped function accepting nested containers.
    """
    @wraps(f)
    def wrapper(x: Any, *args, **kwargs) -> Any:
        recurse = lambda v: wrapper(v, *args, **kwargs)
        if isinstance(x, dict):
            return {k: recurse(v) for k, v in x.items()}
        if isinstance(x, list):
            return [recurse(v) for v in x]
        if isinstance(x, tuple):
            return tuple(recurse(v) for v in x)
        if isinstance(x, set):
            return {recurse(v) for v in x}
        return f(x, *args, **kwargs)
    return wrapper
@opt_args_deco
def retry_new_on_error(__getitem__: Callable, exc: Union[(BaseException, Sequence[BaseException])]=Exception, silent: bool=False, max: Optional[int]=None, use_blacklist: bool=False) -> Callable:
    """Decorator for a BaseDataset `__getitem__` that retries a *different*
    random index whenever loading the requested item raises.

    Provides a way of ignoring missing/corrupt data without blacklist files or
    changing dataset length. The less data there is, the more duplicate items
    this produces; for debugging/evaluation it likely makes sense to disable it.

    Assumed BaseDataset conventions (presumably -- confirm against BaseDataset):
    - `__getitem__` returns three dicts (x, y, meta);
    - retry errors are appended to meta['errors'] unless `silent`;
    - `cls.log_time` indicates a `MultiLevelTimer` lives in `cls.timer`.

    NOTE(review): `n` is shared by ALL calls through this closure (reset to 0
    after any success), so `max` bounds *consecutive* failures across items,
    not retries for a single item. Also, if every other index ends up in the
    blacklist, the `while` loop below will never terminate -- presumably
    acceptable for large datasets.

    :param __getitem__: (Callable) Dataset `__getitem__` method to decorate.
    :param exc: (tuple|Exception) Expected exceptions to catch and retry on.
    :param silent: (bool) If `False`, log error info to `meta`.
    :param max: (None|int) Maximum number of retries (shadows builtin `max` inside this scope).
    :param use_blacklist: (bool) If `True`, keep a set of failed items to avoid resampling them.
    :return: (tuple[dict]) x, y, meta returned by `__getitem__`.
    """
    n = 0                    # consecutive-failure counter (shared across calls)
    blacklist = set()        # items that previously failed (only if `use_blacklist`)
    exc = (exc or tuple())   # `None`/empty -> catch nothing
    if isinstance(exc, list):
        exc = tuple(exc)     # `except` requires a tuple, not a list

    @wraps(__getitem__)
    def wrapper(cls, item):
        nonlocal n
        try:
            (x, y, m) = __getitem__(cls, item)
            if ((not silent) and ('errors' not in m)):
                m['errors'] = ''  # ensure downstream consumers always find the key
        except exc as e:
            n += 1
            if (max and (n >= max)):
                raise RuntimeError('Exceeded max retries when loading dataset item...')
            if use_blacklist:
                blacklist.add(item)
            if cls.log_time:
                cls.timer.reset()  # discard timings from the failed load
            # Resample until we get a new, non-blacklisted index.
            new = item
            while ((new == item) or (new in blacklist)):
                new = random.randrange(len(cls))
            # Recurse through the wrapper so the replacement can itself retry.
            (x, y, m) = wrapper(cls, new)
            if (not silent):
                m['errors'] += f"{(' - ' if m['errors'] else '')}{(item, e)}"
        n = 0  # success: reset the consecutive-failure counter
        return (x, y, m)
    return wrapper
def readlines(file: PathLike, /, encoding=None) -> list[str]:
    """Read a text file and return its lines with newlines stripped."""
    with open(file, encoding=encoding) as handle:
        content = handle.read()
    return content.splitlines()
def pil2np(img: Image, /) -> NDArray:
    """Convert a PIL image in [0, 255] into a float32 numpy array in [0, 1]."""
    arr = np.array(img, dtype=np.float32)
    return arr / 255.0
def np2pil(arr: NDArray, /) -> Image:
    """Convert a numpy image in [0, 1] into a uint8 PIL image in [0, 255]."""
    scaled = (arr * 255).astype(np.uint8)
    return Image.fromarray(scaled)
def write_yaml(file: PathLike, data: dict, mkdir: bool=False, sort_keys: bool=False) -> None:
    """Serialize `data` to `file` (extension forced to `.yaml`).

    :param file: (PathLike) Target path; its suffix is replaced with `.yaml`.
    :param data: (dict) Data to serialize.
    :param mkdir: (bool) If `True`, create missing parent directories.
    :param sort_keys: (bool) If `True`, sort mapping keys in the output.
    """
    target = Path(file).with_suffix('.yaml')
    if mkdir:
        target.parent.mkdir(parents=True, exist_ok=True)
    with open(target, 'w') as stream:
        yaml.dump(data, stream, sort_keys=sort_keys)
def load_yaml(file: PathLike) -> dict:
    """Load a single yaml file.

    NOTE(review): `FullLoader` can construct non-primitive Python objects from
    tagged YAML -- fine for trusted configs, but do not point this at
    untrusted input (prefer `yaml.safe_load` there).
    """
    with open(file) as stream:
        return yaml.load(stream, Loader=yaml.FullLoader)
def load_merge_yaml(*files: PathLike) -> dict:
    """Load a list of YAML cfg and recursively merge them into a single config.

    Following dictionary merging rules, the first file is the "base" config,
    updated by each subsequent file in turn: ((((1 <- 2) <- 3) <- 4) ... <- n).

    :param files: (Sequence[PathLike]) YAML config files, from "oldest" to "newest".
    :return: (dict) The merged config from all given files.
    """
    configs = [load_yaml(file) for file in files]
    merged = configs.pop(0)  # IndexError if called with no files (as before)
    for cfg in configs:
        merged = _merge_yaml(merged, cfg)
    return merged
def _merge_yaml(old: dict, new: dict) -> dict: 'Recursively merge two YAML cfg.\n Dictionaries are recursively merged. All other types simply update the current value.\n\n NOTE: This means that a "list of dicts" will simply be updated to whatever the new value is,\n not appended to or recursively checked!\n\n :param old: (dict) Base dictionary containing default keys.\n :param new: (dict) New dictionary containing keys to overwrite in `old`.\n :return: (dict) The merge config.\n ' d = old.copy() for (k, v) in new.items(): d[k] = (_merge_yaml(d[k], v) if ((k in d) and isinstance(v, dict)) else v) return d
class BaseMetric(Metric):
    """Base class for depth estimation metrics.

    Subclasses implement `_compute` for a single batch; accumulation and
    distributed reduction are handled here via torchmetrics states.

    BUG FIX: the docstring was previously the *third* statement in the class
    body, so it never became `__doc__`; it is now first. The positional-only
    parameter of `_preprocess` was also renamed (it shadowed builtin `input`),
    which is safe because positional-only params cannot be passed by name.

    :param mode: (str) Depth conversion applied before computing: 'raw', 'log' or 'inv' (must be in `MODES`).
    """
    higher_is_better = False
    full_state_update = False

    def __init__(self, mode: str='raw', **kwargs):
        super().__init__(**kwargs)
        assert (mode in MODES)
        self.mode: str = mode
        # Scale factor applied when accumulating, so each mode reports in comparable units.
        self.sf: int = {'raw': 1, 'log': 100, 'inv': 1000}[self.mode]
        self.add_state('metric', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('total', default=torch.tensor(0), dist_reduce_fx='sum')

    def _preprocess(self, depth, /):
        """Convert raw depth into log-depth or disparity, depending on `self.mode`."""
        if (self.mode == 'log'):
            depth = depth.log()
        elif (self.mode == 'inv'):
            depth = (1 / depth.clip(min=0.001))  # clip avoids division blow-up near zero depth
        return depth

    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        """Compute an error metric for a single pair.

        :param pred: (Tensor) (b, n) Predicted depth.
        :param target: (Tensor) (b, n) Target depth.
        :return: (Tensor) (b,) Computed metric.
        """
        raise NotImplementedError

    def update(self, pred: Tensor, target: Tensor) -> None:
        """Compute an error metric for a whole batch of predictions and update the state.

        :param pred: (Tensor) (b, n) Predicted depths masked with NaNs.
        :param target: (Tensor) (b, n) Target depths masked with NaNs.
        :return:
        """
        self.metric += (self.sf * self._compute(self._preprocess(pred), self._preprocess(target)).sum())
        self.total += pred.shape[0]

    def compute(self) -> Tensor:
        """Compute the average metric given the current state."""
        return (self.metric / self.total)
class MAE(BaseMetric):
    """Mean absolute error between predicted and target depths."""

    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        diff = pred - target
        return diff.abs().nanmean(dim=1)
class RMSE(BaseMetric):
    """Root mean squared error between predicted and target depths."""

    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        sq_err = (pred - target).pow(2)
        return sq_err.nanmean(dim=1).sqrt()
class ScaleInvariant(BaseMetric):
    """Scale invariant error: std-dev-like error that discounts a global offset."""

    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        err = pred - target
        second_moment = err.pow(2).nanmean(dim=1)
        first_moment_sq = err.nanmean(dim=1).pow(2)
        return (second_moment - first_moment_sq).sqrt()
class AbsRel(BaseMetric):
    """Absolute relative error, reported as a percentage."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Override the mode-based scale factor: always report as a percentage.
        self.sf = 100

    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        rel_err = (pred - target).abs() / target
        return rel_err.nanmean(dim=1)
class SqRel(BaseMetric):
    'Compute the absolute relative squared error.'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Override the mode-based scale factor: always report as a percentage.
        self.sf = 100

    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        # NOTE(review): the denominator squares the target, whereas the common
        # KITTI sq-rel metric divides by the raw target — confirm this is intended.
        return ((pred - target).pow(2) / target.pow(2)).nanmean(dim=1)
class DeltaAcc(BaseMetric):
    """Compute the accuracy for a given error threshold.

    Accuracy is the fraction of valid (non-NaN) pixels whose depth ratio
    ``max(pred/target, target/pred)`` falls below ``delta``, reported as a
    percentage via ``sf``.

    :param delta: (float) Error-ratio threshold (e.g. 1.25, 1.25**2, 1.25**3).
    """
    higher_is_better = True

    def __init__(self, delta: float, **kwargs):
        super().__init__(**kwargs)
        assert (self.mode == 'raw'), 'Accuracy should only be computed using raw depths.'
        self.delta: float = delta
        self.sf = 100  # report as a percentage

    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        thresh = torch.max((target / pred), (pred / target))
        # FIX: normalize by the number of valid (non-NaN) pixels, not by the
        # sum of the threshold ratios. The old denominator `thresh.nansum(dim=1)`
        # summed ratio *values* (each >= 1), so the result was not a fraction.
        valid = (~thresh.isnan()).sum(dim=1)
        return (thresh < self.delta).sum(dim=1) / valid
class Timer():
    """Context manager for timing a block of code.

    Attributes:
    :param name: (str) Timer label when printing.
    :param as_ms: (bool) If `True`, store time as `milliseconds`, otherwise `seconds`.
    :param sync_gpu: (bool) If `True`, ensure that GPU is synced on Timer enter and exit.
    :param precision: (int) Number of decimal places to print.

    Example:
    ```
    with Timer('MyTimer') as t:
        time.sleep(1)
    elapsed = t.elapsed
    print(t)

    ===>
    MyTimer: 1.003 s
    ```
    """

    def __init__(self, name: str='Timer', as_ms: bool=False, sync_gpu: bool=False, precision: int=6) -> None:
        self.name: str = name
        self.as_ms: bool = as_ms
        self.sync_gpu: bool = sync_gpu
        self.precision: int = precision
        self._sf: int = (1000 if self.as_ms else 1)  # scale factor from seconds
        self._units: str = ('ms' if self.as_ms else 's')
        self._sync_fn: Optional[Callable] = None
        self._start: Optional[float] = None
        self._end: Optional[float] = None
        if self.sync_gpu:
            # Lazy import so CPU-only usage never requires torch.
            import torch
            self._sync_fn = torch.cuda.synchronize

    def __repr__(self) -> str:
        'Convert class constructor into string representation.'
        sig = inspect.signature(self.__init__)
        kwargs = {key: getattr(self, key) for key in sig.parameters if hasattr(self, key)}
        s = ', '.join((f'{k}={v}' for (k, v) in kwargs.items()))
        return f'{self.__class__.__qualname__}({s})'

    def __str__(self) -> str:
        'Convert into string representation.'
        return f'{self.name}: {self.elapsed} {self._units}'

    def __enter__(self) -> 'Timer':
        'Start timer and sync GPU.'
        if self.sync_gpu:
            self._sync_fn()
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        'End timer and sync GPU.'
        if self.sync_gpu:
            self._sync_fn()
        self._end = time.perf_counter()

    @property
    def elapsed(self) -> float:
        'Time taken between enter and exit.'
        # FIX: compare against `None` rather than truthiness, so a timestamp of
        # exactly 0.0 is not mistaken for "timer never started/finished".
        assert self._start is not None, '`Timer` has not begun'
        assert self._end is not None, '`Timer` has not finished'
        time_taken = (self._sf * (self._end - self._start))
        return round(time_taken, self.precision)
class MultiLevelTimer():
    """Context manager Timer capable of being nested across multiple levels.

    NOTE: We use the *instance* of this class as a context manager, not the class itself (see examples).

    Timers are stored as a dict, mapping labels to (depth, start, end, elapsed).
    In order to allow for the nesting of these timers, we keep track of what timers are active (effectively, a stack).
    On __exit__ we pop the most recent label and end that timer.

    Attributes:
    :param name: (str) Global Timer name.
    :param as_ms: (bool) If `True`, store time as `milliseconds`, otherwise `seconds`.
    :param sync_gpu: (bool) If `True`, ensure that GPU is synced on Timer enter and exit.
    :param precision: (int) Number of decimal places to print.

    Examples:
    ```
    timer = MultiLevelTimer(name='MyTimer', as_ms=True, precision=4)

    with timer('OuterLevel'):
        time.sleep(2)
        with timer('InnerLevel'):
            time.sleep(1)

    print(timer)

    ==>
    MyTimer
        OuterLevel: 3002.3414 ms
            InnerLevel: 1000.7601 ms
    ```

    Levels can also be named automatically
    ```
    timer = MultiLevelTimer(name='MyTimer')

    with timer:
        time.sleep(2)

    print(timer)

    ==>
    MyTimer
        Level1: 2.002093 s
    ```
    """

    def __init__(self, name: str='Timer', as_ms: bool=False, sync_gpu: bool=False, precision: int=6) -> None:
        self.name: str = name
        self.as_ms: bool = as_ms
        self.sync_gpu: bool = sync_gpu
        self.precision: int = precision
        # Current nesting depth; incremented on __enter__, decremented on __exit__.
        self.depth: int = 0
        self._sf: int = (1000 if self.as_ms else 1)  # scale factor from seconds
        self._units: str = ('ms' if self.as_ms else 's')
        self._sync_fn: Optional[Callable] = None
        # Label staged by __call__ for the next __enter__ (consumed once).
        self._label: Optional[str] = None
        # Stack of currently-running timer labels.
        self._active: list[str] = []
        # Label -> {'depth', 'start', 'end', 'elapsed'} for every timer created.
        self._data: dict[(str, TimerData)] = {}
        if self.sync_gpu:
            # Lazy import so CPU-only usage never requires torch.
            import torch
            self._sync_fn = torch.cuda.synchronize

    def __repr__(self) -> str:
        'Convert class constructor into string representation.'
        sig = inspect.signature(self.__init__)
        kwargs = {key: getattr(self, key) for key in sig.parameters if hasattr(self, key)}
        s = ', '.join((f'{k}={v}' for (k, v) in kwargs.items()))
        return f'{self.__class__.__qualname__}({s})'

    def __str__(self) -> str:
        'Convert into string representation.'
        # One line per timer, indented by its nesting depth.
        s = [self.name]
        s += [(('\t' * v['depth']) + f"{k}: {v['elapsed']} {self._units}") for (k, v) in self]
        return '\n'.join(s)

    def __getitem__(self, label: str) -> TimerData:
        'Return timer info for the given label.'
        return self._data[label]

    def __iter__(self) -> Generator[(tuple[(str, TimerData)], None, None)]:
        'Iterate through all timers as (`label`, `timer`)'
        for k in self._data:
            (yield (k, self[k]))

    def __call__(self, label: str) -> 'MultiLevelTimer':
        'Required to call a `Timer` instance in a context manager and create a new label.'
        self._label = label
        return self

    def __enter__(self) -> 'MultiLevelTimer':
        'Context manager entry point.'
        self.depth += 1
        # Consume the staged label (if any); otherwise auto-name by depth.
        (label, self._label) = (self._label, None)
        label = (label or f'Level{self.depth}')
        if (label in self._data):
            raise KeyError(f'Duplicate Timer key: {label}')
        if self.sync_gpu:
            self._sync_fn()
        self._active.append(label)
        self._data[label] = {'depth': self.depth, 'start': time.perf_counter(), 'end': None, 'elapsed': None}
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        'Context manager exit point.'
        assert self._active, 'What are you doing here??'
        # Close the most-recently opened timer (LIFO).
        label = self._active.pop()
        timer = self._data[label]
        if self.sync_gpu:
            self._sync_fn()
        timer['end'] = time.perf_counter()
        timer['elapsed'] = round((self._sf * (timer['end'] - timer['start'])), self.precision)
        self.depth -= 1

    def reset(self) -> None:
        'Delete all existing `Timer` data.'
        if self._active:
            raise RuntimeError(f'Attempt to reset Timer while active: {self._active}')
        self._data = {}

    def copy(self) -> 'MultiLevelTimer':
        'Return a deep copy of the timer.'
        return copy.deepcopy(self)

    def to_dict(self, key: str='elapsed') -> dict:
        'Return a dict containing only the data for the specified key.'
        return {label: data[key] for (label, data) in self}

    @staticmethod
    def mean_elapsed(timers: Sequence['MultiLevelTimer']) -> Union[(Sequence, dict[(str, float)])]:
        'Return the average elapsed time for each label in a list of timers.'
        if (not timers):
            return timers
        # Group elapsed times per label across all timers, then average.
        data = {}
        for t in timers:
            for (k, v) in t:
                if (k in data):
                    data[k].append(v['elapsed'])
                else:
                    data[k] = [v['elapsed']]
        data = {k: (sum(v) / len(v)) for (k, v) in data.items()}
        return data
def _get_items(split, mode):
    """Parse a KITTI split file into sequence/camera/frame-stem records."""
    file = kr.get_split_file(split, mode)
    if (split == 'benchmark') and (mode == 'test'):
        return []
    cam_from_side = {'l': 'image_02', 'r': 'image_03'}
    items = []
    for line in io.readlines(file):
        fields = line.split()
        items.append({'seq': fields[0], 'cam': cam_from_side[fields[2]], 'stem': int(fields[1])})
    return items
class TestKitti():
    """Check that every file referenced by the KITTI split files exists on disk."""

    def _collect_missing(self, splits, modes, get_file):
        """Return (split labels, sequences) for every item whose file is absent."""
        labels, missing = [], []
        for s in splits:
            for m in modes:
                for i in _get_items(s, m):
                    if not get_file(i).is_file():
                        missing.append(i['seq'])
                        labels.append(f'{s} {m}')
        return labels, missing

    def test_image_files(self):
        labels, missing = self._collect_missing(
            SPLITS, MODES, lambda i: kr.get_image_file(i['seq'], i['cam'], i['stem']))
        assert not missing, f'Missing image files in sequences. {set(labels)} {set(missing)}'

    def test_velo_files(self):
        labels, missing = self._collect_missing(
            SPLITS, MODES, lambda i: kr.get_velodyne_file(i['seq'], i['stem']))
        assert not missing, f'Missing velodyne files in sequences. {set(labels)} {set(missing)}'

    def test_hints_files(self):
        labels, missing = self._collect_missing(
            SPLITS, ['train', 'val'], lambda i: kr.get_hint_file(i['seq'], i['cam'], i['stem']))
        assert not missing, f'Missing depth hints files in sequences. {set(labels)} {set(missing)}'

    def test_benchmark_files(self):
        labels, missing = self._collect_missing(
            ['benchmark', 'eigen_benchmark'], MODES,
            lambda i: kr.get_depth_file(i['seq'], i['cam'], i['stem']))
        assert not missing, f'Missing benchmark files in sequences. {set(labels)} {set(missing)}'
def _get_items(split, mode):
    """Parse a KITTI split file into sequence/drive/camera/frame-stem records."""
    file = kr.get_split_file(split, mode)
    if (split == 'benchmark') and (mode == 'test'):
        return []
    cam_from_side = {'l': 'image_02', 'r': 'image_03'}
    items = []
    for line in io.readlines(file):
        fields = line.split()
        # First field is "<seq>/<drive>".
        parts = fields[0].split('/')
        items.append({
            'seq': parts[0],
            'drive': parts[1],
            'cam': cam_from_side[fields[2]],
            'stem': int(fields[1]),
        })
    return items
class TestKitti():
    """Check that every directory referenced by the KITTI split files exists on disk."""

    def _collect_missing(self, splits, modes, get_path):
        """Return (split labels, seq/drive names) for every item whose directory is absent."""
        labels, missing = [], []
        for s in splits:
            for m in modes:
                for i in _get_items(s, m):
                    if not get_path(i).is_dir():
                        missing.append(i['seq'] + '/' + i['drive'])
                        labels.append(f'{s} {m}')
        return labels, missing

    def test_image_files(self):
        labels, missing = self._collect_missing(
            SPLITS, MODES, lambda i: kr.get_images_path(i['seq'], i['drive'], i['cam']))
        assert not missing, f'Missing image files in sequences. {set(labels)} {set(missing)}'

    def test_velo_files(self):
        labels, missing = self._collect_missing(
            SPLITS, MODES, lambda i: kr.get_velos_path(i['seq'], i['drive']))
        assert not missing, f'Missing velodyne files in sequences. {set(labels)} {set(missing)}'

    def test_hints_files(self):
        labels, missing = self._collect_missing(
            SPLITS, ['train', 'val'], lambda i: kr.get_hints_path(i['seq'], i['drive'], i['cam']))
        assert not missing, f'Missing depth hints files in sequences. {set(labels)} {set(missing)}'

    def test_benchmark_files(self):
        labels, missing = self._collect_missing(
            ['benchmark', 'eigen_benchmark'], MODES,
            lambda i: kr.get_depths_path(i['seq'], i['drive'], i['cam']))
        assert not missing, f'Missing benchmark files in sequences. {set(labels)} {set(missing)}'
def _get_items(mode):
    """Return the item list (second element of the loaded split) for `mode`."""
    split = syp.load_split(mode)
    return split[1]
class TestKitti():
    """Check that every file referenced by the split files exists on disk."""

    def _collect_missing(self, modes, get_file):
        """Return (mode labels, scenes) for every item whose file is absent."""
        labels, missing = [], []
        for m in modes:
            for i in _get_items(m):
                if not get_file(i).is_file():
                    missing.append(i[0])
                    labels.append(f'{m}')
        return labels, missing

    def test_image_files(self):
        labels, missing = self._collect_missing(MODES, lambda i: syp.get_image_file(*i))
        assert not missing, f'Missing image files in sequences. {set(labels)} {set(missing)}'

    def test_depth_files(self):
        labels, missing = self._collect_missing({'val'}, lambda i: syp.get_depth_file(*i))
        assert not missing, f'Missing depth files in sequences. {set(labels)} {set(missing)}'

    def test_edges_files(self):
        labels, missing = self._collect_missing({'val'}, lambda i: syp.get_edges_file(i[0], 'edges', i[1]))
        assert not missing, f'Missing edges files in sequences. {set(labels)} {set(missing)}'
class TmpData(BaseDataset):
    """Minimal dataset used to exercise `BaseDataset` behaviour in the tests below."""

    def __init__(self, n, **kwargs):
        self.n = range(n)
        super().__init__(**kwargs)

    def __len__(self):
        return len(self.n)

    def load(self, item, x, y, meta):
        # Expose the raw index so tests can check it round-trips unchanged.
        x['item'] = self.n[item]
        return x, y, meta

    def augment(self, x, y, meta):
        # Deterministic augmentation: scale the item and tag the meta dict.
        x['item'] *= 100
        meta['augs'] = 'helloworld'
        return x, y, meta

    def show(self, x, y, meta, axs=None):
        ...
class TestBaseDataset():
    """Tests for the abstract `BaseDataset` contract and its optional features."""

    def test_base(self):
        'Test that we have the expected functions.'
        # The base class is abstract and must not be instantiable directly.
        with pytest.raises(TypeError):
            _ = BaseDataset()
        assert hasattr(BaseDataset, '__repr__'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, '__len__'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, '__getitem__'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'load'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'collate_fn'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'augment'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'to_torch'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'create_axs'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'show'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'play'), 'Missing attribute from base dataset.'
        dataset = TmpData(10)
        assert hasattr(dataset, 'logger'), 'Missing logger in dataset.'
        assert (dataset.logger.name == 'BaseDataset.TmpData'), 'Incorrect logger name.'

    def test_timer(self):
        'Test that timing can be enabled/disabled.'
        dataset = TmpData(10, log_time=True)
        (x, y, meta) = dataset[0]
        assert isinstance(dataset.timer, MultiLevelTimer), 'Incorrect timer class.'
        assert ('data_timer' in meta), 'Missing timing information in meta.'
        dataset = TmpData(10, log_time=False)
        (x, y, meta) = dataset[0]
        assert (not isinstance(dataset.timer, MultiLevelTimer)), 'Incorrect timer class.'
        assert ('data_timer' not in meta), 'Unexpected timing information in meta.'

    def test_augment(self):
        'Test that augmentations can be enabled/disabled.'
        dataset = TmpData(10, use_aug=True)
        (x, y, meta) = dataset[5]
        # TmpData.augment multiplies the item by 100 and tags `meta['augs']`.
        assert (x['item'] == 500), 'Augmentation not correctly applied.'
        assert ('augs' in meta), 'Missing augmentations information in meta.'
        assert (meta['augs'] == 'helloworld'), 'Augmentation not correctly applied.'
        dataset = TmpData(10, use_aug=False)
        (x, y, meta) = dataset[5]
        assert (x['item'] == 5), 'Unexpected augmentation applied.'
        assert ('augs' not in meta), 'Unexpected augmentation information in meta.'
        # The base-class default augment must be the identity.
        (x2, y2, meta2) = BaseDataset.augment(dataset, x, y, meta)
        assert (x2 == x), 'Incorrect default augmentation'
        assert (y2 == y), 'Incorrect default augmentation'
        assert (meta2 == meta), 'Incorrect default augmentation'

    def test_as_torch(self):
        'Test that conversion to torch can be enabled/disabled.'
        dataset = TmpData(10, as_torch=True)
        (x, y, meta) = dataset[0]
        assert isinstance(x['item'], torch.Tensor), 'Incorrect conversion to torch.'
        # Meta entries must never be tensorized.
        assert isinstance(meta['items'], str), 'Unexpected meta conversion to torch.'
        dataset = TmpData(10, as_torch=False)
        (x, y, meta) = dataset[0]
        assert isinstance(x['item'], int), 'Unexpected conversion to torch.'
        assert isinstance(meta['items'], str), 'Unexpected meta conversion to torch.'

    def test_retry(self):
        'Test that exception retrying can be enabled/disabled.'

        class TmpData2(TmpData):
            def load(self, item, x, y, meta):
                # Fail on even items to trigger the retry machinery.
                if ((item % 2) == 0):
                    raise ValueError
                return super().load(item, x, y, meta)
        with pytest.raises(ValueError):
            _ = TmpData2(10)[2]

        class TmpData3(TmpData2, retry_exc=Exception):
            pass
        (x, y, meta) = TmpData3(10)[2]
        assert (x['item'] != 2), 'Error retrying all exceptions.'

        class TmpData3(TmpData2, retry_exc=ValueError):
            pass
        (x, y, meta) = TmpData3(10)[2]
        assert (x['item'] != 2), 'Error retrying on a specific exception.'

    @pytest.mark.skip(reason='Creates multiple windows on PyCharm')
    def test_play(self):
        'Test dataset playing iterates correctly and sets window sizes.'
        dataset = TmpData(5, as_torch=True)
        # Playing requires numpy-style items, so torch conversion must fail.
        with pytest.raises(ValueError):
            dataset.play()
        dataset = TmpData(5, as_torch=False)
        dataset.show = mock.Mock()
        dataset.play()
        assert (dataset.show.call_count == 5), 'Incorrect number of calls to show.'
        plt.close()

    @pytest.mark.skip(reason='Images are same size on PyCharm')
    def test_fullscreen(self):
        TmpData(5, as_torch=False).play()
        size1 = plt.get_current_fig_manager().canvas.get_width_height()
        plt.close()
        TmpData(5, as_torch=False).play(fullscreen=True)
        size2 = plt.get_current_fig_manager().canvas.get_width_height()
        plt.close()
        assert (size1 != size2), 'Error setting figure to fullscreen.'

    def test_dataset_collate(self):
        'Test that we can collate data by default correctly.'

        class TmpData2(TmpData, retry_exc=ValueError):
            def load(self, item, x, y, meta):
                if ((item % 2) == 0):
                    raise ValueError
                return super().load(item, x, y, meta)

            def augment(self, x, y, meta):
                # Random augmentation, so collation must cope with missing keys.
                return (super().augment(x, y, meta) if (torch.rand(1) < 0.5) else (x, y, meta))
        dataset = TmpData2(10, as_torch=True, use_aug=True)
        batch_size = 4
        loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=4, collate_fn=dataset.collate_fn)
        (x, y, meta) = next(iter(loader))
        assert isinstance(x['item'], torch.Tensor), 'Incorrect conversion to torch.'
        assert (x['item'].shape == (batch_size,)), 'Incorrect item batch size.'
        assert (x['item'][0] != 0), 'Incorrect retry on error.'
        assert ('items' in meta), 'Missing items in meta.'
        assert (len(meta['items']) == batch_size), 'Incorrect items batch size.'
        assert isinstance(meta['items'][0], str), 'Incorrect items type.'
        assert ('data_timer' in meta), 'Missing data_timer in meta.'
        assert (len(meta['data_timer']) == batch_size), 'Incorrect data_timer batch size.'
        assert isinstance(meta['data_timer'][0], MultiLevelTimer), 'Incorrect items type.'
        assert ('errors' in meta), 'Missing errors in meta.'
        assert (len(meta['errors']) == batch_size), 'Incorrect errors batch size.'
        assert isinstance(meta['errors'][0], str), 'Incorrect errors type.'
        assert ('augs' in meta), 'Missing augmentations in meta.'
        assert (len(meta['augs']) == batch_size), 'Incorrect augmentations batch size.'
        assert isinstance(meta['augs'][0], str), 'Incorrect augmentations type.'
def test_all():
    """Check all expected symbols are imported."""
    expected = {'register', 'NET_REG', 'DATA_REG', 'LOSS_REG', 'SCHED_REG'}
    assert set(registry.__all__) == expected, 'Incorrect keys in `__all__`.'
def test_sched():
    """Check scheduler registry has all expected keys."""
    expected = {'steplr', 'exp', 'cos', 'cos_warm', 'plateau'}
    # FIX: dropped the pointless `f` prefix — the message has no placeholders
    # (ruff F541); the rendered string is unchanged.
    assert set(SCHED_REG.keys()) == expected, 'Incorrect SCHEDULER keys.'
def test_add_net():
    """Check adding to the network registry."""
    key, reg_type = 'test', 'net'

    @register(key, reg_type)
    class Test():
        ...

    assert key in NET_REG, 'Missing item from NETWORK registry.'
    # Clean up so other registry tests start from a known state.
    NET_REG.pop(key)
def test_add_loss():
    """Check adding to the loss registry."""
    key, reg_type = 'test', 'loss'

    @register(key, reg_type)
    class Test():
        ...

    assert key in LOSS_REG, 'Missing item from LOSS registry.'
    # Clean up so other registry tests start from a known state.
    LOSS_REG.pop(key)
def test_add_dataset():
    """Check adding to the dataset registry."""
    key, reg_type = 'test', 'data'

    @register(key, reg_type)
    class Test():
        ...

    assert key in DATA_REG, 'Missing item from DATASET registry.'
    # Clean up so other registry tests start from a known state.
    DATA_REG.pop(key)
def test_add_auto():
    """Check automatic adding based on class name."""
    key = 'test'

    @register(key)
    class TestNet():
        ...

    assert key in NET_REG, 'Missing item from automatic NET registry.'
    NET_REG.pop(key)

    @register(key)
    class TestLoss():
        ...

    assert key in LOSS_REG, 'Missing item from automatic LOSS registry.'
    LOSS_REG.pop(key)

    @register(key + '2')
    class TestReg():
        ...

    assert (key + '2') in LOSS_REG, 'Missing item from automatic LOSS registry.'
    LOSS_REG.pop(key + '2')

    @register(key)
    class TestDataset():
        ...

    assert key in DATA_REG, 'Missing item from automatic DATA registry.'
    DATA_REG.pop(key)

    # A class whose name gives no registry hint must be rejected.
    with pytest.raises(ValueError):

        @register(key)
        class Test():
            ...
def test_add_multiple():
    """Check that multiple names can be registered against a single class."""
    names = ('test1', 'test2')

    @register(names, type='net')
    class TestNet():
        ...

    for n in names:
        assert n in NET_REG, 'Missing item from automatic NET registry.'
    # FIX: use a plain loop for the clean-up side effect — the original built a
    # throwaway list via a comprehension (ruff PERF401-style smell).
    for n in names:
        NET_REG.pop(n)
def test_register_types():
    """Check raised exception when adding to unknown registry."""
    # 'foo' is not a known registry type, so registration must fail.
    with pytest.raises(TypeError):

        @register(name='temp', type='foo')
        class Test():
            ...
def test_register_duplicates():
    """Check raised exception when registering a duplicate name."""
    key, reg_type = 'test', 'net'

    @register(key, reg_type)
    class Test():
        ...

    # Re-registering the same name must fail by default...
    with pytest.raises(ValueError):

        @register(key, reg_type)
        class Test2():
            ...

    # ...and when explicitly requesting no overwrite.
    with pytest.raises(ValueError):

        @register(key, reg_type, overwrite=False)
        class Test3():
            ...

    # Duplicates hidden inside a multi-name registration must also fail,
    # whatever their position in the tuple.
    with pytest.raises(ValueError):

        @register(('asdf', key), reg_type, overwrite=False)
        class Test3():
            ...

    with pytest.raises(ValueError):

        @register((key, 'asdf'), reg_type, overwrite=False)
        class Test3():
            ...

    NET_REG.pop(key)
def test_register_overwrite():
    """Check registry items can be overwritten if desired."""
    key, reg_type = 'test', 'net'

    @register(key, reg_type)
    class Test():
        ...

    assert NET_REG[key] == Test, 'Unexpected base class when overwriting.'

    @register(key, reg_type, overwrite=True)
    class Test2():
        ...

    assert NET_REG[key] == Test2, 'Unexpected overwritten class.'
    NET_REG.pop(key)
def test_ignore_main():
    """Check classes created in `__main__` are ignored."""
    from unittest.mock import Mock
    key, reg_type = 'test', 'loss'
    # Pretend the class was defined inside a script's `__main__` module.
    Mock.__module__ = '__main__'
    with pytest.warns(UserWarning):
        _ = register(key, reg_type)(Mock)
    assert key not in LOSS_REG, 'Class from `__main__` not ignored.'
def test_all():
    """Check all expected symbols are imported."""
    expected = {'MaskReg'}
    assert set(mask.__all__) == expected, 'Incorrect keys in `__all__`.'
def test_registry():
    """Check MaskReg is added to LOSS_REGISTRY."""
    key = 'disp_mask'
    assert key in LOSS_REG, 'Missing key from loss registry.'
    assert LOSS_REG[key] == MaskReg, 'Incorrect class in loss registry.'
def test_mask():
    """Test MaskReg on all-ones and all-zeros inputs."""
    shape = (1, 1, 100, 200)
    loss, loss_dict = MaskReg().forward(torch.ones(shape))
    assert loss == 0, 'Error in correct BCELoss.'
    assert not loss_dict, 'Unexpected keys in `loss_dict`.'
    loss, loss_dict = MaskReg().forward(torch.zeros(shape))
    assert loss == 100.0, 'Error in incorrect BCELoss.'
def test_all():
    """Check all expected symbols are imported."""
    expected = {'OccReg'}
    assert set(occlusion.__all__) == expected, 'Incorrect keys in `__all__`.'
def test_registry():
    """Check OcclusionReg is added to LOSS_REGISTRY."""
    key = 'disp_occ'
    assert key in LOSS_REG, 'Missing key from loss registry.'
    assert LOSS_REG[key] == OccReg, 'Incorrect class in loss registry.'
def test_occlusion_ones():
    """Test OcclusionReg when all values are 1."""
    ones = torch.ones((1, 1, 100, 200))
    loss, loss_dict = OccReg(invert=False).forward(ones)
    assert loss == 1.0, 'Error in `invert=False`.'
    assert not loss_dict, 'Unexpected keys in `loss_dict`.'
    loss, loss_dict = OccReg(invert=True).forward(ones)
    assert loss == -1.0, 'Error in `invert=True`.'
    # Omitting `invert` should behave like `invert=False`.
    loss, loss_dict = OccReg().forward(ones)
    assert loss == 1.0, 'Error in default `invert`. Expected `False`.'
def test_occlusion_rand():
    """Test OcclusionReg with a random tensor."""
    rand = torch.rand((1, 1, 100, 200))
    expected = rand.mean()
    loss, loss_dict = OccReg(invert=False).forward(rand)
    assert loss == expected, 'Error in `invert=False`.'
    assert not loss_dict, 'Unexpected keys in `loss_dict`.'
    loss, loss_dict = OccReg(invert=True).forward(rand)
    assert loss == -expected, 'Error in `invert=True`.'
    # Omitting `invert` should behave like `invert=False`.
    loss, loss_dict = OccReg().forward(rand)
    assert loss == expected, 'Error in default `invert`. Expected `False`.'
def test_all():
    """Check all expected symbols are imported."""
    expected = {'SmoothReg', 'FeatPeakReg', 'FeatSmoothReg'}
    assert set(smooth.__all__) == expected, 'Incorrect keys in `__all__`.'
def test_registry():
    """Check smoothness regularizations are added to LOSS_REGISTRY."""
    expected = {'disp_smooth': SmoothReg, 'feat_peaky': FeatPeakReg, 'feat_smooth': FeatSmoothReg}
    for key, cls in expected.items():
        assert key in LOSS_REG, 'Missing key from loss registry.'
        assert LOSS_REG[key] == cls, 'Incorrect class in loss registry.'
def test_all():
    """Check all expected symbols are imported."""
    expected = {'rgb_from_disp', 'rgb_from_feat'}
    assert set(viz.__all__) == expected, 'Incorrect keys in `__all__`.'
class TestRGBfromDisp():
    """Tests for `rgb_from_disp` disparity-to-RGB visualization."""

    def test_default(self):
        # Default behaviour should match 'turbo' colormap with vmin=0 and a
        # per-image 95th-percentile vmax.
        x = torch.rand(2, 1, 10, 20)
        out = rgb_from_disp(x)
        out2 = rgb_from_disp(x, cmap='turbo', vmin=0, vmax=[np.percentile(x[0], 95), np.percentile(x[1], 95)])
        assert np.allclose(out, out2), 'Incorrect default params.'

    def test_range(self):
        'Test disparity conversion with custom normalization ranges.'
        arr = np.array([[0, 0, 0.5, 0.5, 1, 1]])
        out = rgb_from_disp(arr).squeeze()
        # Values below vmin should clip to the same colour as vmin itself.
        out2 = rgb_from_disp(arr, vmin=0.5, vmax=1).squeeze()
        assert np.allclose(out2[2], out2[3]), 'Incorrect sanity check for same value.'
        assert (not np.allclose(out2[3], out2[4])), 'Incorrect sanity check for different value.'
        assert np.allclose(out2[0], out2[2]), 'Incorrect clipping to min value.'
        assert np.allclose(out[0], out2[0]), 'Inconsistent min value.'
        assert (not np.allclose(out2[2], out[2])), 'Incorrect clipping to min value.'
        # Values above vmax should clip to the same colour as vmax itself.
        out3 = rgb_from_disp(arr, vmin=0, vmax=0.5).squeeze()
        assert np.allclose(out3[2], out3[3]), 'Incorrect sanity check for same value.'
        assert (not np.allclose(out3[2], out3[0])), 'Incorrect sanity check for different value.'
        assert np.allclose(out3[2], out3[4]), 'Incorrect clipping to max value.'
        assert np.allclose(out[5], out3[5]), 'Inconsistent max value.'
        assert (not np.allclose(out3[2], out[2])), 'Incorrect clipping to max value.'

    def test_inv(self):
        # `invert=True` on depth should match `invert=False` on 1/depth.
        x = torch.rand(2, 1, 10, 20)
        x_inv = (1 / x)
        out = rgb_from_disp(x, invert=True)
        out2 = rgb_from_disp(x_inv, invert=False)
        assert np.allclose(out, out2), 'Incorrect inversion.'

    def test_shape(self):
        # Batched (b, 1, h, w) and unbatched (h, w) inputs should agree.
        x = torch.rand(1, 1, 10, 20)
        out = rgb_from_disp(x)
        out2 = rgb_from_disp(x.squeeze())
        assert np.allclose(out[0], out2), 'Incorrect out with different ndim.'
        assert (out.ndim == 4), 'Incorrect dim for 4D input.'
        assert (out2.ndim == 3), 'Incorrect dim for 2D input.'

    def test_np(self):
        # Channel-last numpy input should match channel-first torch input.
        x = torch.rand(2, 1, 10, 20)
        x_np = x.permute(0, 2, 3, 1).numpy()
        out = rgb_from_disp(x)
        out = out.permute(0, 2, 3, 1).numpy()
        out2 = rgb_from_disp(x_np)
        assert np.allclose(out, out2), 'Incorrect conversion to np.'
class TestRGBfromFeat():
    """Tests for `rgb_from_feat` feature-map visualization."""

    def test_norm(self):
        """Output channels should span the full [0, 1] range."""
        feat = torch.rand(1, 5, 10, 20)
        out = rgb_from_feat(feat).squeeze().flatten(-2, -1)
        assert torch.allclose(out.min(-1)[0], out.new_zeros(3)), 'Incorrect min norm.'
        assert torch.allclose(out.max(-1)[0], out.new_ones(3)), 'Incorrect max norm.'

    def test_shape(self):
        """Batched and unbatched inputs should both produce matching RGB."""
        feat = torch.rand(1, 5, 10, 20)
        batched = rgb_from_feat(feat)
        assert batched.shape[1] == 3, 'Expected output to be RGB.'
        single = rgb_from_feat(feat[0])
        assert single.shape[0] == 3, 'Expected output to be RGB.'
        assert torch.allclose(batched[0], single), 'Incorrect output with different dimensions..'

    def test_np(self):
        """Channel-last numpy inputs should match channel-first torch inputs."""
        feat = torch.rand(2, 5, 10, 20)
        feat_np = feat.permute(0, 2, 3, 1).numpy()
        out = rgb_from_feat(feat).permute(0, 2, 3, 1).numpy()
        out_np = rgb_from_feat(feat_np)
        assert np.allclose(out, out_np), 'Incorrect conversion to np.'
class TestDefaultCollate():
    """Tests for the project's `default_collate` wrapper."""

    def test_base(self):
        """Tensors and strings should collate exactly like torch's default."""
        torch_collate = torch.utils.data._utils.collate.default_collate
        tensors = [torch.rand(3, 100, 200) for _ in range(5)]
        expected = torch_collate(tensors)
        out = default_collate(tensors)
        assert out.allclose(expected), 'Error when matching default tensor collate.'
        strings = ['test' for _ in range(5)]
        expected = torch_collate(strings)
        out = default_collate(strings)
        assert strings == expected == out, 'Error when matching default string collate.'

    def test_timer(self):
        """`MultiLevelTimer` items should be returned unchanged as a list."""
        timers = [MultiLevelTimer() for _ in range(5)]
        out = default_collate(timers)
        assert timers == out, 'Error when matching MultiLevelTimer collate.'
def test_all():
    """Check all expected symbols are imported."""
    expected = {'opt_args_deco', 'delegates', 'map_container', 'retry_new_on_error'}
    assert set(deco.__all__) == expected, 'Incorrect keys in `__all__`.'
@opt_args_deco
def _deco(func, prefix='', suffix=''):
    """Helper: wrap `func` so it returns both its result and a decorated string."""

    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        decorated = f'{prefix}{result}{suffix}'
        return (result, decorated)
    return wrapper
def _add(a, b): 'Helper.' return (a + b)
class TestOptArgsDeco():
    """Tests for decorators built with `opt_args_deco`."""

    def test_base(self):
        """Test different ways of instantiating optional arguments."""
        plain = _deco(_add)
        assert plain(1, 2) == (3, '3'), 'Error with default arguments.'
        prefixed = _deco(_add, prefix='***')
        assert prefixed(1, 2) == (3, '***3'), 'Error with first default arg.'
        wrapped = _deco(prefix='***', suffix='***')(_add)
        assert wrapped(1, 2) == (3, '***3***'), 'Error with default args.'

    def test_callable(self):
        """Test that we raise errors to enforce keyword-only optional arguments."""
        with pytest.raises(TypeError):
            _ = _deco(_add, '***')
        with pytest.raises(TypeError):
            _ = _deco('***')(_add)
def _parent(a, b=0, c=None, **kwargs): ...
def _child(new, *args, **kwargs): ...
class TestDelegates():
    """Tests for the `delegates` signature-forwarding decorator."""

    def test_default(self):
        fn = delegates(_parent)(_child)
        params = dict(inspect.signature(fn).parameters)
        # `**kwargs` should have been replaced by `_parent`'s named params.
        assert list(params) == ['new', 'a', 'b', 'c'], 'Incorrect delegated signature.'
def _stringify(x, suffix=''): 'Helper.' return f'{x}{suffix}'
class TestMapContainer():
    """Tests for `map_container`, which maps a function over nested containers."""

    def test_single(self):
        'Test with a single input, i.e. equivalent to the original function.'
        value = 2
        mapped = map_container(_stringify)
        assert mapped(value) == _stringify(value), "Error in 'map_apply' single input"
        assert mapped(value, suffix='***') == _stringify(value, suffix='***'), "Error in 'map_apply' single input"

    def test_multi(self):
        'Test using nested sequences/dicts.'
        nested = [2, {'a': 'test', 'b': {1}, 'c': (1, 2)}]
        expected_plain = ['2', {'a': 'test', 'b': {'1'}, 'c': ('1', '2')}]
        expected_suffixed = ['2***', {'a': 'test***', 'b': {'1***'}, 'c': ('1***', '2***')}]
        mapped = map_container(_stringify)
        assert mapped(nested) == expected_plain, "Error in 'map_apply' sequence input"
        assert mapped(nested, suffix='***') == expected_suffixed, "Error in 'map_apply' sequence input"
class Dataset():
    """Minimal stand-in dataset used to exercise `retry_new_on_error`.

    Subclass keyword arguments (`retry_exc`, `silent`, `max_retries`,
    `use_blacklist`) are captured by `__init_subclass__` and forwarded to the
    decorator when the first instance is created.
    """

    def __init__(self, n):
        self.n = n
        self.log_time = True
        self.timer = MultiLevelTimer()
        # NOTE: rebinds the *class* `__getitem__` with the decorated `getitem`,
        # using the retry options stored by `__init_subclass__`.
        self.__class__.__getitem__ = retry_new_on_error(self.__class__.getitem, exc=self.retry_exc, silent=self.silent, max=self.max, use_blacklist=self.use_blacklist)

    def __init_subclass__(cls, retry_exc, silent, max_retries, use_blacklist, **kwargs):
        # Stash the retry configuration on the subclass for use in __init__.
        super().__init_subclass__(**kwargs)
        cls.retry_exc = retry_exc
        cls.silent = silent
        cls.max = max_retries
        cls.use_blacklist = use_blacklist

    def __len__(self):
        return self.n

    def getitem(self, item):
        # Subclasses override this with the (possibly failing) loading logic.
        raise NotImplementedError

    def __getitem__(self, item):
        return self.getitem(item)
class TestRetryDifferentOnError():
    """Tests for `retry_new_on_error` as applied through the `Dataset` base class."""

    def test_default(self):
        'Test default parameters, catching any exception and logging.'

        class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if ((item % 2) == 0):
                    raise ValueError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        (x, y, meta) = dataset[1]
        assert (x['item'] == 1), 'Loading of item (without exception) failed.'
        assert ('errors' in meta), "Missing 'error' key when logging errors"
        # Even items raise, so the retry must hand back some other (odd) item.
        (x, y, meta) = dataset[2]
        assert (x['item'] != 2), 'Loading of item (with exception) failed.'

    def test_exc_single(self):
        'Test that we can catch a specific exception.'

        class TmpData(Dataset, retry_exc=ValueError, silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if ((item % 2) == 0):
                    raise ValueError
                return ({'item': item}, {}, {'item': str(item)})
        (x, y, meta) = TmpData(10)[2]
        assert (x['item'] != 2), 'Loading of item (with exception) failed.'

    def test_exc_ignore(self):
        'Test that we ignore non-specified exceptions.'

        class TmpData(Dataset, retry_exc=ValueError, silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if ((item % 2) == 0):
                    raise TypeError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        _ = dataset[1]
        # TypeError is not in `retry_exc`, so it must propagate untouched.
        with pytest.raises(TypeError):
            _ = dataset[2]

    def test_exc_multiple(self):
        'Test that we can catch multiple specific exceptions.'

        class TmpData(Dataset, retry_exc=[ValueError, TypeError], silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if ((item % 2) == 0):
                    raise ValueError
                if ((item % 3) == 0):
                    raise TypeError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        (x, y, meta) = dataset[2]
        assert (x['item'] != 2), 'Loading of item (with exception) failed.'
        (x, y, meta) = dataset[3]
        # NOTE(review): this compares against 2, not 3 — presumably a copy-paste
        # slip; the assertion still holds (item 2 always raises) but is weaker
        # than intended. Confirm and consider `!= 3`.
        assert (x['item'] != 2), 'Loading of item (with exception) failed.'

    def test_exc_none(self):
        'Test that we can disable all exceptions.'

        class TmpData(Dataset, retry_exc=None, silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if ((item % 2) == 0):
                    raise ValueError
                if ((item % 3) == 0):
                    raise TypeError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        _ = dataset[5]
        # With no exceptions registered, everything propagates.
        with pytest.raises(ValueError):
            _ = dataset[2]
        with pytest.raises(TypeError):
            _ = dataset[3]

    def test_silent(self):
        # Silent mode still retries but must not record an 'errors' entry.
        class TmpData(Dataset, retry_exc=Exception, silent=True, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if ((item % 2) == 0):
                    raise ValueError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        (x, y, meta) = dataset[2]
        assert (x['item'] != 2), 'Loading of item (with exception) failed.'
        assert ('errors' not in meta), 'Error when disabling exception catching.'

    def test_max_retries(self):
        # Unlimited retries on an always-failing dataset eventually recurse out.
        class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                raise ValueError
        with pytest.raises(RecursionError):
            _ = TmpData(10)[0]

        # With a retry cap, the decorator is expected to raise RuntimeError instead.
        class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=5, use_blacklist=False):
            def getitem(self, item):
                raise ValueError
        with pytest.raises(RuntimeError):
            _ = TmpData(10)[0]

    def test_blacklist(self):
        'Test that we can add items to a blacklist, which are excluded from reloading.'

        class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=None, use_blacklist=True):
            def getitem(self, item):
                if (item != 0):
                    raise ValueError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        # First pass populates the blacklist with every failing item.
        _ = [dataset[i] for i in range(10)]
        for i in range(10):
            (x, y, meta) = dataset[i]
            assert (x['item'] == 0)
            # Blacklisted items must not be retried (and thus not re-logged).
            for j in range(10):
                if (i != j):
                    assert (str(j) not in meta['errors']), 'Error including item in blacklist.'

    def test_blacklist_none(self):
        'Test that items causing exceptions can be repeated when not creating a blacklist.'

        class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if (item != 0):
                    raise ValueError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        num_errors = []
        for i in range(10):
            (x, y, meta) = dataset[i]
            assert (x['item'] == 0), 'Error loading correct item.'
            num_errors.append(meta['errors'].count('ValueError'))
        # Without a blacklist, a failing item can be drawn (and fail) repeatedly.
        assert (max(num_errors) > 1), 'Error when repeating exception items.'
def _random_pil(shape):
    """Create a PIL image of the given shape filled with random uint8 pixels."""
    pixels = np.random.randint(0, 255, size=shape, dtype=np.uint8)
    return Image.fromarray(pixels)
def _random_np(shape):
    """Create a random float32 numpy array of the given shape (values in [0, 1))."""
    values = np.random.rand(*shape)
    return values.astype(np.float32)
def _random_torch(shape):
    """Create a random float32 torch tensor of the given shape (values in [0, 1))."""
    tensor = torch.rand(shape, dtype=torch.float32)
    return tensor
def test_all():
    """Check all expected symbols are imported."""
    expected = {'readlines', 'pil2np', 'np2pil', 'write_yaml', 'load_yaml', 'load_merge_yaml'}
    assert set(io.__all__) == expected, 'Incorrect keys in `__all__`.'
def test_pil2np():
    """Test conversion from PIL to numpy."""
    shape = (100, 200, 3)
    converted = pil2np(_random_pil(shape))
    assert isinstance(converted, np.ndarray), 'Output should be a numpy array.'
    assert converted.dtype == np.float32, 'Output should be float32.'
    assert converted.shape == shape, 'Output should be same size as input.'
    assert 0 <= converted.min() and converted.max() <= 1, 'Output should be normalized [0, 1].'
def test_np2pil():
    """Test conversion from numpy to PIL.

    Checks type, size, and that pixel values land in the uint8 range.
    """
    shape = (h, w, _) = (100, 200, 3)
    image = _random_np(shape)
    out = np2pil(image)
    assert isinstance(out, Image.Image), 'Output should be a PIL Image.'
    assert (out.size == (w, h)), 'Output should be same size as input.'
    # PIL's `getextrema` returns (min, max) per band; the original unpacked
    # these as (vmax, vmin) — reversed — making both range checks vacuous
    # (min <= 255 and max >= 0 are always true for uint8 data).
    (vmin, vmax) = out.getextrema()[0]
    assert ((vmax <= 255) and (vmin >= 0)), 'Output should be [0, 255].'
def test_all():
    """Check all expected symbols are imported."""
    expected = {'get_logger', 'flatten_dict', 'sort_dict', 'apply_cmap'}
    assert set(misc.__all__) == expected, 'Incorrect keys in `__all__`.'
class TensorGetLogger():
    """Tests for `get_logger`."""
    # NOTE(review): the class name does not match pytest's default `Test*`
    # collection pattern, so these tests are likely never collected.
    # Presumably this was meant to be `TestGetLogger` — verify and rename.

    def test_default(self):
        key = 'test1234'
        logger = get_logger(key)
        # The logger must be registered with the root manager under `key`.
        assert (key in logging.root.manager.loggerDict), 'Logger not created.'
        assert (logger == logging.root.manager.loggerDict[key]), 'Incorrect logger created.'
        assert (not logger.propagate), 'Logger propagate should be disabled.'
        assert (len(logger.handlers) == 1), 'Incorrect number of handlers.'

    def test_duplicate(self):
        'Test that we do not duplicate logger StreamHandlers.'
        # Plain `logging` happily stacks duplicate handlers...
        name = 'test_duplicate'
        logger = logging.getLogger(name)
        logger.addHandler(logging.StreamHandler())
        assert (len(logger.handlers) == 1), 'Logger should only have one handler.'
        logger.addHandler(logging.StreamHandler())
        assert (len(logger.handlers) == 2), 'Logger should have two handlers.'
        # ...whereas `get_logger` is expected to be idempotent.
        name2 = 'test_duplicate_v2'
        logger2 = get_logger(name2)
        assert (len(logger2.handlers) == 1), 'Custom Logger should only have one handler.'
        logger2 = get_logger(name2)
        assert (len(logger2.handlers) == 1), 'Custom Logger should only have one handler (after second time).'
class TestFlatten():
    """Tests for `flatten_dict`."""

    def test_default(self):
        """Test basic nesting & default separator."""
        nested = {'a': 1, 'b': 2, 'c': dict(a=1, b=2)}
        expected = {'a': 1, 'b': 2, 'c/a': 1, 'c/b': 2}
        assert flatten_dict(nested) == expected, 'Incorrect flattened keys.'
        # Passing the default separator explicitly must give the same result.
        assert flatten_dict(nested, sep='/') == expected, 'Incorrect separator keys.'

    def test_separator(self):
        """Test custom separator."""
        nested = {'a': 1, 'b': 2, 'c': dict(a=1, b=2)}
        expected = {'a': 1, 'b': 2, 'c.a': 1, 'c.b': 2}
        assert flatten_dict(nested, sep='.') == expected, 'Incorrect flattened keys.'

    def test_nesting(self):
        """Test multiple nestings."""
        nested = {'a': [0, 1, {}], 'b': {'a': 1, 'b': 2, 'c': []}, 'c': {'a': {'a': 0}, 'b': {'b': 1}, 'c': {'c': 2}}}
        expected = {'a': [0, 1, {}], 'b/a': 1, 'b/b': 2, 'b/c': [], 'c/a/a': 0, 'c/b/b': 1, 'c/c/c': 2}
        assert flatten_dict(nested) == expected, 'Incorrect flattened keys.'
class TestSortedDict():
    """Tests for `sort_dict`."""

    def test_sorted_dict(self):
        """Test sorting dict keys."""
        unsorted = dict(b=2, c=1, a=10)
        expected = dict(a=10, b=2, c=1)
        assert sort_dict(unsorted) == expected, 'Incorrect sorted order'
        # Mixed-type keys cannot be ordered and must raise.
        with pytest.raises(TypeError):
            sort_dict({'a': 1, 1: 0, 'test': None})
class TestApplyCmap():
    """Tests for `apply_cmap`."""

    def test_default(self):
        'Test applying a cmap with default parameters.'
        # Equal values must map to identical colours; distinct values to
        # distinct colours.
        arr = np.array([0, 0, 0.5, 0.5, 1, 1])
        out = apply_cmap(arr)
        assert np.allclose(out[0], out[1]), 'Incorrect 0 mapping.'
        assert np.allclose(out[2], out[3]), 'Incorrect 0.5 mapping.'
        assert np.allclose(out[4], out[5]), 'Incorrect 1 mapping.'
        assert (not np.allclose(out[0], out[2])), 'Incorrect 0 vs. 0.5'
        assert (not np.allclose(out[0], out[4])), 'Incorrect 0 vs. 1'
        assert (not np.allclose(out[2], out[4])), 'Incorrect 0.5 vs. 1'
        # Defaults are expected to be cmap='turbo' and range [arr.min(), arr.max()].
        out2 = apply_cmap(arr, cmap='turbo')
        assert np.allclose(out, out2), 'Incorrect default colormap, expected "turbo".'
        out3 = apply_cmap(arr, vmin=arr.min(), vmax=arr.max())
        assert np.allclose(out, out3), 'Incorrect default range.'

    def test_range(self):
        'Test applying a cmap with custom normalization ranges.'
        arr = np.array([0, 0, 0.5, 0.5, 1, 1])
        out = apply_cmap(arr)
        # Raising vmin should clip values below it onto the minimum colour.
        out2 = apply_cmap(arr, vmin=0.5)
        assert np.allclose(out2[2], out2[3]), 'Incorrect sanity check for same value.'
        assert (not np.allclose(out2[3], out2[4])), 'Incorrect sanity check for different value.'
        assert np.allclose(out2[0], out2[2]), 'Incorrect clipping to min value.'
        assert np.allclose(out[0], out2[0]), 'Inconsistent min value.'
        assert (not np.allclose(out2[2], out[2])), 'Incorrect clipping to min value.'
        # Lowering vmax should clip values above it onto the maximum colour.
        out3 = apply_cmap(arr, vmax=0.5)
        assert np.allclose(out3[2], out3[3]), 'Incorrect sanity check for same value.'
        assert (not np.allclose(out3[2], out3[0])), 'Incorrect sanity check for different value.'
        assert np.allclose(out3[2], out3[4]), 'Incorrect clipping to max value.'
        assert np.allclose(out[5], out3[5]), 'Inconsistent max value.'
        assert (not np.allclose(out3[2], out[2])), 'Incorrect clipping to max value.'
def test_all():
    """Check all expected symbols are imported."""
    expected = {'Timer', 'MultiLevelTimer'}
    assert set(timers.__all__) == expected, 'Incorrect keys in `__all__`.'
class TestTimer():
    """Tests for the single-level `Timer`."""

    def test_options(self):
        'Test that formatting options are set correctly.'
        (name, precision) = ('Test', 4)
        timer = Timer(name=name, as_ms=True, precision=precision)
        assert (timer.name == name), 'Incorrect Timer name'
        assert (repr(timer) == f'Timer(name={name}, as_ms=True, sync_gpu=False, precision={precision})')
        with timer as t:
            ...
        # String form is expected as 'name: elapsed units'.
        parts = str(t).split(' ')
        assert (len(parts) == 3), "Incorrect Timer formatting, expected 'name: elapsed units'"
        assert (parts[2] == 'ms'), 'Incorrect Timer units.'
        assert (len(parts[1].split('.')[(- 1)]) <= precision), 'Incorrect Timer precision.'

    def test_accuracy(self):
        'Test that timing is accurate (within 2ms).'
        target = 0.3
        with Timer() as t:
            time.sleep(target)
        # NOTE(review): atol=0.02 is 20 ms, despite the "2ms" wording — confirm
        # which tolerance is intended.
        assert np.allclose(t.elapsed, target, atol=0.02), 'Timer off by more than 2ms.'

    @mock.patch('torch.cuda.synchronize')
    def test_sync_gpu(self, sync_fn):
        'Test that torch synchronize is called correctly.'
        with Timer(sync_gpu=True):
            ...
        # One synchronize on enter and one on exit.
        assert (sync_fn.call_count == 2), 'Incorrect number of calls to synchronize.'
class TestMultiLevelTimer():
    """Tests for the nested `MultiLevelTimer`."""

    def test_options(self):
        'Test that formatting options are set correctly.'
        (name, precision) = ('Test', 4)
        timer = MultiLevelTimer(name=name, as_ms=True, sync_gpu=False, precision=precision)
        assert (repr(timer) == f'MultiLevelTimer(name={name}, as_ms=True, sync_gpu=False, precision={precision})')
        # Only checks that `str` does not raise on an empty timer.
        _ = str(timer)

    def test_keys(self):
        'Test that timer keys are set correctly.'
        timer = MultiLevelTimer()
        with timer('Label'):
            assert ('Label' in timer._data), 'Error setting label name.'
        # Re-using an existing label must raise KeyError.
        with pytest.raises(KeyError):
            _ = timer('Label').__enter__()
        # Entering without an explicit label falls back to 'Level<depth>'.
        timer = MultiLevelTimer()
        with timer:
            ...
        assert ('Level1' in timer._data), 'Error setting default label name.'

    def test_accuracy(self):
        'Test accuracy with single label.'
        target = 0.3
        timer = MultiLevelTimer()
        with timer('Test'):
            time.sleep(target)
        # NOTE(review): atol=0.02 is 20 ms, despite the "2ms" wording — confirm.
        assert np.allclose(timer['Test']['elapsed'], target, atol=0.02), 'Timer off by more than 2ms.'

    def test_nesting(self):
        'Test accuracy with nesting.'
        target = 0.3
        timer = MultiLevelTimer()
        with timer('Test1'):
            time.sleep(target)
            with timer('Test2'):
                time.sleep((target * 2))
        assert (timer['Test2']['depth'] == 2), "Incorrect 'inner' depth level."
        assert (timer['Test1']['depth'] == 1), "Incorrect 'outer' depth level."
        # The outer timer accumulates its own sleep plus the inner one's.
        target2 = (target * 2)
        target1 = (target + target2)
        assert np.allclose(timer['Test2']['elapsed'], target2, atol=0.02), "'Inner' off by more than 2ms."
        assert np.allclose(timer['Test1']['elapsed'], target1, atol=0.02), "'Outer' off by more than 2ms."

    @mock.patch('torch.cuda.synchronize')
    def test_sync_gpu(self, sync_fn):
        'Test that torch synchronize is called correctly.'
        timer = MultiLevelTimer(sync_gpu=True)
        with timer:
            ...
        # One synchronize on enter and one on exit.
        assert (sync_fn.call_count == 2), 'Incorrect number of calls to synchronize.'

    def test_copy(self):
        'Test that timing data is copied correctly and is independent of original timer.'
        timer = MultiLevelTimer(name='Test', as_ms=False, precision=4)
        with timer:
            time.sleep(0.1)
        data = timer.copy()._data
        assert (data == timer._data), 'Incorrect data copied.'
        assert (data is not timer._data), 'Incorrect deep copy of data.'
        # Mutating the copy must not leak back into the original.
        data['test'] = 0
        assert ('test' not in timer._data), 'Incorrect deep copy of data.'

    def test_reset(self):
        'Test that timer data can be reset.'
        timer = MultiLevelTimer()
        with timer('Label'):
            ...
        assert ('Label' in timer._data)
        timer.reset()
        assert (timer._data == {}), 'Incorrect data deletion.'
        # Resetting while a timer is still running must be rejected.
        with timer('Label'):
            with pytest.raises(RuntimeError):
                timer.reset()

    def test_mean_elapsed(self):
        'Test that timers elapsed time get averaged correctly.'
        # Degenerate inputs are passed straight through.
        assert (MultiLevelTimer.mean_elapsed([]) == []), 'Error returning empty list.'
        assert (MultiLevelTimer.mean_elapsed(None) is None), 'Error returning None.'
        (sleep_time1, sleep_time2) = (0.3, 0.9)
        target = ((sleep_time1 + sleep_time2) / 2)
        timer1 = MultiLevelTimer()
        with timer1('Test'):
            time.sleep(sleep_time1)
        timer2 = MultiLevelTimer()
        with timer2('Test'):
            time.sleep(sleep_time2)
        data = MultiLevelTimer.mean_elapsed([timer1, timer2])
        assert np.allclose(data['Test'], target, atol=0.02), "'mean_elapsed' off by more than 2ms."
def is_image_file(filename):
    """Return True if `filename` ends with one of the recognised image extensions."""
    # str.endswith accepts a tuple of suffixes, matching any of them.
    return filename.endswith(tuple(IMG_EXTENSIONS))
def make_dataset(dir, max_dataset_size=float('inf')):
    """Collect image paths under `dir`, caching the list in `<dir>.txt`.

    On the first call the directory tree is walked (following symlinks) and
    the relative paths are written to the cache file; subsequent calls read
    the cache instead. At most `max_dataset_size` paths are returned from a
    fresh walk (cached lists are returned in full, as before).
    """
    cache = (dir.rstrip('/') + '.txt')
    if os.path.isfile(cache):
        print(('Using filelist cached at %s' % cache))
        with open(cache) as f:
            images = [line.strip() for line in f]
        # Older caches stored absolute paths; newer ones are relative to the
        # parent of `dir`. The `images and` guard fixes an IndexError that the
        # original raised on an empty cache file (now returns []).
        if images and images[0].startswith(dir):
            print('Using image list from older version')
            image_list = list(images)
        else:
            print('Adding prefix to saved image list')
            prefix = os.path.dirname(dir.rstrip('/'))
            image_list = [os.path.join(prefix, image) for image in images]
        return image_list
    print('Walking directory ...')
    images = []
    assert os.path.isdir(dir), ('%s is not a valid directory' % dir)
    for (root, _, fnames) in sorted(os.walk(dir, followlinks=True)):
        for fname in fnames:
            if is_image_file(fname):
                images.append(os.path.join(root, fname))
    # int() guards against a finite float `max_dataset_size` producing a float
    # slice bound (TypeError); min(...) is always <= len(images), so finite.
    image_list = images[:int(min(max_dataset_size, len(images)))]
    with open(cache, 'w') as f:
        prefix = (os.path.dirname(dir.rstrip('/')) + '/')
        for i in image_list:
            f.write(('%s\n' % util.remove_prefix(i, prefix)))
    return image_list
def make_multiple_dataset(dir, max_dataset_size=float('inf')):
    """Collect image paths across the four FaceForensics manipulation methods.

    `dir` is expected to end in two components (e.g. `.../<kind>/<split>`)
    which are re-applied under each method directory in `subdir`. Each method
    keeps its own `<dir>.txt` file-list cache and is randomly sub-sampled to
    at most `max_dataset_size` items.
    """
    subdir = ['Deepfakes', 'Face2Face', 'FaceSwap', 'NeuralTextures']
    total_image_list = []
    # Split off the last two path components and keep the root above them.
    (last_dir, dir) = (((dir.split('/')[(- 2)] + '/') + dir.split('/')[(- 1)]), '/'.join(dir.split('/')[:(- 2)]))
    print(dir)
    for sdir in subdir:
        curr_dir = (((((dir + '/') + sdir) + '/') + last_dir) + '/')
        print(curr_dir)
        cache = (curr_dir.rstrip('/') + '.txt')
        if os.path.isfile(cache):
            print(('Using filelist cached at %s' % cache))
            with open(cache) as f:
                images = [line.strip() for line in f]
            # Older caches stored absolute paths; newer ones are relative to
            # the parent of the method directory.
            if images[0].startswith(curr_dir):
                print('Using image list from older version')
                image_list = []
                for image in images:
                    image_list.append(image)
            else:
                print('Adding prefix to saved image list')
                image_list = []
                prefix = os.path.dirname(curr_dir.rstrip('/'))
                for image in images:
                    image_list.append(os.path.join(prefix, image))
            # Random (not head) sub-sampling per method.
            image_list = random.sample(image_list, min(max_dataset_size, len(image_list)))
            total_image_list += image_list
        else:
            print('Walking directory ...')
            images = []
            assert os.path.isdir(curr_dir), ('%s is not a valid directory' % curr_dir)
            for (root, _, fnames) in sorted(os.walk(curr_dir, followlinks=True)):
                for fname in fnames:
                    if is_image_file(fname):
                        path = os.path.join(root, fname)
                        images.append(path)
            image_list = random.sample(images, min(max_dataset_size, len(images)))
            # Cache paths relative to the method directory's parent.
            with open(cache, 'w') as f:
                prefix = (os.path.dirname(curr_dir.rstrip('/')) + '/')
                for i in image_list:
                    f.write(('%s\n' % util.remove_prefix(i, prefix)))
            total_image_list += image_list
    return total_image_list
def make_multiple_dataset_real(dir, max_dataset_size=float('inf')):
    """Collect REAL image paths from the celebahq and FaceForensics sources.

    The last path component of `dir` (the split) is re-applied under each
    source in `subdir`; each source is cached and sub-sampled to at most
    `max_dataset_size` items.
    """
    # NOTE(review): the celebahq real directory is listed three times —
    # presumably to weight reals against the three generator (pgan/sgan/glow)
    # fake sources in `make_multiple_dataset_fake`; confirm this is intended.
    subdir = ['faces/celebahq/real-tfr-1024-resized128', 'faces/celebahq/real-tfr-1024-resized128', 'faces/celebahq/real-tfr-1024-resized128', 'faceforensics_aligned/Deepfakes/original', 'faceforensics_aligned/Face2Face/original', 'faceforensics_aligned/FaceSwap/original', 'faceforensics_aligned/NeuralTextures/original']
    total_image_list = []
    # Split off the last path component (split name) and keep the root.
    (last_dir, dir) = (dir.split('/')[(- 1)], '/'.join(dir.split('/')[:(- 1)]))
    print(dir)
    for sdir in subdir:
        curr_dir = (((((dir + '/') + sdir) + '/') + last_dir) + '/')
        print(curr_dir)
        cache = (curr_dir.rstrip('/') + '.txt')
        if os.path.isfile(cache):
            print(('Using filelist cached at %s' % cache))
            with open(cache) as f:
                images = [line.strip() for line in f]
            # Older caches stored absolute paths; newer ones are relative.
            if images[0].startswith(curr_dir):
                print('Using image list from older version')
                image_list = []
                for image in images:
                    image_list.append(image)
            else:
                print('Adding prefix to saved image list')
                image_list = []
                prefix = os.path.dirname(curr_dir.rstrip('/'))
                for image in images:
                    image_list.append(os.path.join(prefix, image))
            image_list = random.sample(image_list, min(max_dataset_size, len(image_list)))
            total_image_list += image_list
        else:
            print('Walking directory ...')
            images = []
            assert os.path.isdir(curr_dir), ('%s is not a valid directory' % curr_dir)
            for (root, _, fnames) in sorted(os.walk(curr_dir, followlinks=True)):
                for fname in fnames:
                    if is_image_file(fname):
                        path = os.path.join(root, fname)
                        images.append(path)
            image_list = random.sample(images, min(max_dataset_size, len(images)))
            # Cache paths relative to the source directory's parent.
            with open(cache, 'w') as f:
                prefix = (os.path.dirname(curr_dir.rstrip('/')) + '/')
                for i in image_list:
                    f.write(('%s\n' % util.remove_prefix(i, prefix)))
            total_image_list += image_list
    return total_image_list
def make_multiple_dataset_fake(dir, max_dataset_size=float('inf')):
    """Collect FAKE image paths from the generator and FaceForensics sources.

    Mirrors `make_multiple_dataset_real`: the last path component of `dir`
    (the split) is re-applied under each source in `subdir`; each source is
    cached and sub-sampled to at most `max_dataset_size` items.
    """
    subdir = ['faces/celebahq/pgan-pretrained-128-png', 'faces/celebahq/sgan-pretrained-128-png', 'faces/celebahq/glow-pretrained-128-png', 'faceforensics_aligned/Deepfakes/manipulated', 'faceforensics_aligned/Face2Face/manipulated', 'faceforensics_aligned/FaceSwap/manipulated', 'faceforensics_aligned/NeuralTextures/manipulated']
    total_image_list = []
    # Split off the last path component (split name) and keep the root.
    (last_dir, dir) = (dir.split('/')[(- 1)], '/'.join(dir.split('/')[:(- 1)]))
    print(dir)
    for sdir in subdir:
        curr_dir = (((((dir + '/') + sdir) + '/') + last_dir) + '/')
        print(curr_dir)
        cache = (curr_dir.rstrip('/') + '.txt')
        if os.path.isfile(cache):
            print(('Using filelist cached at %s' % cache))
            with open(cache) as f:
                images = [line.strip() for line in f]
            # Older caches stored absolute paths; newer ones are relative.
            if images[0].startswith(curr_dir):
                print('Using image list from older version')
                image_list = []
                for image in images:
                    image_list.append(image)
            else:
                print('Adding prefix to saved image list')
                image_list = []
                prefix = os.path.dirname(curr_dir.rstrip('/'))
                for image in images:
                    image_list.append(os.path.join(prefix, image))
            image_list = random.sample(image_list, min(max_dataset_size, len(image_list)))
            total_image_list += image_list
        else:
            print('Walking directory ...')
            images = []
            assert os.path.isdir(curr_dir), ('%s is not a valid directory' % curr_dir)
            for (root, _, fnames) in sorted(os.walk(curr_dir, followlinks=True)):
                for fname in fnames:
                    if is_image_file(fname):
                        path = os.path.join(root, fname)
                        images.append(path)
            image_list = random.sample(images, min(max_dataset_size, len(images)))
            # Cache paths relative to the source directory's parent.
            with open(cache, 'w') as f:
                prefix = (os.path.dirname(curr_dir.rstrip('/')) + '/')
                for i in image_list:
                    f.write(('%s\n' % util.remove_prefix(i, prefix)))
            total_image_list += image_list
    return total_image_list
def make_CNNDetection_dataset(dir, max_dataset_size=float('inf'), mode='real'):
    """Collect image paths and class labels for a CNNDetection-style layout.

    `dir` contains one folder per class, each with `0_real` / `1_fake`
    subfolders; `mode` selects which subfolder to read. Returns a tuple
    `(image_paths, class_names)` of equal length. Note: `max_dataset_size`
    is accepted for API symmetry with the other `make_*` helpers but is not
    used to truncate the result (unchanged from the original behaviour).

    Raises:
        ValueError: if `mode` is neither 'real' nor 'fake'.
    """
    # Fail fast on an invalid mode: previously `sdir` was simply left unbound
    # and the function crashed later with a confusing NameError.
    if mode == 'real':
        sdir = '0_real'
    elif mode == 'fake':
        sdir = '1_fake'
    else:
        raise ValueError("mode must be 'real' or 'fake', got %r" % (mode,))
    classes = os.listdir(dir)
    total_image_list = []
    total_class_list = []
    print(dir)
    for cls in classes:
        curr_dir = ((((dir + '/') + cls) + '/') + sdir)
        print(curr_dir)
        cache = (curr_dir.rstrip('/') + '.txt')
        if os.path.isfile(cache):
            print(('Using filelist cached at %s' % cache))
            with open(cache) as f:
                images = [line.strip() for line in f]
            # `images and` guards an empty cache file (previously IndexError).
            if images and images[0].startswith(curr_dir):
                print('Using image list from older version')
                image_list = []
                class_list = []
                for image in images:
                    image_list.append(image)
                    class_list.append(cls)
            else:
                print('Adding prefix to saved image list')
                image_list = []
                class_list = []
                prefix = os.path.dirname(curr_dir.rstrip('/'))
                for image in images:
                    image_list.append(os.path.join(prefix, image))
                    class_list.append(cls)
            total_image_list += image_list
            total_class_list += class_list
        else:
            print('Walking directory ...')
            images = []
            class_list = []
            assert os.path.isdir(curr_dir), ('%s is not a valid directory' % curr_dir)
            for (root, _, fnames) in sorted(os.walk(curr_dir, followlinks=True)):
                for fname in fnames:
                    if is_image_file(fname):
                        path = os.path.join(root, fname)
                        images.append(path)
                        class_list.append(cls)
            image_list = images
            # Cache paths relative to the class directory's parent.
            with open(cache, 'w') as f:
                prefix = (os.path.dirname(curr_dir.rstrip('/')) + '/')
                for i in image_list:
                    f.write(('%s\n' % util.remove_prefix(i, prefix)))
            total_image_list += image_list
            total_class_list += class_list
    return (total_image_list, total_class_list)
def default_loader(path):
    """Open the image at `path` with PIL and return it converted to RGB."""
    image = Image.open(path)
    return image.convert('RGB')
class PairedDataset(data.Dataset):
    """A dataset class for paired images, e.g. corresponding real and
    manipulated images, optionally with matching segmentation masks.
    """

    def __init__(self, opt, im_path_real, im_path_fake, is_val=False, with_mask=False):
        """Initialize this dataset class.

        Parameters:
            opt -- experiment options
            im_path_real -- path to folder of real images
            im_path_fake -- path to folder of fake images
            is_val -- is this training or validation? used to determine
                transform
            with_mask -- also load masks from the sibling folders obtained by
                replacing 'face' with 'mask' in the image paths
        """
        super().__init__()
        self.dir_real = im_path_real
        self.dir_fake = im_path_fake
        self.with_mask = with_mask
        if self.with_mask:
            # Listing the directory directly (instead of the cached walk in
            # `make_dataset`) keeps the image order aligned with the masks.
            self.real_paths = sorted([os.path.join(self.dir_real, im) for im in os.listdir(self.dir_real)])
            self.fake_paths = sorted([os.path.join(self.dir_fake, im) for im in os.listdir(self.dir_fake)])
        else:
            self.real_paths = sorted(make_dataset(self.dir_real, opt.max_dataset_size))
            self.fake_paths = sorted(make_dataset(self.dir_fake, opt.max_dataset_size))
        self.real_size = len(self.real_paths)
        self.fake_size = len(self.fake_paths)
        self.transform = transforms.get_transform(opt, for_val=is_val)
        if self.with_mask:
            # Mask folders mirror the image folders with 'face' -> 'mask'.
            self.real_mask_paths = sorted([os.path.join(self.dir_real.replace('face', 'mask'), im) for im in os.listdir(self.dir_real.replace('face', 'mask'))])
            self.fake_mask_paths = sorted([os.path.join(self.dir_fake.replace('face', 'mask'), im) for im in os.listdir(self.dir_fake.replace('face', 'mask'))])
            self.orig_transform = transforms.get_mask_transform(opt, for_val=is_val)
            self.real_mask_size = len(self.real_mask_paths)
            self.fake_mask_size = len(self.fake_mask_paths)
            # Every image must have a corresponding mask.
            assert (self.real_mask_size == self.real_size)
            assert (self.fake_mask_size == self.fake_size)
        self.opt = opt

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing
        """
        # Index wraps modulo each side's size so that real/fake lists of
        # different lengths can still be paired.
        real_path = self.real_paths[(index % self.real_size)]
        fake_path = self.fake_paths[(index % self.fake_size)]
        real_img = Image.open(real_path).convert('RGB')
        fake_img = Image.open(fake_path).convert('RGB')
        real = self.transform(real_img)
        fake = self.transform(fake_img)
        if self.with_mask:
            # Masks use the same wrapped index and are loaded single-channel.
            real_mask_path = self.real_mask_paths[(index % self.real_mask_size)]
            fake_mask_path = self.fake_mask_paths[(index % self.fake_mask_size)]
            real_mask = Image.open(real_mask_path).convert('L')
            fake_mask = Image.open(fake_mask_path).convert('L')
            real_mask = self.orig_transform(real_mask)
            fake_mask = self.orig_transform(fake_mask)
        if self.with_mask:
            return {'manipulated': fake, 'original': real, 'path_manipulated': fake_path, 'path_original': real_path, 'mask_original': real_mask, 'mask_manipulated': fake_mask}
        else:
            return {'manipulated': fake, 'original': real, 'path_manipulated': fake_path, 'path_original': real_path}

    def __len__(self):
        # The longer side defines the epoch length; the shorter side wraps.
        return max(self.real_size, self.fake_size)
class UnpairedDataset(data.Dataset):
    """A dataset class for loading images from a single folder."""

    def __init__(self, opt, im_path, is_val=False):
        """Initialize this dataset class.

        Parameters:
            opt -- experiment options
            im_path -- path to folder of images
            is_val -- is this training or validation? used to determine
                transform
        """
        super().__init__()
        self.dir = im_path
        self.paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
        self.size = len(self.paths)
        # An empty folder would make every __getitem__ fail.
        assert self.size > 0
        self.transform = transforms.get_transform(opt, for_val=is_val)
        self.opt = opt

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing
        """
        path = self.paths[index]
        image = Image.open(path).convert('RGB')
        return {'img': self.transform(image), 'path': path}

    def __len__(self):
        return self.size
def get_available_masks():
    """Return a sorted list of the available masks for cli, plus 'none'."""
    members = inspect.getmembers(sys.modules[__name__])
    # Every class in this module except the abstract `Mask` base is a mask.
    masks = sorted(name for name, obj in members if inspect.isclass(obj) and name != 'Mask')
    masks.append('none')
    return masks
def get_default_mask():
    """Set the default mask for cli."""
    masks = get_available_masks()
    preferred = 'dfl_full'
    # Fall back to the first available mask if the preferred one is missing.
    return preferred if preferred in masks else masks[0]