# code stringlengths 17 6.64M |  (dataset-extraction residue, not source code)
# |---|
@map_container
def to_numpy(x: Any, /, permute: bool=True) -> Any:
    """Convert given input to numpy.ndarrays.

    :param x: (Any) Arbitrary structure to convert to ndarrays (see map_apply).
    :param permute: (bool) If `True`, permute from PyTorch convention (b, c, h, w) -> (b, h, w, c).
    :return: (Any) Input structure, converted to ndarrays.
    """
    # Pass through items that are already numpy or are non-array bookkeeping types.
    if isinstance(x, (np.ndarray, str, Timer, MultiLevelTimer)):
        return x
    if permute and x.ndim > 2:
        # Keep leading (batch) dims, move channels last: (..., c, h, w) -> (..., h, w, c).
        order = list(range(x.ndim - 3)) + [-2, -1, -3]
        x = x.permute(order)
    return x.detach().cpu().numpy()
|
@map_container
def op(_x: Any, /, *args, fn: Union[(str, Callable)], **kwargs) -> Any:
    """Apply a function to an arbitrary input structure. `fn` can be either a function or a method to search on `_x`.

    Example:
        >>> out = fn(input, device, op='to')  # Apply x.to(device) to each item in `x`
        >>> out = fn(input, func=torch.softmax, dim=1)  # Apply torch.softmax(x, dim=1) to each item in `x`

    :param _x: (Any) Arbitrary structure to convert to tensors (see map_apply).
    :param args: (tuple) `Args` to forward to the given `func`.
    :param fn: (str|callable) Function to apply. If given a string, it will be searched as an attribute of `_x`.
    :param kwargs: (dict) `Kwargs` to forward to the given `op`.
    :return: (Any) Result of applying `fn` to each leaf of `_x`.
    """
    # Bookkeeping types are returned untouched.
    if isinstance(_x, (str, Timer, MultiLevelTimer)):
        return _x
    # A string names a bound method on `_x`; a callable receives `_x` as first arg.
    if isinstance(fn, str):
        return getattr(_x, fn)(*args, **kwargs)
    return fn(_x, *args, **kwargs)
|
@opt_args_deco
def allow_np(fn: Optional[Callable], permute: bool=False) -> Callable:
    """Decorator to allow for numpy.ndarray inputs in a torch function.

    Main idea is to implement the function using torch ops and apply this decorator to also make it numpy friendly.
    Since numpy.ndarray and torch.Tensor share memory (when on CPU), there shouldn't be any overhead.

    The decorated function can have an arbitrary signature. We enforce that there should only be either np.ndarray
    or torch.Tensor inputs. All other args (int, float, str...) are left unchanged.
    """
    # Widen Tensor annotations so the wrapped signature also advertises ndarray support.
    ann = fn.__annotations__
    for name, tp in ann.items():
        if tp == torch.Tensor:
            ann[name] = Union[(NDArray, tp)]

    @wraps(fn)
    def wrapper(*args, **kwargs):
        values = args + tuple(kwargs.values())
        has_np = any(isinstance(v, np.ndarray) for v in values)
        has_torch = any(isinstance(v, torch.Tensor) for v in values)
        if has_np and has_torch:
            raise ValueError('Must pass only np.ndarray or torch.Tensor!')
        # Round-trip: numpy in -> torch compute -> numpy out.
        if has_np:
            args, kwargs = to_torch((args, kwargs), permute=permute)
        out = fn(*args, **kwargs)
        return to_numpy(out, permute=permute) if has_np else out
    return wrapper
|
@allow_np(permute=True)
def standardize(x: Tensor, /, mean: StatsRGB=_mean, std: StatsRGB=_std) -> Tensor:
    """Apply standardization. Default uses ImageNet statistics."""
    # Broadcast per-channel stats over any leading (batch) dimensions.
    view = [1] * (x.ndim - 3) + [3, 1, 1]
    mu = x.new_tensor(mean).view(view)
    sigma = x.new_tensor(std).view(view)
    return (x - mu) / sigma
|
@allow_np(permute=True)
def unstandardize(x: Tensor, /, mean: StatsRGB=_mean, std: StatsRGB=_std) -> Tensor:
    """Remove standardization. Default uses ImageNet statistics."""
    # Broadcast per-channel stats over any leading (batch) dimensions.
    view = [1] * (x.ndim - 3) + [3, 1, 1]
    mu = x.new_tensor(mean).view(view)
    sigma = x.new_tensor(std).view(view)
    return x * sigma + mu
|
@allow_np(permute=True)
def to_gray(x: Tensor, /, coeffs: StatsRGB=_coeffs, keepdim: bool=False) -> Tensor:
    """Convert image to grayscale.

    :param x: (Tensor) (*, 3, h, w) Input RGB image(s).
    :param coeffs: (StatsRGB) Per-channel weighting coefficients.
    :param keepdim: (bool) If `True`, keep the reduced channel dimension as a singleton.
    :return: (Tensor) (*, h, w) or (*, 1, h, w) Grayscale image(s).
    """
    shape = ([1] * (x.ndim - 3)) + [3, 1, 1]
    coeffs = x.new_tensor(coeffs).view(shape)
    # FIX: `view` places the coefficients on dim -3 (channels), so the reduction must
    # also be over dim -3. The previous hard-coded `dim=1` was only correct for 4-D
    # inputs and summed over the wrong axis for 3-D/5-D inputs.
    x = (x * coeffs).sum(dim=-3, keepdim=keepdim)
    return x
|
def mean_normalize(x: Tensor, /, dim: Union[(int, Sequence[int])]=(2, 3)) -> Tensor:
    """Apply mean normalization across the specified dimensions.

    :param x: (Tensor) (*) Input tensor to normalize of any shape.
    :param dim: (int | Sequence[int]) Dimension(s) to compute the mean across.
    :return: (Tensor) (*) Mean normalized input with the same shape.
    """
    # Clamp by machine epsilon to guard against division by zero.
    mu = x.mean(dim=dim, keepdim=True).clamp(min=eps(x))
    return x / mu
|
def eye_like(x: Tensor, /) -> Tensor:
    """Create an Identity matrix of the same dtype and size as the input.

    NOTE: The input can be of any shape, except the final two dimensions, which must be square.

    :param x: (Tensor) (*, n, n) Input reference tensor, where `*` can be any size (including zero).
    :return: (Tensor) (*, n, n) Identity matrix with the same dtype and size as the input.
    """
    if x.ndim < 2:
        raise ValueError(f'Input must have at least two dimensions! Got "{x.ndim}"')
    rows, cols = x.shape[-2:]
    if rows != cols:
        raise ValueError(f'Input last two dimensions must be square (*, n, n)! Got "{x.shape}"')
    # Build a single (n, n) identity, then broadcast it over the leading dims.
    view = [1] * (x.ndim - 2) + [rows, rows]
    eye = torch.eye(rows, dtype=x.dtype, device=x.device)
    return eye.view(view).expand_as(x).clone()
|
def interpolate_like(input: Tensor, /, other: Tensor, mode: str='nearest', align_corners: bool=False) -> Tensor:
    """Interpolate `input` to match the spatial (h, w) size of `other`."""
    # `nearest` interpolation does not accept `align_corners`, so it must be None.
    corners = None if mode == 'nearest' else align_corners
    return F.interpolate(input, size=other.shape[-2:], mode=mode, align_corners=corners)
|
def expand_dim(x: Tensor, /, num: Union[(int, Sequence[int])], dim: Union[(int, Sequence[int])]=0, insert: bool=False) -> Tensor:
    """Expand the specified input tensor dimensions, inserting new ones if required.

    >>> expand_dim(torch.rand(1, 1, 1), num=5, dim=1, insert=False)  # (1, 1, 1) -> (1, 5, 1)
    >>> expand_dim(torch.rand(1, 1, 1), num=5, dim=1, insert=True)  # (1, 1, 1) -> (1, 5, 1, 1)
    >>> expand_dim(torch.rand(1, 1, 1), num=(5, 3), dim=(0, 1), insert=False)  # (1, 1, 1) -> (5, 3, 1)
    >>> expand_dim(torch.rand(1, 1, 1), num=(5, 3), dim=(0, 1), insert=True)  # (1, 1, 1) -> (5, 3, 1, 1, 1)

    :param x: (Tensor) (*) Input tensor of any shape.
    :param num: (int|Sequence[int]) Expansion amount for the target dimension(s).
    :param dim: (int|Sequence[int]) Dimension(s) to expand.
    :param insert: (bool) If `True`, insert a new dimension at the specified location(s).
    :return: (Tensor) (*, num, *) Expanded tensor at the given location(s).
    """
    # Normalize to parallel sequences of (num, dim) pairs.
    if isinstance(num, int):
        if isinstance(dim, int):
            num, dim = [num], [dim]
        else:
            num = [num] * len(dim)
    elif len(num) != len(dim):
        raise ValueError(f'Non-matching expansion and dims. ({len(num)} vs. {len(dim)})')
    if insert:
        # Create the new singleton dims first, so `expand` can grow them.
        for d in dim:
            x = x.unsqueeze(d)
    # -1 keeps a dim unchanged; only the requested dims are expanded.
    sizes = [-1] * x.ndim
    for n, d in zip(num, dim):
        sizes[d] = n
    return x.expand(sizes)
|
def get_cls(cls_dict: dict[(str, Type[T])], /, *args, type: str, **kwargs) -> T:
    """Instantiate an arbitrary class from a collection.

    Including `type` makes it a keyword-only argument. This has the double benefit of forcing the user to pass it as a
    keyword argument, as well as popping it from the config kwargs.

    :param cls_dict: (Dict[str, cls]) Dict containing the mappings to all possible classes to choose from.
    :param args: (tuple) Args to forward to target class.
    :param type: (str) Key of the target class. Must be present as a keyword-only argument.
    :param kwargs: (dict) Kwargs to forward to target class.
    :return: Instance of the target class with the desired arguments.
    """
    # Both a missing key AND a failed constructor are reported as the same ValueError,
    # chained so the original cause is preserved.
    try:
        cls = cls_dict[type]
        return cls(*args, **kwargs)
    except Exception as e:
        raise ValueError(f'Error using "{type}" in {list(cls_dict)}') from e
|
def get_net(cfg: NetCfg) -> nn.ModuleDict:
    """Instantiate the target networks from a config dict.

    The depth estimation algorithm typically consists of multiple networks, commonly at least `depth` and `pose`.
    We're assuming that, within a given category, we can use different classes interchangeably.
    New types and classes can be added to the network registry accordingly.

    :param cfg: (Dict[str, Dict[str, Any]]) Target network `types` and kwargs to forward to them.
    :return: (nn.ModuleDict) The instantiated networks, keyed by category.
    """
    nets = OrderedDict()
    for name, kw in cfg.items():
        if kw is None:
            continue  # A null config disables that network.
        nets[name] = get_cls(NET_REG, type=name, **kw)
    return nn.ModuleDict(nets)
|
def get_loss(cfg: LossCfg) -> tuple[(nn.ModuleDict, nn.ParameterDict)]:
    """Instantiate the target losses and their weights from a config dict.

    :param cfg: (Dict[str, Dict[str, Any]]) Target loss `types` and kwargs to forward to them.
    :return: (tuple) The loss modules and their (frozen) scalar weights.
    """
    losses = nn.ModuleDict()
    weights = nn.ParameterDict()
    for name, kw in cfg.items():
        if kw is None:
            continue  # A null config disables that loss.
        # `weight` is popped so it is not forwarded to the loss constructor; default 1.
        w = kw.pop('weight', 1)
        weights[name] = nn.Parameter(torch.as_tensor(w), requires_grad=False)
        losses[name] = LOSS_REG[name](**kw)
    return (losses, weights)
|
def get_ds(cfg: DataCfg) -> Dataset:
    """Instantiate the target dataset from a config dict.

    :param cfg: (Dict[str, Any]) Target dataset `type` and kwargs to forward to it.
    :return: (Dataset) The instantiated dataset.
    """
    return get_cls(DATA_REG, **cfg)
|
def get_dl(mode: str, cfg_ds: DataCfg, cfg_dl: LoaderCfg) -> DataLoader:
    """Instantiate the target dataset loader from a config dict.

    Supports the presence of a common config, which gets overriden by specific cfg within each mode.
    Example:
    ```
    dataset:
      type: kitti
      depth_split: eigen_zhou

      core:
        mode: core
        aug: True

      val:
        mode: val
        aug: False
    ```

    By default we set `pin_memory = True` and `collate_fn = dataset.collate_fn`.
    This assumes we are using a `BaseDataset`.

    :param mode: (str) Dataset split: {'train', 'val', 'test'}.
    :param cfg_ds: (Dict[str, Any]]) Target dataset `type` and kwargs to forward to it (contains all modes).
    :param cfg_dl: (Dict[str, Any]]) Kwargs to forward to each dataloader (contains all modes).
    :return: (DataLoader) The instantiated loader for the requested split.
    """
    modes = {'train', 'val', 'test'}
    # Dataset config: shared keys first, then mode-specific overrides.
    ds_cfg = {k: v for k, v in cfg_ds.items() if k not in modes}
    ds_cfg.update(cfg_ds.get(mode, {}))
    ds = get_ds(ds_cfg)
    # Loader config: same override scheme, plus sensible defaults.
    dl_cfg = {k: v for k, v in cfg_dl.items() if k not in modes}
    dl_cfg.setdefault('pin_memory', True)
    dl_cfg['collate_fn'] = ds.collate_fn
    dl_cfg.update(cfg_dl.get(mode, {}))
    return DataLoader(ds, **dl_cfg)
|
def get_opt(parameters: Union[(Iterable, nn.Module)], cfg: OptCfg) -> optim.Optimizer:
    """Instantiate the target optimizer from a config dict.

    Serves as a wrapper for `timm` `create_optimizer_v2` to maintain consistency in the export interface.
    (The previous docstring said "learning rate scheduler" — this factory builds an optimizer.)

    :param parameters: (Iterable | nn.Module) Parameters to forward to the optimizer (in any `torch` format)
    :param cfg: (Dict[str, Any]) Target optimizer `type` and kwargs to forward to it.
    :return: (optim.Optimizer) The instantiated optimizer.
    """
    # `timm` expects the key `opt`; accept `type` for consistency with other factories.
    if 'type' in cfg:
        cfg['opt'] = cfg.pop('type')
    elif 'opt' not in cfg:
        raise KeyError('Must provide a configuration key `type` or `opt` when instantiating an optimizer.')
    if cfg.pop('frozen_bn', False):
        # Freezing BN requires walking the module tree, so a bare param iterable won't do.
        if not isinstance(parameters, nn.Module):
            raise ValueError('Cannot freeze batch norm parameters unless given nn.Module')
        for mod in parameters.modules():
            if isinstance(mod, nn.BatchNorm2d):
                mod.requires_grad_(False)
    return create_optimizer_v2(parameters, **cfg)
|
def get_sched(opt: optim.Optimizer, cfg: SchedCfg) -> optim.lr_scheduler._LRScheduler:
    """Instantiate the target learning rate scheduler from a config dict.

    TODO: Deprecate in favour of `timm` schedulers?

    :param opt: (optim.Optimizer) Optimizer to forward to the LR scheduler.
    :param cfg: (Dict[str, Any]) Target scheduler `type` and kwargs to forward to it.
    :return: (optim.lr_scheduler._LRScheduler) The instantiated scheduler.
    """
    return get_cls(SCHED_REG, opt, **cfg)
|
def get_metrics() -> nn.ModuleDict:
    """Instantiate the collection of depth metrics to monitor."""
    collection = {
        'MAE': metrics.MAE(),
        'RMSE': metrics.RMSE(),
        'LogSI': metrics.ScaleInvariant(mode='log'),
        'AbsRel': metrics.AbsRel(),
        'Acc': metrics.DeltaAcc(delta=1.25),
    }
    return nn.ModuleDict(collection)
|
class TableFormatter():
    """Class to format a table as a LaTeX `booktabs` table.

    :param header: (Sequence[str]) (m,) Header elements represented as strings.
    :param labels: (Sequence[str]) (n,) Row names represented as strings.
    :param body: (Sequence[Sequence[float]]) (n, m) Table data for each `tag` and each `header`.
    :param metrics: (None|Sequence[int]) Value for each col indicating if a high/low value is better (+1/-1).
    """
    def __init__(self, header: Sequence[str], labels: Sequence[Union[(str, Sequence[str])]], body: Sequence[Sequence[float]], metrics: Optional[Union[(int, Sequence[int])]]=None):
        self.header = header
        self.labels = labels
        self.body = np.array(body)
        # Prepend a row axis so `metrics` broadcasts against `body` (n, m).
        self.metrics = np.array(metrics)[None]
        if (self.metrics.ndim == 1):
            # Scalar `metrics`: replicate the single +1/-1 across every column.
            self.metrics = self.metrics[None].repeat(len(header), axis=1)
        if (not isinstance(self.labels[0], str)):
            # Multi-part labels (e.g. ('net', 'loss')) are joined into a single string.
            self.labels = [' '.join(l) for l in self.labels]
        shape = (len(self.labels), len(self.header))
        if (shape != self.shape):
            raise ValueError(f'Shape mismatch. ({shape} vs. {self.shape})')
        if (self.metrics.shape[1] != self.shape[1]):
            raise ValueError(f'Metric type mismatch. ({self.metrics.shape[1]} vs. {self.shape[1]})')
        # Boolean masks (n, m) marking the best and second-best entry per column.
        (self.best_mask, self.nbest_mask) = self._get_best()
    @classmethod
    def from_files(cls, files: Sequence[Path], key: Optional[Callable[([Path], str)]]=None, metrics: Optional[Union[(int, Sequence[int])]]=None):
        """Classmethod to create a table from a list of files.

        :param files: (Sequence[Path]) Sequence of YAML files containing results.
        :param key: (Optional[Callable[[Path], str]]) Function to convert a file name into a tag for each row.
        :param metrics: (Optional[Sequence[int]]) Value for each col indicating whether a high/low value is better (+1/-1).
        :return: (TableFormatter) Table built from the file contents.
        """
        assert len(files), 'Must provide files to create table.'
        if (key is None):
            # Default tag: great-grandparent directory name — assumes a fixed results layout; confirm against callers.
            key = (lambda x: x.parents[2].name)
        return cls(header=list(load_yaml(files[0])), labels=list(map(key, files)), body=[list(load_yaml(f).values()) for f in files], metrics=metrics)
    @classmethod
    def from_df(cls, df: pd.DataFrame, metrics: Optional[Union[(int, Sequence[int])]]=None):
        """Classmethod to create a table from a `DataFrame`.

        :param df: (pd.DataFrame) Pandas dataframe to create the table.
        :param metrics: (Optional[Sequence[int]]) Value for each col indicating if a high/low value is better (+1/-1).
        :return: (TableFormatter) Table built from the dataframe.
        """
        return cls(header=df.columns, labels=df.index, body=df.to_numpy(), metrics=metrics)
    @classmethod
    def from_dict(cls, data):
        """Classmethod to create a single-row table from a {header: value} dict."""
        return cls(header=np.array(list(data)), labels=['Values'], body=np.array(list(data.values()))[None], metrics=None)
    def __str__(self) -> str:
        """Format as a Latex table using default parameters."""
        return self.to_latex()
    @property
    def shape(self) -> tuple[(int, int)]:
        """Table shape as (rows, cols)."""
        return self.body.shape
    def _to_row(self, label: str, data: Sequence[str]) -> str:
        """Create a single table row from a label and pre-formatted cell strings."""
        # NOTE(review): the trailing `\` + newline inside this f-string looks like a
        # mangled LaTeX row terminator (`\\` + newline) — verify against the original.
        return f'''{label} & {' & '.join(data)} \
'''
    def _get_best(self) -> tuple[(NDArray, NDArray)]:
        """Get a mask indicating the `best` and `next best` performing row per column."""
        if (self.metrics[(0, 0)] is None):
            # No metric directions provided: highlight nothing.
            return (np.zeros_like(self.body, dtype=bool), np.zeros_like(self.body, dtype=bool))
        # Flip columns where lower is better (-1) so `max` always selects the best.
        body = (self.body * self.metrics)
        best = body.max(axis=0, keepdims=True)
        best_mask = np.equal(body, best)
        if (self.shape[0] > 1):
            # Remove the best entries, then take the max again to find the runner-up.
            body[best_mask] = (- np.inf)
            nbest = body.max(axis=0, keepdims=True)
            nbest_mask = np.equal(body, nbest)
        else:
            # A single row cannot have a runner-up.
            nbest_mask = np.zeros_like(body, dtype=bool)
        return (best_mask, nbest_mask)
    def _get_col_width(self, width: Optional[Union[(int, Sequence[int])]], header: Sequence[str], body: NDArray) -> Sequence[int]:
        """Get width for each column: dynamic (None), fixed (int) or per-column (sequence)."""
        if (width is None):
            # Dynamic: widest string in each column (header included).
            width = np.concatenate(([list(map(len, header))], np.vectorize(len)(body)), axis=0).max(0)
        elif isinstance(width, int):
            width = ([width] * self.shape[1])
        elif (len(width) != self.shape[1]):
            raise ValueError('Non-matching columns.')
        return width
    def to_latex(self, caption: str='CAPTION', precision: int=2, width: int=None) -> str:
        """Create a Latex booktags table.

        :param caption: (str) Table caption.
        :param precision: (int) Precision when rounding table `body`.
        :param width: (int) Row character width.
        :return: (str) LaTeX table represented as a string.
        """
        # LaTeX-escape underscores in header/labels.
        header = [h.replace('_', ' ') for h in self.header]
        labels = [l.replace('_', ' ') for l in self.labels]
        body = np.vectorize((lambda i: f'{i:.{precision}f}'))(self.body).astype('<U16')
        # NOTE(review): these markup prefixes look garbled — most likely LaTeX
        # `\best{...}` / `\nbest{...}` highlight macros whose backslashes were lost
        # during extraction; verify against the original source.
        body[self.best_mask] = [f'est{{{i}}}' for i in body[self.best_mask]]
        body[self.nbest_mask] = [f'''
best{{{i}}}''' for i in body[self.nbest_mask]]
        # Right-align every cell to its column width.
        ws = self._get_col_width(width, header, body)
        header = [f'{h:>{w}}' for (h, w) in zip(header, ws)]
        body = np.stack([np.vectorize((lambda i: f'{i:>{w}}'))(col) for (w, col) in zip(ws, body.T)]).T
        # Preamble: one extra `l` column for the row labels.
        table = (((('\\begin{table}\n\\renewcommand{\\arraystretch}{1.2}\n\\centering\n\\caption{' + caption) + '}\n\\begin{tabular}{@{}') + ('l' * (len(header) + 1))) + '@{}}\n\\toprule\n')
        n = max(map(len, self.labels))
        table += self._to_row(label=(' ' * n), data=header)
        table += '\\midrule\n'
        for (tag, row) in zip(labels, body):
            table += self._to_row(label=f'{tag:>{n}}', data=row)
        table += '\\bottomrule\n\\end{tabular}\n\\end{table}\n'
        return table
|
def _get_percentile(x: NDArray, p: int) -> float:
'Safe percentile to handle NaNs/Inf values.'
try:
return np.percentile(x, p)
except IndexError:
return 0.0
|
@ops.allow_np(permute=True)
def rgb_from_disp(disp: Tensor, invert: bool=False, cmap: str='turbo', vmin: float=0, vmax: Optional[Union[(float, Sequence[float])]]=None) -> Tensor:
    """Convert a disparity map into an RGB colormap visualization.

    :param disp: (Tensor) (b, 1, h, w) or (h, w)
    :param invert: (bool) If `True` invert depth into disparity.
    :param cmap: (str) Matplotlib colormap name.
    :param vmin: (float) Minimum value to use when normalizing.
    :param vmax: (None|float|list) Maximum value to use when normalizing. If `None` use 95th percentile.
    :return: (Tensor) Colormapped disparity, restored to the input's (un)batched layout.
    """
    # Convert tensor bounds to plain python values for the colormap call.
    if isinstance(vmin, torch.Tensor):
        vmin = vmin.tolist()
    if isinstance(vmax, torch.Tensor):
        vmax = vmax.tolist()
    n = disp.ndim
    # Lift (h, w) and (c, h, w) inputs into a batched (b, c, h, w) layout.
    if (n == 2):
        disp = disp[(None, None)]
    if (n == 3):
        disp = disp[None]
    if invert:
        disp = geo.to_inv(disp)  # Depth -> disparity (presumably reciprocal — see `geo.to_inv`).
    # `to_numpy` with permute moves channels last; drop the singleton channel dim.
    disp = ops.to_numpy(disp).squeeze((- 1))
    if (vmax is None):
        # Robust per-image maximum: 95th percentile over the valid (positive) values.
        vmax = [_get_percentile(d[(d > 0)], 95) for d in disp]
    elif (isinstance(vmax, (int, float)) or (isinstance(vmax, torch.Tensor) and (vmax.ndim == 0))):
        # A scalar vmax is replicated for every image in the batch.
        vmax = ([vmax] * disp.shape[0])
    elif (len(vmax) != disp.shape[0]):
        raise ValueError(f'Non-matching vmax and disp. ({len(vmax)} vs. {disp.shape[0]})')
    # Colormap each image independently, then re-stack into a batch.
    rgb = torch.stack(ops.to_torch([apply_cmap(d, cmap=cmap, vmin=vmin, vmax=v) for (d, v) in zip(disp, vmax)]))
    # Restore the original unbatched layout.
    if ((n == 2) or (n == 3)):
        rgb = rgb.squeeze(0)
    return rgb
|
@ops.allow_np(permute=True)
def rgb_from_feat(feat: Tensor) -> Tensor:
    """Convert dense features into an RGB image via PCA.

    NOTE: PCA is computed using all features in the batch, i.e. the representation is batch dependent.

    :param feat: (Tensor) (*b, c, h, w) Dense feature representation.
    :return: (Tensor) (*b, 3, h, w) The PCAd features.
    """
    unbatched = feat.ndim == 3
    if unbatched:
        feat = feat[None]
    b, _, h, w = feat.shape
    # Flatten to (b*h*w, c) so PCA treats every pixel as a sample.
    flat = ops.to_numpy(feat.permute(0, 2, 3, 1).flatten(0, 2))
    proj = PCA(n_components=3).fit_transform(flat)
    # Min/max normalize each principal component into [0, 1] for display.
    proj -= proj.min(0)
    proj /= proj.max(0)
    proj = ops.to_torch(proj.reshape(b, h, w, 3))
    return proj.squeeze(0) if unbatched else proj
|
class BaseNetCfg(TypedDict):
    """Config for a base network."""
    # Registry key of the network class to instantiate (see `get_net`).
    type: str
|
class BaseLossCfg(TypedDict):
    """Config for a loss without parameters. We only require a weighting factor."""
    # Scalar weighting factor applied to this loss (see `get_loss`).
    weight: float
|
class NetCfg(TypedDict):
    """Config dict for a collection of networks."""
    # Depth estimation network config.
    depth: BaseNetCfg
    # Relative pose estimation network config.
    pose: BaseNetCfg
|
class LossCfg(TypedDict):
    """Config dict for a collection of losses."""
    # Photometric reconstruction loss (required).
    recon: BaseLossCfg
    # NOTE(review): 'smoooth' (triple 'o') looks like a typo for 'smooth', but it is a
    # schema key — renaming would break existing configs; confirm before changing.
    smoooth: Optional[BaseLossCfg]
|
class DataCfg(TypedDict):
    """Config dict for a collection of BaseDataset. Configs in {core, val, test} override values in main config."""
    # Registry key of the dataset class (see `get_ds`).
    type: str
    # Dataset split/mode identifier.
    mode: str
    # Target image size — presumably (h, w); confirm against the dataset classes.
    size: Sequence[int]
    # Indices of support frames relative to the target — TODO confirm semantics.
    supp_idxs: Optional[Sequence[int]]
    use_depth: Optional[bool]
    use_hints: Optional[bool]
    use_benchmark: Optional[bool]
    use_strong_aug: Optional[bool]
    as_torch: Optional[bool]
    use_aug: Optional[bool]
    log_time: Optional[bool]
    # Per-split overrides merged on top of the shared keys (see `get_dl`).
    train: Optional['DataCfg']
    val: Optional['DataCfg']
    test: Optional['DataCfg']
|
class LoaderCfg(TypedDict):
    """Config dict for a torch DataLoader."""
    batch_size: int
    num_workers: Optional[int]
    drop_last: Optional[bool]
    shuffle: Optional[bool]
    # Defaults to True in `get_dl` when unspecified.
    pin_memory: Optional[bool]
    # Per-split overrides merged on top of the shared keys (see `get_dl`).
    train: Optional['LoaderCfg']
    val: Optional['LoaderCfg']
    test: Optional['LoaderCfg']
|
class OptCfg(TypedDict):
    """Config dict for a torch Optimizer."""
    # Optimizer name; `get_opt` remaps `type` -> `opt` for `timm`. One of the two is required.
    type: Optional[str]
    opt: Optional[str]
    # Learning rate.
    lr: float
|
class SchedCfg(TypedDict):
    """Config dict for a torch LRScheduler."""
    # Registry key of the scheduler class (see `get_sched`).
    type: str
|
class TrainCfg(TypedDict):
    """Config dict for training options."""
    # Number of training epochs. (Was annotated `bool`, which appears to be a typo.)
    max_epochs: int
    resume_training: Optional[bool]
    # Path to a checkpoint to load before training.
    load_ckpt: Optional[PathLike]
    log_every_n_steps: Optional[int]
    # Metric name to monitor for checkpointing/early stopping — TODO confirm.
    monitor: Optional[str]
    benchmark: Optional[bool]
    gradient_clip_val: Optional[float]
    precision: Optional[int]
    accumulate_grad_batches: Optional[int]
    # Stochastic Weight Averaging toggle — presumably; confirm against the trainer.
    swa: Optional[bool]
    early_stopping: Optional[bool]
    min_depth: Optional[float]
    max_depth: Optional[float]
|
class MonoDepthCfg(TypedDict):
    """Monocular depth trainer config. See each sub-class for details."""
    net: NetCfg
    loss: LossCfg
    dataset: DataCfg
    loader: LoaderCfg
    optimizer: OptCfg
    scheduler: SchedCfg
    trainer: TrainCfg
|
def _apply_op(img: Tensor, op_name: str, magnitude: float, interpolation: InterpolationMode, fill: Optional[List[float]]):
    """Apply a single (photometric) augmentation operation to an image.

    Geometric transformations are rejected outright — presumably because they would
    break pixel-wise alignment with other targets (e.g. depth); confirm with the
    training pipeline before relaxing this.

    :param img: (Tensor|PIL.Image) Image to transform.
    :param op_name: (str) Name of the operation to apply.
    :param magnitude: (float) Strength of the operation (interpretation is op-specific).
    :param interpolation: (InterpolationMode) Unused here: only relevant for the rejected geometric ops.
    :param fill: (Optional[List[float]]) Unused here: only relevant for the rejected geometric ops.
    :return: The transformed image.
    :raises ValueError: If `op_name` is a geometric transformation or is not recognized.
    """
    # Collapsed from five identical branches: every geometric op raises the same error.
    if op_name in {'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate'}:
        raise ValueError(f'Attempted geometric transformation "{op_name}"')
    if (op_name == 'Brightness'):
        img = F.adjust_brightness(img, (1.0 + magnitude))
    elif (op_name == 'Color'):
        img = F.adjust_saturation(img, (1.0 + magnitude))
    elif (op_name == 'Contrast'):
        img = F.adjust_contrast(img, (1.0 + magnitude))
    elif (op_name == 'Sharpness'):
        img = F.adjust_sharpness(img, (1.0 + magnitude))
    elif (op_name == 'Posterize'):
        img = F.posterize(img, int(magnitude))
    elif (op_name == 'Solarize'):
        img = F.solarize(img, magnitude)
    elif (op_name == 'AutoContrast'):
        img = F.autocontrast(img)
    elif (op_name == 'Equalize'):
        img = F.equalize(img)
    elif (op_name == 'Invert'):
        img = F.invert(img)
    elif (op_name == 'Identity'):
        pass
    else:
        raise ValueError('The provided operator {} is not recognized.'.format(op_name))
    return img
|
class AutoAugmentPolicy(Enum):
    """AutoAugment policies learned on different data.
    Available policies are IMAGENET, CIFAR10 and SVHN.
    """
    IMAGENET = 'imagenet'
    CIFAR10 = 'cifar10'
    SVHN = 'svhn'
|
class AutoAugment(torch.nn.Module):
    'AutoAugment data augmentation method based on\n    `"AutoAugment: Learning Augmentation Strategies from Data" <https://arxiv.org/pdf/1805.09501.pdf>`_.\n    If the image is torch Tensor, it should be of type torch.uint8, and it is expected\n    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n    If img is PIL Image, it is expected to be in mode "L" or "RGB".\n\n    Args:\n        policy (AutoAugmentPolicy): Desired policy enum defined by\n            :class:`torchvision.transforms.autoaugment.AutoAugmentPolicy`. Default is ``AutoAugmentPolicy.IMAGENET``.\n        interpolation (InterpolationMode): Desired interpolation enum defined by\n            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.\n            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.\n        fill (sequence or number, optional): Pixel fill value for the area outside the transformed\n            image. If given a number, the value is used for all bands respectively.\n    '
    def __init__(self, policy: AutoAugmentPolicy=AutoAugmentPolicy.IMAGENET, interpolation: InterpolationMode=InterpolationMode.NEAREST, fill: Optional[List[float]]=None) -> None:
        super().__init__()
        self.policy = policy
        self.interpolation = interpolation
        self.fill = fill
        # Resolve the policy enum into the concrete list of (op, prob, magnitude_id) pairs.
        self.policies = self._get_policies(policy)
    def _get_policies(self, policy: AutoAugmentPolicy) -> List[Tuple[(Tuple[(str, float, Optional[int])], Tuple[(str, float, Optional[int])])]]:
        # Each entry is a pair of (op_name, probability, magnitude_id) sub-policies applied in sequence.
        # NOTE(review): these policies still contain geometric ops ('Rotate', 'TranslateX/Y',
        # 'ShearY'), which `_apply_op` in this file rejects with ValueError — sampling such a
        # sub-policy at runtime will raise. Confirm whether these entries should be filtered.
        if (policy == AutoAugmentPolicy.IMAGENET):
            return [(('Posterize', 0.4, 8), ('Rotate', 0.6, 9)), (('Solarize', 0.6, 5), ('AutoContrast', 0.6, None)), (('Equalize', 0.8, None), ('Equalize', 0.6, None)), (('Posterize', 0.6, 7), ('Posterize', 0.6, 6)), (('Equalize', 0.4, None), ('Solarize', 0.2, 4)), (('Equalize', 0.4, None), ('Rotate', 0.8, 8)), (('Solarize', 0.6, 3), ('Equalize', 0.6, None)), (('Posterize', 0.8, 5), ('Equalize', 1.0, None)), (('Equalize', 0.6, None), ('Posterize', 0.4, 6)), (('Equalize', 0.0, None), ('Equalize', 0.8, None)), (('Invert', 0.6, None), ('Equalize', 1.0, None)), (('Color', 0.6, 4), ('Contrast', 1.0, 8)), (('Color', 0.8, 8), ('Solarize', 0.8, 7)), (('Sharpness', 0.4, 7), ('Invert', 0.6, None)), (('Color', 0.4, 0), ('Equalize', 0.6, None)), (('Equalize', 0.4, None), ('Solarize', 0.2, 4)), (('Solarize', 0.6, 5), ('AutoContrast', 0.6, None)), (('Invert', 0.6, None), ('Equalize', 1.0, None)), (('Color', 0.6, 4), ('Contrast', 1.0, 8)), (('Equalize', 0.8, None), ('Equalize', 0.6, None))]
        elif (policy == AutoAugmentPolicy.CIFAR10):
            return [(('Invert', 0.1, None), ('Contrast', 0.2, 6)), (('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)), (('AutoContrast', 0.5, None), ('Equalize', 0.9, None)), (('Color', 0.4, 3), ('Brightness', 0.6, 7)), (('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)), (('Equalize', 0.6, None), ('Equalize', 0.5, None)), (('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)), (('Color', 0.7, 7), ('TranslateX', 0.5, 8)), (('Equalize', 0.3, None), ('AutoContrast', 0.4, None)), (('Brightness', 0.9, 6), ('Color', 0.2, 8)), (('Solarize', 0.5, 2), ('Invert', 0.0, None)), (('Equalize', 0.2, None), ('AutoContrast', 0.6, None)), (('Equalize', 0.2, None), ('Equalize', 0.6, None)), (('Color', 0.9, 9), ('Equalize', 0.6, None)), (('AutoContrast', 0.8, None), ('Solarize', 0.2, 8)), (('Brightness', 0.1, 3), ('Color', 0.7, 0)), (('Solarize', 0.4, 5), ('AutoContrast', 0.9, None)), (('AutoContrast', 0.9, None), ('Solarize', 0.8, 3)), (('Equalize', 0.8, None), ('Invert', 0.1, None))]
        elif (policy == AutoAugmentPolicy.SVHN):
            return [(('Equalize', 0.6, None), ('Solarize', 0.6, 6)), (('Invert', 0.9, None), ('Equalize', 0.6, None)), (('Equalize', 0.6, None), ('Rotate', 0.9, 3)), (('Invert', 0.9, None), ('AutoContrast', 0.8, None)), (('Equalize', 0.6, None), ('Rotate', 0.9, 3)), (('Equalize', 0.9, None), ('TranslateY', 0.6, 6)), (('Invert', 0.9, None), ('Equalize', 0.6, None)), (('Contrast', 0.3, 3), ('Rotate', 0.8, 4)), (('Invert', 0.8, None), ('TranslateY', 0.0, 2)), (('ShearY', 0.7, 6), ('Solarize', 0.4, 8)), (('Invert', 0.6, None), ('Rotate', 0.8, 4)), (('Solarize', 0.7, 2), ('TranslateY', 0.6, 7))]
        else:
            raise ValueError('The provided policy {} is not recognized.'.format(policy))
    def _augmentation_space(self, num_bins: int, image_size: List[int]) -> Dict[(str, Tuple[(Tensor, bool)])]:
        # Maps op name -> (magnitude bins, whether the magnitude can be negated).
        return {'Brightness': (torch.linspace(0.0, 0.9, num_bins), True), 'Color': (torch.linspace(0.0, 0.9, num_bins), True), 'Contrast': (torch.linspace(0.0, 0.9, num_bins), True), 'Sharpness': (torch.linspace(0.0, 0.9, num_bins), True), 'Posterize': ((8 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int()), False), 'Solarize': (torch.linspace(255.0, 0.0, num_bins), False), 'AutoContrast': (torch.tensor(0.0), False), 'Equalize': (torch.tensor(0.0), False), 'Invert': (torch.tensor(0.0), False)}
    @staticmethod
    def get_params(transform_num: int) -> Tuple[(int, Tensor, Tensor)]:
        'Get parameters for autoaugment transformation\n\n        Returns:\n            params required by the autoaugment transformation\n        '
        # Sample which sub-policy to use, per-op application probabilities and magnitude signs.
        policy_id = int(torch.randint(transform_num, (1,)).item())
        probs = torch.rand((2,))
        signs = torch.randint(2, (2,))
        return (policy_id, probs, signs)
    def forward(self, img: Tensor) -> Tensor:
        '\n        img (PIL Image or Tensor): Image to be transformed.\n\n        Returns:\n            PIL Image or Tensor: AutoAugmented image.\n        '
        fill = self.fill
        # Normalize `fill` to a per-channel float list for tensor inputs.
        if isinstance(img, Tensor):
            if isinstance(fill, (int, float)):
                fill = ([float(fill)] * F.get_image_num_channels(img))
            elif (fill is not None):
                fill = [float(f) for f in fill]
        (transform_id, probs, signs) = self.get_params(len(self.policies))
        # Apply each of the two ops of the sampled sub-policy with its own probability.
        for (i, (op_name, p, magnitude_id)) in enumerate(self.policies[transform_id]):
            if (probs[i] <= p):
                op_meta = self._augmentation_space(10, F.get_image_size(img))
                (magnitudes, signed) = op_meta[op_name]
                magnitude = (float(magnitudes[magnitude_id].item()) if (magnitude_id is not None) else 0.0)
                # Randomly negate the magnitude for signed ops.
                if (signed and (signs[i] == 0)):
                    magnitude *= (- 1.0)
                img = _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)
        return img
    def __repr__(self) -> str:
        return (self.__class__.__name__ + '(policy={}, fill={})'.format(self.policy, self.fill))
|
class RandAugment(torch.nn.Module):
    'RandAugment data augmentation method based on\n    `"RandAugment: Practical automated data augmentation with a reduced search space"\n    <https://arxiv.org/abs/1909.13719>`_.\n    If the image is torch Tensor, it should be of type torch.uint8, and it is expected\n    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n    If img is PIL Image, it is expected to be in mode "L" or "RGB".\n\n    Args:\n        num_ops (int): Number of augmentation transformations to apply sequentially.\n        magnitude (int): Magnitude for all the transformations.\n        num_magnitude_bins (int): The number of different magnitude values.\n        interpolation (InterpolationMode): Desired interpolation enum defined by\n            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.\n            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.\n        fill (sequence or number, optional): Pixel fill value for the area outside the transformed\n            image. If given a number, the value is used for all bands respectively.\n    '
    def __init__(self, num_ops: int=2, magnitude: int=9, num_magnitude_bins: int=31, interpolation: InterpolationMode=InterpolationMode.NEAREST, fill: Optional[List[float]]=None) -> None:
        super().__init__()
        self.num_ops = num_ops
        self.magnitude = magnitude
        self.num_magnitude_bins = num_magnitude_bins
        self.interpolation = interpolation
        self.fill = fill
    def _augmentation_space(self, num_bins: int, image_size: List[int]) -> Dict[(str, Tuple[(Tensor, bool)])]:
        # Maps op name -> (magnitude bins, whether the magnitude can be negated).
        return {'Identity': (torch.tensor(0.0), False), 'Brightness': (torch.linspace(0.0, 0.9, num_bins), True), 'Color': (torch.linspace(0.0, 0.9, num_bins), True), 'Contrast': (torch.linspace(0.0, 0.9, num_bins), True), 'Sharpness': (torch.linspace(0.0, 0.9, num_bins), True), 'Posterize': ((8 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int()), False), 'Solarize': (torch.linspace(255.0, 0.0, num_bins), False), 'AutoContrast': (torch.tensor(0.0), False), 'Equalize': (torch.tensor(0.0), False)}
    def forward(self, img: Tensor) -> Tensor:
        '\n        img (PIL Image or Tensor): Image to be transformed.\n\n        Returns:\n            PIL Image or Tensor: Transformed image.\n        '
        fill = self.fill
        # Normalize `fill` to a per-channel float list for tensor inputs.
        if isinstance(img, Tensor):
            if isinstance(fill, (int, float)):
                fill = ([float(fill)] * F.get_image_num_channels(img))
            elif (fill is not None):
                fill = [float(f) for f in fill]
        # Apply `num_ops` uniformly-sampled ops in sequence, all at the fixed magnitude.
        for _ in range(self.num_ops):
            op_meta = self._augmentation_space(self.num_magnitude_bins, F.get_image_size(img))
            op_index = int(torch.randint(len(op_meta), (1,)).item())
            op_name = list(op_meta.keys())[op_index]
            (magnitudes, signed) = op_meta[op_name]
            magnitude = (float(magnitudes[self.magnitude].item()) if (magnitudes.ndim > 0) else 0.0)
            # Randomly negate the magnitude for signed ops (50% chance).
            if (signed and torch.randint(2, (1,))):
                magnitude *= (- 1.0)
            img = _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)
        return img
    def __repr__(self) -> str:
        s = (self.__class__.__name__ + '(')
        s += 'num_ops={num_ops}'
        s += ', magnitude={magnitude}'
        s += ', num_magnitude_bins={num_magnitude_bins}'
        s += ', interpolation={interpolation}'
        s += ', fill={fill}'
        s += ')'
        return s.format(**self.__dict__)
|
class TrivialAugmentWide(torch.nn.Module):
    'Dataset-independent data-augmentation with TrivialAugment Wide, as described in\n    `"TrivialAugment: Tuning-free Yet State-of-the-Art Data Augmentation" <https://arxiv.org/abs/2103.10158>`.\n    If the image is torch Tensor, it should be of type torch.uint8, and it is expected\n    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n    If img is PIL Image, it is expected to be in mode "L" or "RGB".\n\n    Args:\n        num_magnitude_bins (int): The number of different magnitude values.\n        interpolation (InterpolationMode): Desired interpolation enum defined by\n            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.\n            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.\n        fill (sequence or number, optional): Pixel fill value for the area outside the transformed\n            image. If given a number, the value is used for all bands respectively.\n    '
    def __init__(self, num_magnitude_bins: int=31, interpolation: InterpolationMode=InterpolationMode.NEAREST, fill: Optional[List[float]]=None) -> None:
        super().__init__()
        self.num_magnitude_bins = num_magnitude_bins
        self.interpolation = interpolation
        self.fill = fill
    def _augmentation_space(self, num_bins: int) -> Dict[(str, Tuple[(Tensor, bool)])]:
        # Maps op name -> (magnitude bins, whether the magnitude can be negated).
        # Wider ranges (0.99) than AutoAugment/RandAugment, per the TrivialAugment "Wide" variant.
        return {'Brightness': (torch.linspace(0.0, 0.99, num_bins), True), 'Color': (torch.linspace(0.0, 0.99, num_bins), True), 'Contrast': (torch.linspace(0.0, 0.99, num_bins), True), 'Sharpness': (torch.linspace(0.0, 0.99, num_bins), True), 'Posterize': ((8 - (torch.arange(num_bins) / ((num_bins - 1) / 6)).round().int()), False), 'Solarize': (torch.linspace(255.0, 0.0, num_bins), False), 'AutoContrast': (torch.tensor(0.0), False), 'Equalize': (torch.tensor(0.0), False)}
    def forward(self, img: Tensor) -> Tensor:
        '\n        img (PIL Image or Tensor): Image to be transformed.\n\n        Returns:\n            PIL Image or Tensor: Transformed image.\n        '
        fill = self.fill
        # Normalize `fill` to a per-channel float list for tensor inputs.
        if isinstance(img, Tensor):
            if isinstance(fill, (int, float)):
                fill = ([float(fill)] * F.get_image_num_channels(img))
            elif (fill is not None):
                fill = [float(f) for f in fill]
        # Sample a single op uniformly, then a uniform magnitude bin (and random sign).
        op_meta = self._augmentation_space(self.num_magnitude_bins)
        op_index = int(torch.randint(len(op_meta), (1,)).item())
        op_name = list(op_meta.keys())[op_index]
        (magnitudes, signed) = op_meta[op_name]
        magnitude = (float(magnitudes[torch.randint(len(magnitudes), (1,), dtype=torch.long)].item()) if (magnitudes.ndim > 0) else 0.0)
        if (signed and torch.randint(2, (1,))):
            magnitude *= (- 1.0)
        return _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)
    def __repr__(self) -> str:
        s = (self.__class__.__name__ + '(')
        s += 'num_magnitude_bins={num_magnitude_bins}'
        s += ', interpolation={interpolation}'
        s += ', fill={fill}'
        s += ')'
        return s.format(**self.__dict__)
|
class RichProgressBar(plc.RichProgressBar):
    """Progress bar that removes all `grad norms` from display."""
    def get_metrics(self, trainer, pl_module):
        metrics = super().get_metrics(trainer, pl_module)
        return {key: val for key, val in metrics.items() if 'grad' not in key}
|
class TQDMProgressBar(plc.TQDMProgressBar):
    """Progress bar that removes all `grad norms` from display."""
    def get_metrics(self, trainer, pl_module):
        metrics = super().get_metrics(trainer, pl_module)
        return {key: val for key, val in metrics.items() if 'grad' not in key}
|
class TrainingManager(plc.Callback):
    """Callback to save a dummy file as an indicator when training has started/finished.

    A `training` marker is created on fit start and removed on exceptions/termination;
    a `finished` marker is created on fit end. Raises if either marker already exists,
    which prevents duplicate or overlapping runs on the same checkpoint directory.

    :param ckpt_dir: (Path) Directory in which to create the marker files.
    """
    def __init__(self, ckpt_dir: Path):
        super().__init__()
        ckpt_dir.mkdir(exist_ok=True, parents=True)
        self.fstart = (ckpt_dir / 'training')
        if self.fstart.is_file():
            raise ValueError(f'Training already in progress! ({self.fstart})')
        self.fend = (ckpt_dir / 'finished')
        if self.fend.is_file():
            raise ValueError(f'Training already finished! ({self.fend})')
        # Convert SIGTERM into SystemExit so `on_exception` gets a chance to clean up.
        signal.signal(signal.SIGTERM, self._on_sigterm)

    def _cleanup(self):
        """Remove the `training` marker file, if present."""
        print('-> Deleting "training" file...')
        if self.fstart.is_file():
            self.fstart.unlink()
        print('-> Done! Exiting...')

    def _on_sigterm(self, signum, frame):
        'Signature required by `signal.signal`.'
        raise SystemExit

    def on_exception(self, trainer, pl_module, exception):
        self._cleanup()

    def on_fit_start(self, trainer, pl_module):
        print('-> Creating "training" file...')
        self.fstart.touch()

    def on_fit_end(self, trainer, pl_module):
        self._cleanup()
        # FIX: message previously contained a stray extra double quote ('"finished""').
        print('-> Creating "finished" file...')
        self.fend.touch()
|
class DetectAnomaly(plc.Callback):
    """Check for NaN/infinite loss at each core step. Replacement for `detect_anomaly=True`."""
    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, unused=0):
        loss = outputs['loss']
        if not loss.isfinite():
            raise ValueError(f'Detected NaN/Infinite loss: "{loss}"')
|
def default_convert(data):
    """Convert each NumPy array element into a :class:`torch.Tensor`.

    Tensors pass through unchanged. `Mapping`, `NamedTuple`, and `Sequence` inputs are
    rebuilt with converted elements; any other input (int, str, ...) is returned as-is.
    This is used as the default function for collation when both `batch_sampler` and
    `batch_size` are NOT defined in :class:`~torch.utils.data.DataLoader`.

    Args:
        data: a single data point to be converted.

    Examples:
        >>> default_convert(0)
        0
        >>> default_convert(np.array([0, 1]))
        tensor([0, 1])
        >>> default_convert([np.array([0, 1]), np.array([2, 3])])
        [tensor([0, 1]), tensor([2, 3])]
    """
    data_type = type(data)
    if isinstance(data, torch.Tensor):
        return data
    if data_type.__module__ == 'numpy' and data_type.__name__ not in ('str_', 'string_'):
        # Object/str ndarrays cannot be represented as tensors: leave them untouched.
        if data_type.__name__ == 'ndarray' and np_str_obj_array_pattern.search(data.dtype.str) is not None:
            return data
        return torch.as_tensor(data)
    if isinstance(data, collections.abc.Mapping):
        try:
            return data_type({key: default_convert(data[key]) for key in data})
        except TypeError:
            # The mapping type cannot be constructed from a dict (e.g. needs extra args).
            return {key: default_convert(data[key]) for key in data}
    if isinstance(data, tuple) and hasattr(data, '_fields'):
        # NamedTuple: rebuild field-by-field.
        return data_type(*(default_convert(d) for d in data))
    if isinstance(data, tuple):
        return [default_convert(d) for d in data]
    if isinstance(data, collections.abc.Sequence) and not isinstance(data, string_classes):
        try:
            return data_type([default_convert(d) for d in data])
        except TypeError:
            return [default_convert(d) for d in data]
    return data
|
def default_collate(batch):
    """Collate a batch of samples into batched tensors (or containers thereof).

    Tensors are stacked along a new leading batch dimension; numpy arrays and Python
    scalars are converted to tensors; mappings, namedtuples, and sequences are recursed
    into. Strings, bytes, and `Timer`/`MultiLevelTimer` instances are returned unchanged.
    This is used as the default function for collation when `batch_size` or
    `batch_sampler` is defined in :class:`~torch.utils.data.DataLoader`.

    Type mapping (based on the element type within the batch):
    * `torch.Tensor` -> `torch.Tensor` (with an added outer batch dimension)
    * NumPy arrays / `float` / `int` -> `torch.Tensor`
    * `str` / `bytes` -> unchanged
    * `Mapping[K, V_i]` -> `Mapping[K, default_collate([V_1, V_2, ...])]`
    * `NamedTuple`/`Sequence` -> same container with each "column" collated

    Args:
        batch: a single batch to be collated.

    Examples:
        >>> default_collate([0, 1, 2, 3])
        tensor([0, 1, 2, 3])
        >>> default_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}])
        {'A': tensor([  0, 100]), 'B': tensor([  1, 100])}
    """
    first = batch[0]
    first_type = type(first)
    if isinstance(first, torch.Tensor):
        out = None
        if torch.utils.data.get_worker_info() is not None:
            # In a DataLoader worker: stack directly into shared memory to avoid an extra copy.
            numel = sum(t.numel() for t in batch)
            storage = first.storage()._new_shared(numel, device=first.device)
            out = first.new(storage).resize_(len(batch), *list(first.size()))
        return torch.stack(batch, 0, out=out)
    elif first_type.__module__ == 'numpy' and first_type.__name__ not in ('str_', 'string_'):
        if first_type.__name__ in ('ndarray', 'memmap'):
            # Object/str arrays have no tensor equivalent.
            if np_str_obj_array_pattern.search(first.dtype.str) is not None:
                raise TypeError(default_collate_err_msg_format.format(first.dtype))
            return default_collate([torch.as_tensor(b) for b in batch])
        elif first.shape == ():
            # Batch of numpy scalars.
            return torch.as_tensor(batch)
    elif isinstance(first, float):
        return torch.tensor(batch, dtype=torch.float64)
    elif isinstance(first, int):
        return torch.tensor(batch)
    elif isinstance(first, string_classes):
        return batch
    elif isinstance(first, (Timer, MultiLevelTimer)):
        return batch
    elif isinstance(first, collections.abc.Mapping):
        try:
            return first_type({key: default_collate([d[key] for d in batch]) for key in first})
        except TypeError:
            # The mapping type cannot be constructed from a dict (e.g. needs extra args).
            return {key: default_collate([d[key] for d in batch]) for key in first}
    elif isinstance(first, tuple) and hasattr(first, '_fields'):
        # NamedTuple: collate each field across the batch.
        return first_type(*(default_collate(group) for group in zip(*batch)))
    elif isinstance(first, collections.abc.Sequence):
        it = iter(batch)
        target_len = len(next(it))
        if not all(len(other) == target_len for other in it):
            raise RuntimeError('each element in list of batch should be of equal size')
        transposed = list(zip(*batch))
        if isinstance(first, tuple):
            return [default_collate(group) for group in transposed]
        try:
            return first_type([default_collate(group) for group in transposed])
        except TypeError:
            return [default_collate(group) for group in transposed]
    raise TypeError(default_collate_err_msg_format.format(first_type))
|
def opt_args_deco(deco: Callable) -> Callable:
    """Meta-decorator that makes a decorator's keyword arguments optional.

    The wrapped decorator `deco(func, **kwargs)` can then be applied either bare
    (`@deco`, using its default kwargs) or as a factory (`@deco(foo=10)`), without the
    usual extra level of nesting or a mandatory `deco()(func)` call.

    Implementation: if the positional function is missing, return `partial(deco, **kwargs)`
    so the next call supplies it; otherwise apply `deco` directly.

    :param deco: (Callable) Decorator function with optional parameters to wrap.
    :return: (Callable) If `func` is provided: decorated func, otherwise: decorator to apply to `func`.
    """
    @wraps(deco)
    def factory(func: Optional[Callable]=None, **options) -> Callable:
        if func is not None:
            # Guard against `@deco(some_value)` mistakes with a non-callable positional arg.
            if not isinstance(func, (types.FunctionType, types.MethodType)):
                raise TypeError(f'Positional argument must be a function or method, got {func} of type {type(func)}')
            return deco(func, **options)
        return partial(deco, **options)
    return factory
|
def delegates(to: Optional[Callable]=None, keep: bool=False):
    """From https://www.fast.ai/2019/08/06/delegation/
    Decorator to replace `**kwargs` in a signature with the parameters of `to`.

    Can decorate a class (`@delegates()` pulls params from the parent `__init__`)
    or a function (`@delegates(parent)`).

    :param to: (Callable) Callable containing the params to copy.
    :param keep: (bool) If `True`, keep `*args`/`**kwargs` in the signature.
    :return: (Callable) The decorated class or function with the updated signature.
    """
    def wrapper(f: Union[(type, Callable)]) -> Callable:
        if to is None:
            # Class usage: delegate from the parent's constructor into the child's.
            to_f, from_f = f.__base__.__init__, f.__init__
        else:
            to_f, from_f = to, f
        sig = inspect.signature(from_f)
        params = dict(sig.parameters)
        var_args = params.pop('args', None)
        if var_args:
            # `*args` absorbs the delegate's REQUIRED parameters.
            extra = {k: v for k, v in inspect.signature(to_f).parameters.items()
                     if v.default == inspect.Parameter.empty and k not in params}
            params.update(extra)
        var_kwargs = params.pop('kwargs', None)
        if var_kwargs:
            # `**kwargs` absorbs the delegate's DEFAULTED parameters.
            extra = {k: v for k, v in inspect.signature(to_f).parameters.items()
                     if v.default != inspect.Parameter.empty and k not in params}
            params.update(extra)
        if keep and var_args:
            params['args'] = var_args
        if keep and var_kwargs:
            params['kwargs'] = var_kwargs
        from_f.__signature__ = sig.replace(parameters=list(params.values()))
        return f
    return wrapper
|
def map_container(f: Callable) -> Callable:
    """Decorator to recursively apply `f` through arbitrary nestings of `dict`, `list`, `tuple` & `set`.

    NOTE: `f` can have an arbitrary signature, but its first argument must be the leaf item.
    Extra `*args`/`**kwargs` passed to the decorated function are forwarded to every leaf call.

    Example:
        >>> @map_container
        ... def square(n, bias=0):
        ...     return (n ** 2) + bias
        >>> square({'a': [1, 2, 3], 'b': 4})
        {'a': [1, 4, 9], 'b': 16}
    """
    @wraps(f)
    def recurse(x: Any, *args, **kwargs) -> Any:
        if isinstance(x, dict):
            return {k: recurse(v, *args, **kwargs) for k, v in x.items()}
        if isinstance(x, list):
            return [recurse(v, *args, **kwargs) for v in x]
        if isinstance(x, tuple):
            return tuple(recurse(v, *args, **kwargs) for v in x)
        if isinstance(x, set):
            return {recurse(v, *args, **kwargs) for v in x}
        # Leaf: apply the wrapped function.
        return f(x, *args, **kwargs)
    return recurse
|
@opt_args_deco
def retry_new_on_error(__getitem__: Callable, exc: Union[(BaseException, Sequence[BaseException])]=Exception, silent: bool=False, max: Optional[int]=None, use_blacklist: bool=False) -> Callable:
    """Decorator to wrap a BaseDataset __getitem__ function, and retry a different index if there is an error.

    The idea is to provide a way of ignoring missing/corrupt data without having to blacklist files,
    change the number of items, or do "hacky" workarounds. The less data we have, the less sense this
    decorator makes (we start duplicating items). For debugging/evaluation it probably makes sense to
    disable it.

    NOTE: This decorator assumes we follow the BaseDataset format:
        - We return three dicts (x, y, meta)
        - Errors are logged in meta['errors']
        - NOTE(review): the original docstring mentions a 'log_timings' flag, but the code
          reads `cls.log_time` and `cls.timer` — confirm which attribute name is correct.

    :param __getitem__: (Callable) Dataset `__getitem__` method to decorate.
    :param exc: (tuple|Exception) Expected exceptions to catch and retry on.
    :param silent: (bool) If `False`, log error info to `meta`.
    :param max: (None|int) Maximum number of retries for a single item.
    :param use_blacklist: (bool) If `True`, keep a list of items to avoid.
    :return: (tuple[dict]) x, y, meta returned by `__getitem__`.
    """
    # Shared (closure) state across all calls to the decorated method:
    # `n` counts consecutive failures; `blacklist` remembers items that failed.
    n = 0
    blacklist = set()
    exc = (exc or tuple())  # An empty tuple in `except` catches nothing, disabling retries.
    if isinstance(exc, list):
        exc = tuple(exc)
    @wraps(__getitem__)
    def wrapper(cls, item):
        nonlocal n
        try:
            (x, y, m) = __getitem__(cls, item)
            if ((not silent) and ('errors' not in m)):
                m['errors'] = ''
        except exc as e:
            n += 1
            # `max` retries exceeded across consecutive failures: give up entirely.
            if (max and (n >= max)):
                raise RuntimeError('Exceeded max retries when loading dataset item...')
            if use_blacklist:
                blacklist.add(item)
            if cls.log_time:
                # Discard partial timings from the failed load.
                cls.timer.reset()
            # Pick a different random index (avoiding the failed item and the blacklist)
            # and recurse; the recursive call performs its own retry handling.
            new = item
            while ((new == item) or (new in blacklist)):
                new = random.randrange(len(cls))
            (x, y, m) = wrapper(cls, new)
            if (not silent):
                # Append "(item, exception)" to the error log, ' - ' separated.
                m['errors'] += f"{(' - ' if m['errors'] else '')}{(item, e)}"
        n = 0  # Success: reset the consecutive-failure counter.
        return (x, y, m)
    return wrapper
|
def readlines(file: PathLike, /, encoding=None) -> list[str]:
    """Read a text file as a list of lines, without trailing newline characters."""
    return Path(file).read_text(encoding=encoding).splitlines()
|
def pil2np(img: Image, /) -> NDArray:
    """Convert a PIL image in [0, 255] into a float32 numpy array in [0, 1]."""
    arr = np.array(img, dtype=np.float32)
    return arr / 255.0
|
def np2pil(arr: NDArray, /) -> Image:
    """Convert a numpy image in [0, 1] into a uint8 PIL image in [0, 255]."""
    scaled = (arr * 255).astype(np.uint8)
    return Image.fromarray(scaled)
|
def write_yaml(file: PathLike, data: dict, mkdir: bool=False, sort_keys: bool=False) -> None:
    """Write `data` to a yaml file (the suffix is forced to `.yaml`).

    :param file: (PathLike) Target file.
    :param data: (dict) Data to serialize.
    :param mkdir: (bool) If `True`, create missing parent directories.
    :param sort_keys: (bool) If `True`, sort the top-level keys alphabetically.
    """
    path = Path(file).with_suffix('.yaml')
    if mkdir:
        path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, 'w') as handle:
        yaml.dump(data, handle, sort_keys=sort_keys)
|
def load_yaml(file: PathLike) -> dict:
    """Load a single yaml file."""
    with open(file) as handle:
        return yaml.load(handle, Loader=yaml.FullLoader)
|
def load_merge_yaml(*files: PathLike) -> dict:
    """Load a list of YAML cfg and recursively merge them into a single config.

    Following dictionary merging rules, the first file is the "base" config, which gets
    updated by each subsequent file in order: ((((1 <- 2) <- 3) <- 4) ... <- n).

    :param files: (Sequence[PathLike]) YAML config files to load, from "oldest" to "newest".
    :return: (dict) The merged config from all given files.
    """
    cfgs = [load_yaml(file) for file in files]
    merged = cfgs[0]
    for cfg in cfgs[1:]:
        merged = _merge_yaml(merged, cfg)
    return merged
|
def _merge_yaml(old: dict, new: dict) -> dict:
'Recursively merge two YAML cfg.\n Dictionaries are recursively merged. All other types simply update the current value.\n\n NOTE: This means that a "list of dicts" will simply be updated to whatever the new value is,\n not appended to or recursively checked!\n\n :param old: (dict) Base dictionary containing default keys.\n :param new: (dict) New dictionary containing keys to overwrite in `old`.\n :return: (dict) The merge config.\n '
d = old.copy()
for (k, v) in new.items():
d[k] = (_merge_yaml(d[k], v) if ((k in d) and isinstance(v, dict)) else v)
return d
|
class BaseMetric(Metric):
    """Base class for depth estimation metrics.

    Accumulates a per-sample error (see `_compute`) plus a sample count, and reports
    their ratio in `compute`.

    :param mode: (str) Depth representation used for the error ('raw'|'log'|'inv').
    """
    # FIX: the docstring was previously placed AFTER these attributes, where it was a
    # no-op string statement instead of the class `__doc__`.
    higher_is_better = False
    full_state_update = False

    def __init__(self, mode: str='raw', **kwargs):
        super().__init__(**kwargs)
        assert (mode in MODES)
        self.mode: str = mode
        # Scale factor applied to the accumulated error (log/inv errors are rescaled for readability).
        self.sf: int = {'raw': 1, 'log': 100, 'inv': 1000}[self.mode]
        self.add_state('metric', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('total', default=torch.tensor(0), dist_reduce_fx='sum')

    def _preprocess(self, input, /):
        """Convert input into log-depth or disparity (clipped to avoid division by zero)."""
        if (self.mode == 'raw'):
            pass
        elif (self.mode == 'log'):
            input = input.log()
        elif (self.mode == 'inv'):
            input = (1 / input.clip(min=0.001))
        return input

    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        """Compute an error metric for a single pair.

        :param pred: (Tensor) (b, n) Predicted depth.
        :param target: (Tensor) (b, n) Target depth.
        :return: (Tensor) (b,) Computed metric.
        """
        raise NotImplementedError

    def update(self, pred: Tensor, target: Tensor) -> None:
        """Compute an error metric for a whole batch of predictions and update the state.

        :param pred: (Tensor) (b, n) Predicted depths masked with NaNs.
        :param target: (Tensor) (b, n) Target depths masked with NaNs.
        """
        self.metric += (self.sf * self._compute(self._preprocess(pred), self._preprocess(target)).sum())
        self.total += pred.shape[0]

    def compute(self) -> Tensor:
        """Compute the average metric given the current state."""
        return (self.metric / self.total)
|
class MAE(BaseMetric):
    """Mean absolute error, averaged over valid (non-NaN) elements per batch item."""
    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        diff = pred - target
        return diff.abs().nanmean(dim=1)
|
class RMSE(BaseMetric):
    """Root mean squared error, averaged over valid (non-NaN) elements per batch item."""
    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        sq_err = (pred - target).pow(2)
        return sq_err.nanmean(dim=1).sqrt()
|
class ScaleInvariant(BaseMetric):
    """Scale invariant error: sqrt of the variance-like term `E[err^2] - E[err]^2`."""
    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        err = pred - target
        second_moment = err.pow(2).nanmean(dim=1)
        first_moment_sq = err.nanmean(dim=1).pow(2)
        return (second_moment - first_moment_sq).sqrt()
|
class AbsRel(BaseMetric):
    """Absolute relative error (reported as a percentage via `sf=100`)."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.sf = 100  # Override the base scale factor: report as a percentage.

    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        rel = (pred - target).abs() / target
        return rel.nanmean(dim=1)
|
class SqRel(BaseMetric):
    """Squared relative error (reported as a percentage via `sf=100`)."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.sf = 100  # Override the base scale factor: report as a percentage.

    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        rel = (pred - target).pow(2) / target.pow(2)
        return rel.nanmean(dim=1)
|
class DeltaAcc(BaseMetric):
    """Compute the accuracy for a given error threshold.

    An element counts as accurate when `max(pred/target, target/pred) < delta`.

    :param delta: (float) Relative error threshold (e.g. 1.25).
    """
    # FIX: docstring moved above the attribute so it becomes the real class `__doc__`.
    higher_is_better = True

    def __init__(self, delta: float, **kwargs):
        super().__init__(**kwargs)
        assert (self.mode == 'raw'), 'Accuracy should only be computed using raw depths.'
        self.delta: float = delta
        self.sf = 100  # Report as a percentage.

    def _compute(self, pred: Tensor, target: Tensor) -> Tensor:
        """Fraction of valid (non-NaN) elements within the threshold, per batch item."""
        thresh = torch.max((target / pred), (pred / target))
        # FIX: the denominator must be the COUNT of valid (non-NaN) elements, not the sum
        # of the threshold values (previously `thresh.nansum(dim=1)`), otherwise the
        # result is not a fraction in [0, 1]. NaN entries compare False in the numerator.
        valid = (~thresh.isnan()).sum(dim=1)
        return ((thresh < self.delta).sum(dim=1) / valid)
|
class Timer():
    """Context manager that times a block of code.

    Example:
        with Timer('MyTimer') as t:
            time.sleep(1)
        print(t)  # ==> MyTimer: 1.003 s

    :param name: (str) Timer label when printing.
    :param as_ms: (bool) If `True`, report time in `milliseconds`, otherwise `seconds`.
    :param sync_gpu: (bool) If `True`, synchronize the GPU on Timer enter and exit.
    :param precision: (int) Number of decimal places to print.
    """
    def __init__(self, name: str='Timer', as_ms: bool=False, sync_gpu: bool=False, precision: int=6) -> None:
        self.name: str = name
        self.as_ms: bool = as_ms
        self.sync_gpu: bool = sync_gpu
        self.precision: int = precision
        self._scale: int = 1000 if as_ms else 1  # seconds -> reported units
        self._unit: str = 'ms' if as_ms else 's'
        self._sync_fn: Optional[Callable] = None
        self._t0: Optional[float] = None
        self._t1: Optional[float] = None
        if sync_gpu:
            # Deferred import: torch is only needed when syncing CUDA.
            import torch
            self._sync_fn = torch.cuda.synchronize

    def _maybe_sync(self) -> None:
        """Synchronize CUDA if GPU syncing was requested."""
        if self.sync_gpu:
            self._sync_fn()

    def __repr__(self) -> str:
        """Convert class constructor into string representation."""
        params = inspect.signature(self.__init__).parameters
        pairs = (f'{k}={getattr(self, k)}' for k in params if hasattr(self, k))
        return f"{self.__class__.__qualname__}({', '.join(pairs)})"

    def __str__(self) -> str:
        """`name: elapsed units`."""
        return f'{self.name}: {self.elapsed} {self._unit}'

    def __enter__(self) -> 'Timer':
        """Start the timer (optionally syncing the GPU first)."""
        self._maybe_sync()
        self._t0 = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """Stop the timer (optionally syncing the GPU first)."""
        self._maybe_sync()
        self._t1 = time.perf_counter()

    @property
    def elapsed(self) -> float:
        """Time taken between enter and exit."""
        assert self._t0, '`Timer` has not begun'
        assert self._t1, '`Timer` has not finished'
        return round(self._scale * (self._t1 - self._t0), self.precision)
|
class MultiLevelTimer():
    """Context manager Timer capable of being nested across multiple levels.

    NOTE: We use the *instance* of this class as a context manager, not the class itself.

    Timers are stored as a dict, mapping labels to (depth, start, end, elapsed).
    In order to allow nesting, we keep track of the currently active timers (effectively
    a stack). On __exit__ we pop the most recent label and end that timer.

    Attributes:
    :param name: (str) Global Timer name.
    :param as_ms: (bool) If `True`, store time as `milliseconds`, otherwise `seconds`.
    :param sync_gpu: (bool) If `True`, ensure that GPU is synced on Timer enter and exit.
    :param precision: (int) Number of decimal places to print.

    Example:
        >>> timer = MultiLevelTimer(name='MyTimer', as_ms=True, precision=4)
        >>> with timer('OuterLevel'):
        ...     with timer('InnerLevel'):
        ...         pass

    Levels are named automatically (`Level{depth}`) when entered without calling the
    instance with a label first.
    """
    def __init__(self, name: str='Timer', as_ms: bool=False, sync_gpu: bool=False, precision: int=6) -> None:
        self.name: str = name
        self.as_ms: bool = as_ms
        self.sync_gpu: bool = sync_gpu
        self.precision: int = precision
        self.depth: int = 0  # Current nesting depth (0 = no active timers).
        self._sf: int = (1000 if self.as_ms else 1)  # Scale factor: seconds -> reported units.
        self._units: str = ('ms' if self.as_ms else 's')
        self._sync_fn: Optional[Callable] = None
        self._label: Optional[str] = None  # Label pending for the next `__enter__` (set via `__call__`).
        self._active: list[str] = []  # Stack of currently-running labels.
        self._data: dict[(str, TimerData)] = {}  # label -> {'depth', 'start', 'end', 'elapsed'}
        if self.sync_gpu:
            # Deferred import: torch is only needed when syncing CUDA.
            import torch
            self._sync_fn = torch.cuda.synchronize
    def __repr__(self) -> str:
        'Convert class constructor into string representation.'
        sig = inspect.signature(self.__init__)
        kwargs = {key: getattr(self, key) for key in sig.parameters if hasattr(self, key)}
        s = ', '.join((f'{k}={v}' for (k, v) in kwargs.items()))
        return f'{self.__class__.__qualname__}({s})'
    def __str__(self) -> str:
        'Convert into string representation.'
        s = [self.name]
        # Indent each label by its nesting depth.
        s += [(('\t' * v['depth']) + f"{k}: {v['elapsed']} {self._units}") for (k, v) in self]
        return '\n'.join(s)
    def __getitem__(self, label: str) -> TimerData:
        'Return timer info for the given label.'
        return self._data[label]
    def __iter__(self) -> Generator[(tuple[(str, TimerData)], None, None)]:
        'Iterate through all timers as (`label`, `timer`)'
        for k in self._data:
            (yield (k, self[k]))
    def __call__(self, label: str) -> 'MultiLevelTimer':
        'Required to call a `Timer` instance in a context manager and create a new label.'
        self._label = label
        return self
    def __enter__(self) -> 'MultiLevelTimer':
        'Context manager entry point.'
        self.depth += 1
        # Consume the pending label (set via `__call__`), falling back to an auto name.
        (label, self._label) = (self._label, None)
        label = (label or f'Level{self.depth}')
        if (label in self._data):
            raise KeyError(f'Duplicate Timer key: {label}')
        if self.sync_gpu:
            self._sync_fn()
        self._active.append(label)
        self._data[label] = {'depth': self.depth, 'start': time.perf_counter(), 'end': None, 'elapsed': None}
        return self
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        'Context manager exit point.'
        assert self._active, 'What are you doing here??'
        # Close the most recently opened timer (LIFO).
        label = self._active.pop()
        timer = self._data[label]
        if self.sync_gpu:
            self._sync_fn()
        timer['end'] = time.perf_counter()
        timer['elapsed'] = round((self._sf * (timer['end'] - timer['start'])), self.precision)
        self.depth -= 1
    def reset(self) -> None:
        'Delete all existing `Timer` data.'
        if self._active:
            raise RuntimeError(f'Attempt to reset Timer while active: {self._active}')
        self._data = {}
    def copy(self) -> 'MultiLevelTimer':
        'Return a deep copy of the timer.'
        return copy.deepcopy(self)
    def to_dict(self, key: str='elapsed') -> dict:
        'Return a dict containing only the data for the specified key.'
        return {label: data[key] for (label, data) in self}
    @staticmethod
    def mean_elapsed(timers: Sequence['MultiLevelTimer']) -> Union[(Sequence, dict[(str, float)])]:
        'Return the average elapsed time for each label in a list of timers.'
        if (not timers):
            return timers
        # Gather the elapsed times per label across all timers, then average.
        data = {}
        for t in timers:
            for (k, v) in t:
                if (k in data):
                    data[k].append(v['elapsed'])
                else:
                    data[k] = [v['elapsed']]
        data = {k: (sum(v) / len(v)) for (k, v) in data.items()}
        return data
|
def _get_items(split, mode):
    """Load the items of a KITTI split as dicts with `seq`, `cam`, and `stem` keys."""
    file = kr.get_split_file(split, mode)
    if (split == 'benchmark') and (mode == 'test'):
        return []
    cam_by_side = {'l': 'image_02', 'r': 'image_03'}
    items = []
    for tokens in (line.split() for line in io.readlines(file)):
        items.append({'seq': tokens[0], 'cam': cam_by_side[tokens[2]], 'stem': int(tokens[1])})
    return items
|
class TestKitti():
    """Verify that every file referenced by the splits exists on disk."""

    def _find_missing(self, splits, modes, make_file):
        """Scan all items and return (split labels, sequences) for every missing file."""
        labels, seqs = [], []
        for s in splits:
            for m in modes:
                for i in _get_items(s, m):
                    if not make_file(i).is_file():
                        seqs.append(i['seq'])
                        labels.append(f'{s} {m}')
        return labels, seqs

    def test_image_files(self):
        splits, seqs = self._find_missing(SPLITS, MODES, lambda i: kr.get_image_file(i['seq'], i['cam'], i['stem']))
        assert (not seqs), f'Missing image files in sequences. {set(splits)} {set(seqs)}'

    def test_velo_files(self):
        splits, seqs = self._find_missing(SPLITS, MODES, lambda i: kr.get_velodyne_file(i['seq'], i['stem']))
        assert (not seqs), f'Missing velodyne files in sequences. {set(splits)} {set(seqs)}'

    def test_hints_files(self):
        splits, seqs = self._find_missing(SPLITS, ['train', 'val'], lambda i: kr.get_hint_file(i['seq'], i['cam'], i['stem']))
        assert (not seqs), f'Missing depth hints files in sequences. {set(splits)} {set(seqs)}'

    def test_benchmark_files(self):
        splits, seqs = self._find_missing(['benchmark', 'eigen_benchmark'], MODES, lambda i: kr.get_depth_file(i['seq'], i['cam'], i['stem']))
        assert (not seqs), f'Missing benchmark files in sequences. {set(splits)} {set(seqs)}'
|
def _get_items(split, mode):
    """Load the items of a split as dicts with `seq`, `drive`, `cam`, and `stem` keys."""
    file = kr.get_split_file(split, mode)
    if (split == 'benchmark') and (mode == 'test'):
        return []
    cam_by_side = {'l': 'image_02', 'r': 'image_03'}
    items = []
    for tokens in (line.split() for line in io.readlines(file)):
        parts = tokens[0].split('/')
        items.append({'seq': parts[0], 'drive': parts[1], 'cam': cam_by_side[tokens[2]], 'stem': int(tokens[1])})
    return items
|
class TestKitti():
    """Verify that every directory referenced by the splits exists on disk."""

    def _find_missing(self, splits, modes, make_path):
        """Scan all items and return (split labels, seq/drive names) for every missing directory."""
        labels, drives = [], []
        for s in splits:
            for m in modes:
                for i in _get_items(s, m):
                    if not make_path(i).is_dir():
                        drives.append(i['seq'] + '/' + i['drive'])
                        labels.append(f'{s} {m}')
        return labels, drives

    def test_image_files(self):
        splits, seqs = self._find_missing(SPLITS, MODES, lambda i: kr.get_images_path(i['seq'], i['drive'], i['cam']))
        assert (not seqs), f'Missing image files in sequences. {set(splits)} {set(seqs)}'

    def test_velo_files(self):
        splits, seqs = self._find_missing(SPLITS, MODES, lambda i: kr.get_velos_path(i['seq'], i['drive']))
        assert (not seqs), f'Missing velodyne files in sequences. {set(splits)} {set(seqs)}'

    def test_hints_files(self):
        splits, seqs = self._find_missing(SPLITS, ['train', 'val'], lambda i: kr.get_hints_path(i['seq'], i['drive'], i['cam']))
        assert (not seqs), f'Missing depth hints files in sequences. {set(splits)} {set(seqs)}'

    def test_benchmark_files(self):
        splits, seqs = self._find_missing(['benchmark', 'eigen_benchmark'], MODES, lambda i: kr.get_depths_path(i['seq'], i['drive'], i['cam']))
        assert (not seqs), f'Missing benchmark files in sequences. {set(splits)} {set(seqs)}'
|
def _get_items(mode):
    """Return the item list (second element) of the `syp` split for the given mode."""
    split = syp.load_split(mode)
    return split[1]
|
class TestKitti():
    """Verify that every file referenced by the `syp` splits exists on disk."""

    def _find_missing(self, modes, make_file):
        """Scan all items and return (mode labels, scenes) for every missing file."""
        labels, scenes = [], []
        for m in modes:
            for i in _get_items(m):
                if not make_file(i).is_file():
                    scenes.append(i[0])
                    labels.append(f'{m}')
        return labels, scenes

    def test_image_files(self):
        modes, scenes = self._find_missing(MODES, lambda i: syp.get_image_file(*i))
        assert (not scenes), f'Missing image files in sequences. {set(modes)} {set(scenes)}'

    def test_depth_files(self):
        modes, scenes = self._find_missing({'val'}, lambda i: syp.get_depth_file(*i))
        assert (not scenes), f'Missing depth files in sequences. {set(modes)} {set(scenes)}'

    def test_edges_files(self):
        modes, scenes = self._find_missing({'val'}, lambda i: syp.get_edges_file(i[0], 'edges', i[1]))
        assert (not scenes), f'Missing edges files in sequences. {set(modes)} {set(scenes)}'
|
class TmpData(BaseDataset):
    """Minimal concrete `BaseDataset` fixture: each item's input is just its index."""
    def __init__(self, n, **kwargs):
        # NOTE(review): `self.n` is set before `super().__init__`, presumably
        # because the base constructor relies on `__len__` — TODO confirm.
        self.n = range(n)
        super().__init__(**kwargs)
    def __len__(self):
        return len(self.n)
    def load(self, item, x, y, meta):
        # The raw item index is the only input produced by this fixture.
        x['item'] = self.n[item]
        return (x, y, meta)
    def augment(self, x, y, meta):
        # Deterministic "augmentation": scale the item by 100 and tag the meta dict.
        x['item'] *= 100
        meta['augs'] = 'helloworld'
        return (x, y, meta)
    def show(self, x, y, meta, axs=None):
        # Intentionally a no-op: visualisation is irrelevant for these tests.
        ...
|
class TestBaseDataset():
    """Tests for the `BaseDataset` interface, exercised through the `TmpData` fixture."""
    def test_base(self):
        'Test that we have the expected functions.'
        # The base class is abstract, so direct instantiation must fail.
        with pytest.raises(TypeError):
            _ = BaseDataset()
        assert hasattr(BaseDataset, '__repr__'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, '__len__'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, '__getitem__'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'load'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'collate_fn'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'augment'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'to_torch'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'create_axs'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'show'), 'Missing attribute from base dataset.'
        assert hasattr(BaseDataset, 'play'), 'Missing attribute from base dataset.'
        # Subclasses get a logger named after both the base and the subclass.
        dataset = TmpData(10)
        assert hasattr(dataset, 'logger'), 'Missing logger in dataset.'
        assert (dataset.logger.name == 'BaseDataset.TmpData'), 'Incorrect logger name.'
    def test_timer(self):
        'Test that timing can be enabled/disabled.'
        # With timing enabled, a MultiLevelTimer is attached and meta is tagged.
        dataset = TmpData(10, log_time=True)
        (x, y, meta) = dataset[0]
        assert isinstance(dataset.timer, MultiLevelTimer), 'Incorrect timer class.'
        assert ('data_timer' in meta), 'Missing timing information in meta.'
        # With timing disabled, neither the timer class nor the meta key appear.
        dataset = TmpData(10, log_time=False)
        (x, y, meta) = dataset[0]
        assert (not isinstance(dataset.timer, MultiLevelTimer)), 'Incorrect timer class.'
        assert ('data_timer' not in meta), 'Unexpected timing information in meta.'
    def test_augment(self):
        'Test that augmentations can be enabled/disabled.'
        # `TmpData.augment` multiplies the item by 100 and sets meta['augs'].
        dataset = TmpData(10, use_aug=True)
        (x, y, meta) = dataset[5]
        assert (x['item'] == 500), 'Augmentation not correctly applied.'
        assert ('augs' in meta), 'Missing augmentations information in meta.'
        assert (meta['augs'] == 'helloworld'), 'Augmentation not correctly applied.'
        dataset = TmpData(10, use_aug=False)
        (x, y, meta) = dataset[5]
        assert (x['item'] == 5), 'Unexpected augmentation applied.'
        assert ('augs' not in meta), 'Unexpected augmentation information in meta.'
        # The base-class implementation must be the identity transform.
        (x2, y2, meta2) = BaseDataset.augment(dataset, x, y, meta)
        assert (x2 == x), 'Incorrect default augmentation'
        assert (y2 == y), 'Incorrect default augmentation'
        assert (meta2 == meta), 'Incorrect default augmentation'
    def test_as_torch(self):
        'Test that conversion to torch can be enabled/disabled.'
        dataset = TmpData(10, as_torch=True)
        (x, y, meta) = dataset[0]
        assert isinstance(x['item'], torch.Tensor), 'Incorrect conversion to torch.'
        # Meta entries stay as plain Python types even when `as_torch=True`.
        assert isinstance(meta['items'], str), 'Unexpected meta conversion to torch.'
        dataset = TmpData(10, as_torch=False)
        (x, y, meta) = dataset[0]
        assert isinstance(x['item'], int), 'Unexpected conversion to torch.'
        assert isinstance(meta['items'], str), 'Unexpected meta conversion to torch.'
    def test_retry(self):
        'Test that exception retrying can be enabled/disabled.'
        class TmpData2(TmpData):
            # Fails deterministically on every even item index.
            def load(self, item, x, y, meta):
                if ((item % 2) == 0):
                    raise ValueError
                return super().load(item, x, y, meta)
        # Without `retry_exc`, the exception propagates to the caller.
        with pytest.raises(ValueError):
            _ = TmpData2(10)[2]
        # `retry_exc=Exception` retries on any exception and yields another item.
        class TmpData3(TmpData2, retry_exc=Exception):
            pass
        (x, y, meta) = TmpData3(10)[2]
        assert (x['item'] != 2), 'Error retrying all exceptions.'
        # Retrying on the specific raised exception type must also work.
        class TmpData3(TmpData2, retry_exc=ValueError):
            pass
        (x, y, meta) = TmpData3(10)[2]
        assert (x['item'] != 2), 'Error retrying on a specific exception.'
    @pytest.mark.skip(reason='Creates multiple windows on PyCharm')
    def test_play(self):
        'Test dataset playing iterates correctly and sets window sizes.'
        # Playing requires raw (non-torch) items, so `as_torch=True` must raise.
        dataset = TmpData(5, as_torch=True)
        with pytest.raises(ValueError):
            dataset.play()
        # `play` should call `show` once per dataset item.
        dataset = TmpData(5, as_torch=False)
        dataset.show = mock.Mock()
        dataset.play()
        assert (dataset.show.call_count == 5), 'Incorrect number of calls to show.'
        plt.close()
    @pytest.mark.skip(reason='Images are same size on PyCharm')
    def test_fullscreen(self):
        # Window sizes should differ between windowed and fullscreen playback.
        TmpData(5, as_torch=False).play()
        size1 = plt.get_current_fig_manager().canvas.get_width_height()
        plt.close()
        TmpData(5, as_torch=False).play(fullscreen=True)
        size2 = plt.get_current_fig_manager().canvas.get_width_height()
        plt.close()
        assert (size1 != size2), 'Error setting figure to fullscreen.'
    def test_dataset_collate(self):
        'Test that we can collate data by default correctly.'
        class TmpData2(TmpData, retry_exc=ValueError):
            # Fails on even items (exercises retrying inside a DataLoader) and
            # augments randomly (exercises collating heterogeneous meta entries).
            def load(self, item, x, y, meta):
                if ((item % 2) == 0):
                    raise ValueError
                return super().load(item, x, y, meta)
            def augment(self, x, y, meta):
                return (super().augment(x, y, meta) if (torch.rand(1) < 0.5) else (x, y, meta))
        dataset = TmpData2(10, as_torch=True, use_aug=True)
        batch_size = 4
        loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=4, collate_fn=dataset.collate_fn)
        (x, y, meta) = next(iter(loader))
        # Inputs collate into stacked tensors of shape (batch_size, ...).
        assert isinstance(x['item'], torch.Tensor), 'Incorrect conversion to torch.'
        assert (x['item'].shape == (batch_size,)), 'Incorrect item batch size.'
        assert (x['item'][0] != 0), 'Incorrect retry on error.'
        # Meta entries collate into plain per-sample lists.
        assert ('items' in meta), 'Missing items in meta.'
        assert (len(meta['items']) == batch_size), 'Incorrect items batch size.'
        assert isinstance(meta['items'][0], str), 'Incorrect items type.'
        assert ('data_timer' in meta), 'Missing data_timer in meta.'
        assert (len(meta['data_timer']) == batch_size), 'Incorrect data_timer batch size.'
        assert isinstance(meta['data_timer'][0], MultiLevelTimer), 'Incorrect items type.'
        assert ('errors' in meta), 'Missing errors in meta.'
        assert (len(meta['errors']) == batch_size), 'Incorrect errors batch size.'
        assert isinstance(meta['errors'][0], str), 'Incorrect errors type.'
        assert ('augs' in meta), 'Missing augmentations in meta.'
        assert (len(meta['augs']) == batch_size), 'Incorrect augmentations batch size.'
        assert isinstance(meta['augs'][0], str), 'Incorrect augmentations type.'
|
def test_all():
    """Check all expected symbols are imported."""
    expected = {'register', 'NET_REG', 'DATA_REG', 'LOSS_REG', 'SCHED_REG'}
    assert set(registry.__all__) == expected, 'Incorrect keys in `__all__`.'
|
def test_sched():
    """Check scheduler registry has all expected keys."""
    keys = {'steplr', 'exp', 'cos', 'cos_warm', 'plateau'}
    # Plain string: the original used an f-string with no placeholders (lint F541).
    assert set(SCHED_REG.keys()) == keys, 'Incorrect SCHEDULER keys.'
|
def test_add_net():
    """Check adding to the network registry."""
    # Renamed the second local from `type`, which shadowed the builtin (lint A001).
    name, kind = 'test', 'net'

    @register(name, kind)
    class Test():
        ...
    assert name in NET_REG, 'Missing item from NETWORK registry.'
    NET_REG.pop(name)  # cleanup so other registry tests start from a clean state
|
def test_add_loss():
    """Check adding to the loss registry."""
    # Renamed the second local from `type`, which shadowed the builtin (lint A001).
    name, kind = 'test', 'loss'

    @register(name, kind)
    class Test():
        ...
    assert name in LOSS_REG, 'Missing item from LOSS registry.'
    LOSS_REG.pop(name)  # cleanup so other registry tests start from a clean state
|
def test_add_dataset():
    """Check adding to the dataset registry."""
    # Renamed the second local from `type`, which shadowed the builtin (lint A001).
    name, kind = 'test', 'data'

    @register(name, kind)
    class Test():
        ...
    assert name in DATA_REG, 'Missing item from DATASET registry.'
    DATA_REG.pop(name)  # cleanup so other registry tests start from a clean state
|
def test_add_auto():
    """Check automatic adding based on class name."""
    name = 'test'

    # Class-name suffixes (`Net`, `Loss`, `Dataset`) should pick the registry.
    @register(name)
    class TestNet():
        ...
    assert name in NET_REG, 'Missing item from automatic NET registry.'
    NET_REG.pop(name)

    @register(name)
    class TestLoss():
        ...
    assert name in LOSS_REG, 'Missing item from automatic LOSS registry.'
    LOSS_REG.pop(name)

    name2 = f'{name}2'

    @register(name2)
    class TestReg():
        ...
    assert name2 in LOSS_REG, 'Missing item from automatic LOSS registry.'
    LOSS_REG.pop(name2)

    @register(name)
    class TestDataset():
        ...
    assert name in DATA_REG, 'Missing item from automatic DATA registry.'
    DATA_REG.pop(name)

    # A class name without a recognised suffix cannot be auto-registered.
    with pytest.raises(ValueError):
        @register(name)
        class Test():
            ...
|
def test_add_multiple():
    """Check registering one class under several names at once."""
    names = ('test1', 'test2')

    @register(names, type='net')
    class TestNet():
        ...
    for n in names:
        assert n in NET_REG, 'Missing item from automatic NET registry.'
    # Cleanup with a plain loop; the original built a throwaway list via a
    # comprehension used only for its side effects.
    for n in names:
        NET_REG.pop(n)
|
def test_register_types():
    """Registering under an unknown registry type must raise `TypeError`."""
    with pytest.raises(TypeError):
        @register(name='temp', type='foo')
        class _Dummy():
            ...
|
def test_register_duplicates():
    """Check raised exception when registering a duplicate name."""
    # Renamed the second local from `type`, which shadowed the builtin (lint A001).
    name, kind = 'test', 'net'

    @register(name, kind)
    class Test():
        ...
    # Duplicate registrations must raise, regardless of the `overwrite` default.
    with pytest.raises(ValueError):
        @register(name, kind)
        class Test2():
            ...
    with pytest.raises(ValueError):
        @register(name, kind, overwrite=False)
        class Test3():
            ...
    # Duplicates must also be detected anywhere inside a tuple of names.
    # NOTE(review): if `register` adds 'asdf' before raising on the duplicate,
    # it could leak into NET_REG — confirm against the implementation.
    with pytest.raises(ValueError):
        @register(('asdf', name), kind, overwrite=False)
        class Test4():  # renamed: the original reused `Test3` three times (lint F811)
            ...
    with pytest.raises(ValueError):
        @register((name, 'asdf'), kind, overwrite=False)
        class Test5():
            ...
    NET_REG.pop(name)
|
def test_register_overwrite():
    """Check registry items can be overwritten if desired."""
    # Renamed the second local from `type`, which shadowed the builtin (lint A001).
    name, kind = 'test', 'net'

    @register(name, kind)
    class Test():
        ...
    # `is` for class identity (equivalent to the original `==`, but idiomatic).
    assert NET_REG[name] is Test, 'Unexpected base class when overwriting.'

    @register(name, kind, overwrite=True)
    class Test2():
        ...
    assert NET_REG[name] is Test2, 'Unexpected overwritten class.'
    NET_REG.pop(name)
|
def test_ignore_main():
    """Check classes created in `__main__` are ignored."""
    from unittest.mock import Mock
    name, kind = 'test', 'loss'
    # Fake a class defined in a script's top level.
    Mock.__module__ = '__main__'
    with pytest.warns(UserWarning):
        _ = register(name, kind)(Mock)
    assert name not in LOSS_REG, 'Class from `__main__` not ignored.'
|
def test_all():
    """Check all expected symbols are imported."""
    assert set(mask.__all__) == {'MaskReg'}, 'Incorrect keys in `__all__`.'
|
def test_registry():
    """Check MaskReg is added to LOSS_REGISTRY."""
    assert 'disp_mask' in LOSS_REG, 'Missing key from loss registry.'
    assert LOSS_REG['disp_mask'] is MaskReg, 'Incorrect class in loss registry.'
|
def test_mask():
    """Test MaskReg when all values are 1."""
    shape = (1, 1, 100, 200)
    # All-ones input: the regularisation loss should vanish.
    loss, loss_dict = MaskReg().forward(torch.ones(shape))
    assert loss == 0, 'Error in correct BCELoss.'
    assert not loss_dict, 'Unexpected keys in `loss_dict`.'
    # All-zeros input: the loss should saturate at 100.
    loss, loss_dict = MaskReg().forward(torch.zeros(shape))
    assert loss == 100.0, 'Error in incorrect BCELoss.'
|
def test_all():
    """Check all expected symbols are imported."""
    assert set(occlusion.__all__) == {'OccReg'}, 'Incorrect keys in `__all__`.'
|
def test_registry():
    """Check OcclusionReg is added to LOSS_REGISTRY."""
    assert 'disp_occ' in LOSS_REG, 'Missing key from loss registry.'
    assert LOSS_REG['disp_occ'] is OccReg, 'Incorrect class in loss registry.'
|
def test_occlusion_ones():
    """Test OcclusionReg when all values are 1."""
    shape = (1, 1, 100, 200)
    # Renamed from `input`, which shadowed the builtin (lint A001).
    x = torch.ones(shape)
    loss, loss_dict = OccReg(invert=False).forward(x)
    assert loss == 1.0, 'Error in `invert=False`.'
    assert not loss_dict, 'Unexpected keys in `loss_dict`.'
    # Inverting should negate the loss.
    loss, loss_dict = OccReg(invert=True).forward(x)
    assert loss == -1.0, 'Error in `invert=True`.'
    # The default must behave as `invert=False`.
    loss, loss_dict = OccReg().forward(x)
    assert loss == 1.0, 'Error in default `invert`. Expected `False`.'
|
def test_occlusion_rand():
    """Test OcclusionReg with a random tensor."""
    shape = (1, 1, 100, 200)
    # Renamed from `input`, which shadowed the builtin (lint A001).
    x = torch.rand(shape)
    mean = x.mean()
    # The loss should be the (signed) mean of the input.
    loss, loss_dict = OccReg(invert=False).forward(x)
    assert loss == mean, 'Error in `invert=False`.'
    assert not loss_dict, 'Unexpected keys in `loss_dict`.'
    loss, loss_dict = OccReg(invert=True).forward(x)
    assert loss == -mean, 'Error in `invert=True`.'
    # The default must behave as `invert=False`.
    loss, loss_dict = OccReg().forward(x)
    assert loss == mean, 'Error in default `invert`. Expected `False`.'
|
def test_all():
    """Check all expected symbols are imported."""
    expected = {'SmoothReg', 'FeatPeakReg', 'FeatSmoothReg'}
    assert set(smooth.__all__) == expected, 'Incorrect keys in `__all__`.'
|
def test_registry():
    """Check smoothness regularizations are added to LOSS_REGISTRY."""
    expected = {'disp_smooth': SmoothReg, 'feat_peaky': FeatPeakReg, 'feat_smooth': FeatSmoothReg}
    for key, cls in expected.items():
        assert key in LOSS_REG, 'Missing key from loss registry.'
        assert LOSS_REG[key] is cls, 'Incorrect class in loss registry.'
|
def test_all():
    """Check all expected symbols are imported."""
    expected = {'rgb_from_disp', 'rgb_from_feat'}
    assert set(viz.__all__) == expected, 'Incorrect keys in `__all__`.'
|
class TestRGBfromDisp():
    """Tests for `rgb_from_disp` colourmapping of disparity maps."""
    def test_default(self):
        # Defaults should match an explicit 'turbo' cmap with vmin=0 and a
        # per-image 95th-percentile vmax — presumably the implementation's
        # defaults; confirm against `rgb_from_disp`.
        x = torch.rand(2, 1, 10, 20)
        out = rgb_from_disp(x)
        out2 = rgb_from_disp(x, cmap='turbo', vmin=0, vmax=[np.percentile(x[0], 95), np.percentile(x[1], 95)])
        assert np.allclose(out, out2), 'Incorrect default params.'
    def test_range(self):
        'Test disparity conversion with custom normalization ranges.'
        arr = np.array([[0, 0, 0.5, 0.5, 1, 1]])
        out = rgb_from_disp(arr).squeeze()
        # With vmin=0.5, values below 0.5 must clip to the same colour as 0.5.
        out2 = rgb_from_disp(arr, vmin=0.5, vmax=1).squeeze()
        assert np.allclose(out2[2], out2[3]), 'Incorrect sanity check for same value.'
        assert (not np.allclose(out2[3], out2[4])), 'Incorrect sanity check for different value.'
        assert np.allclose(out2[0], out2[2]), 'Incorrect clipping to min value.'
        assert np.allclose(out[0], out2[0]), 'Inconsistent min value.'
        assert (not np.allclose(out2[2], out[2])), 'Incorrect clipping to min value.'
        # With vmax=0.5, values above 0.5 must clip to the same colour as 0.5.
        out3 = rgb_from_disp(arr, vmin=0, vmax=0.5).squeeze()
        assert np.allclose(out3[2], out3[3]), 'Incorrect sanity check for same value.'
        assert (not np.allclose(out3[2], out3[0])), 'Incorrect sanity check for different value.'
        assert np.allclose(out3[2], out3[4]), 'Incorrect clipping to max value.'
        assert np.allclose(out[5], out3[5]), 'Inconsistent max value.'
        assert (not np.allclose(out3[2], out[2])), 'Incorrect clipping to max value.'
    def test_inv(self):
        # `invert=True` on x should equal `invert=False` on 1/x.
        x = torch.rand(2, 1, 10, 20)
        x_inv = (1 / x)
        out = rgb_from_disp(x, invert=True)
        out2 = rgb_from_disp(x_inv, invert=False)
        assert np.allclose(out, out2), 'Incorrect inversion.'
    def test_shape(self):
        # Output dimensionality should track the input: 4D in -> 4D out, 2D in -> 3D out.
        x = torch.rand(1, 1, 10, 20)
        out = rgb_from_disp(x)
        out2 = rgb_from_disp(x.squeeze())
        assert np.allclose(out[0], out2), 'Incorrect out with different ndim.'
        assert (out.ndim == 4), 'Incorrect dim for 4D input.'
        assert (out2.ndim == 3), 'Incorrect dim for 2D input.'
    def test_np(self):
        # Torch (b, c, h, w) and numpy (b, h, w, c) inputs should produce the same colours.
        x = torch.rand(2, 1, 10, 20)
        x_np = x.permute(0, 2, 3, 1).numpy()
        out = rgb_from_disp(x)
        out = out.permute(0, 2, 3, 1).numpy()
        out2 = rgb_from_disp(x_np)
        assert np.allclose(out, out2), 'Incorrect conversion to np.'
|
class TestRGBfromFeat():
    """Tests for `rgb_from_feat` feature-map visualisation."""
    def test_norm(self):
        """Each output channel should span the full [0, 1] range."""
        feat = torch.rand(1, 5, 10, 20)
        out = rgb_from_feat(feat).squeeze().flatten(-2, -1)
        assert torch.allclose(out.min(-1)[0], out.new_zeros(3)), 'Incorrect min norm.'
        assert torch.allclose(out.max(-1)[0], out.new_ones(3)), 'Incorrect max norm.'
    def test_shape(self):
        """Batched and unbatched inputs should both map to 3-channel outputs."""
        feat = torch.rand(1, 5, 10, 20)
        batched = rgb_from_feat(feat)
        assert batched.shape[1] == 3, 'Expected output to be RGB.'
        single = rgb_from_feat(feat[0])
        assert single.shape[0] == 3, 'Expected output to be RGB.'
        assert torch.allclose(batched[0], single), 'Incorrect output with different dimensions..'
    def test_np(self):
        """Torch and numpy inputs should produce matching outputs."""
        feat = torch.rand(2, 5, 10, 20)
        feat_np = feat.permute(0, 2, 3, 1).numpy()
        out = rgb_from_feat(feat)
        out = out.permute(0, 2, 3, 1).numpy()
        out_np = rgb_from_feat(feat_np)
        assert np.allclose(out, out_np), 'Incorrect conversion to np.'
|
class TestDefaultCollate():
    """Checks for the project's `default_collate` helper."""
    def test_base(self):
        """Tensors and strings should collate exactly like torch's default collate."""
        torch_collate = torch.utils.data._utils.collate.default_collate
        tensors = [torch.rand(3, 100, 200) for _ in range(5)]
        expected = torch_collate(tensors)
        result = default_collate(tensors)
        assert result.allclose(expected), 'Error when matching default tensor collate.'
        strings = ['test' for _ in range(5)]  # renamed from `input` (shadowed builtin)
        expected = torch_collate(strings)
        result = default_collate(strings)
        assert strings == expected == result, 'Error when matching default string collate.'
    def test_timer(self):
        """Collating `MultiLevelTimer`s should return the list unchanged."""
        timers = [MultiLevelTimer() for _ in range(5)]
        assert default_collate(timers) == timers, 'Error when matching MultiLevelTimer collate.'
|
def test_all():
    """Check all expected symbols are imported."""
    expected = {'opt_args_deco', 'delegates', 'map_container', 'retry_new_on_error'}
    assert set(deco.__all__) == expected, 'Incorrect keys in `__all__`.'
|
@opt_args_deco
def _deco(func, prefix='', suffix=''):
    """Helper decorator: wraps `func` to return its result plus a prefixed/suffixed string form."""
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        return (result, f'{prefix}{result}{suffix}')
    return wrapper
|
def _add(a, b):
'Helper.'
return (a + b)
|
class TestOptArgsDeco():
    """Checks for `opt_args_deco` via the `_deco`/`_add` helpers."""
    def test_base(self):
        """Test different ways of instantiating optional arguments."""
        decorated = _deco(_add)
        assert decorated(1, 2) == (3, '3'), 'Error with default arguments.'
        decorated = _deco(_add, prefix='***')
        assert decorated(1, 2) == (3, '***3'), 'Error with first default arg.'
        decorated = _deco(prefix='***', suffix='***')(_add)
        assert decorated(1, 2) == (3, '***3***'), 'Error with default args.'
    def test_callable(self):
        """Passing optional arguments positionally must raise `TypeError`."""
        with pytest.raises(TypeError):
            _ = _deco(_add, '***')
        with pytest.raises(TypeError):
            _ = _deco('***')(_add)
|
def _parent(a, b=0, c=None, **kwargs):
...
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.