code stringlengths 17 6.64M |
|---|
def freeze(net: nn.Module, /) -> nn.Module:
    """Disable gradient tracking on every parameter, preventing training. Returns the same module."""
    net.requires_grad_(False)
    return net
|
def unfreeze(net: nn.Module, /) -> nn.Module:
    """Enable gradient tracking on every parameter, making the model trainable. Returns the same module."""
    net.requires_grad_(True)
    return net
|
def allclose(net1: nn.Module, net2: nn.Module, /) -> bool:
    """Check if two networks have the exact same parameters.

    Networks with a different number of parameters are never equal. Previously `zip` silently
    truncated to the shorter parameter list, so a network could compare equal to a strict
    superset of itself. Shape/dtype mismatches are reported as "not equal" rather than raised.

    :param net1: (nn.Module) First network to compare.
    :param net2: (nn.Module) Second network to compare.
    :return: (bool) `True` if both networks have numerically close parameters.
    """
    from itertools import zip_longest
    for (p1, p2) in zip_longest(net1.parameters(), net2.parameters()):
        if (p1 is None) or (p2 is None):  # one network has more parameters than the other
            return False
        try:
            if not p1.allclose(p2):
                return False
        except RuntimeError:  # e.g. mismatched shapes/dtypes
            return False
    return True
|
def num_parameters(net: nn.Module, /, requires_grad: ty.N[bool]=None) -> int:
    """Count parameters in a network.

    :param net: (nn.Module) Network to inspect.
    :param requires_grad: (None|bool) If `None` count everything; otherwise count only
        parameters whose `requires_grad` flag matches.
    :return: (int) Total number of (matching) parameter elements.
    """
    params = net.parameters()
    if requires_grad is not None:
        params = (p for p in params if p.requires_grad == requires_grad)
    return sum(p.numel() for p in params)
|
@map_container
def to_torch(x: ty.Any, /, permute: bool=True, device: ty.N[torch.device]=None) -> ty.Any:
    """Convert an arbitrary structure to `torch.Tensor`s (applied per-leaf via `map_container`).

    :param x: (ty.Any) Arbitrary structure to convert to tensors.
    :param permute: (bool) If `True`, move channels-last to PyTorch convention (b, h, w, c) -> (b, c, h, w).
    :param device: (torch.device) Device to place tensors on.
    :return: (ty.Any) Input structure, converted to tensors.
    """
    if isinstance(x, (str, Timer, MultiLevelTimer)):
        return x  # non-numeric leaves pass through untouched
    t = torch.as_tensor(x, device=device)
    if permute and t.ndim > 2:
        # Keep any leading batch dims, then (h, w, c) -> (c, h, w).
        order = list(range(t.ndim - 3)) + [-1, -3, -2]
        t = t.permute(order)
    return t
|
@map_container
def to_np(x: ty.Any, /, permute: bool=True) -> ty.Any:
    """Convert an arbitrary structure to `numpy.ndarray`s (applied per-leaf via `map_container`).

    :param x: (ty.Any) Arbitrary structure to convert to ndarrays.
    :param permute: (bool) If `True`, move PyTorch convention to channels-last (b, c, h, w) -> (b, h, w, c).
    :return: (ty.Any) Input structure, converted to ndarrays.
    """
    if isinstance(x, (np.ndarray, str, Timer, MultiLevelTimer)):
        return x  # already an array or a non-numeric leaf
    if permute and x.ndim > 2:
        # Keep any leading batch dims, then (c, h, w) -> (h, w, c).
        order = list(range(x.ndim - 3)) + [-2, -1, -3]
        x = x.permute(order)
    return x.detach().cpu().numpy()
|
@map_container
def op(_x: ty.Any, /, *args, fn: ty.U[(str, ty.Callable)], **kwargs) -> ty.Any:
    """Apply a function to every leaf of an arbitrary structure.

    `fn` may be a callable (invoked as `fn(leaf, *args, **kwargs)`) or the name of a method
    looked up on each leaf (invoked as `leaf.fn(*args, **kwargs)`).

    Example:
        >>> out = op(input, device, fn='to')                 # leaf.to(device)
        >>> out = op(input, fn=torch.softmax, dim=1)         # torch.softmax(leaf, dim=1)

    :param _x: (ty.Any) Arbitrary structure to process.
    :param args: (tuple) Positional args forwarded to `fn`.
    :param fn: (str|Callable) Function to apply, or attribute name to resolve on each leaf.
    :param kwargs: (dict) Keyword args forwarded to `fn`.
    :return: (ty.Any) Input structure with `fn` applied to every leaf.
    """
    if isinstance(_x, (str, Timer, MultiLevelTimer)):
        return _x  # skip non-numeric leaves
    if isinstance(fn, str):
        return getattr(_x, fn)(*args, **kwargs)
    return fn(_x, *args, **kwargs)
|
@opt_args_deco
def allow_np(fn: ty.N[ty.Callable], permute: bool=False) -> ty.Callable:
    """Decorator making a torch-based function accept `np.ndarray` inputs.

    Implement the function with torch ops; the wrapper converts ndarray args to tensors on the
    way in and converts results back on the way out. Since CPU tensors share memory with
    ndarrays, the conversion is essentially free. Mixing `np.ndarray` and `torch.Tensor`
    arguments in one call is an error; non-array args pass through unchanged.

    :param fn: (Callable) Function to decorate.
    :param permute: (bool) If `True`, permute numpy inputs into PyTorch convention (b, h, w, c) -> (b, c, h, w).
    """
    ann = fn.__annotations__
    # Widen `Tensor` annotations to also advertise ndarray support.
    for name, hint in ann.items():
        if hint == ty.T:
            ann[name] = ty.U[(ty.A, hint)]

    @wraps(fn)
    def wrapper(*args, **kwargs):
        flat = args + tuple(kwargs.values())
        has_np = any(isinstance(a, np.ndarray) for a in flat)
        has_torch = any(isinstance(a, torch.Tensor) for a in flat)
        if has_np and has_torch:
            raise ValueError('Must pass only `np.ndarray` or `torch.Tensor`!')
        if has_np:
            args, kwargs = to_torch((args, kwargs), permute=permute)
        out = fn(*args, **kwargs)
        return to_np(out, permute=permute) if has_np else out
    return wrapper
|
def dilate_mask(mask: ty.T, kernel_size: int=3) -> ty.T:
    """Morphologically dilate a binary mask.

    A pixel becomes valid if any pixel within the kernel window is valid (`True`).

    :param mask: (Tensor) (b, 1, h, w) Boolean mask indicating valid pixels.
    :param kernel_size: (int) Kernel size used for dilation.
    :return: (Tensor) (b, 1, h, w) Dilated boolean mask.
    """
    pad = kernel_size // 2
    pooled = F.max_pool2d(mask.float(), kernel_size=kernel_size, stride=1, padding=pad)
    return pooled.bool()
|
def erode_mask(mask: ty.T, kernel_size: int=3) -> ty.T:
    """Morphologically erode a binary mask.

    A pixel is removed if any pixel within the kernel window is invalid (`False`).
    PyTorch has no `min_pool`, so this is implemented as the complement of dilating
    the inverted mask.

    :param mask: (Tensor) (b, 1, h, w) Boolean mask indicating valid pixels.
    :param kernel_size: (int) Kernel size used for erosion.
    :return: (Tensor) (b, 1, h, w) Eroded boolean mask.
    """
    return ~dilate_mask(~mask, kernel_size=kernel_size)
|
@allow_np(permute=True)
def standardize(x: ty.T, /, mean: StatsRGB=_mean, std: StatsRGB=_std) -> ty.T:
    """Standardize an image: `(x - mean) / std`. Defaults use ImageNet statistics."""
    view = [1] * (x.ndim - 3) + [3, 1, 1]  # broadcast per-channel stats over (h, w)
    m = x.new_tensor(mean).view(view)
    s = x.new_tensor(std).view(view)
    return (x - m) / s
|
@allow_np(permute=True)
def unstandardize(x: ty.T, /, mean: StatsRGB=_mean, std: StatsRGB=_std) -> ty.T:
    """Undo standardization: `x * std + mean`. Defaults use ImageNet statistics."""
    view = [1] * (x.ndim - 3) + [3, 1, 1]  # broadcast per-channel stats over (h, w)
    m = x.new_tensor(mean).view(view)
    s = x.new_tensor(std).view(view)
    return x * s + m
|
@allow_np(permute=True)
def to_gray(x: ty.T, /, coeffs: StatsRGB=_coeffs, keepdim: bool=False) -> ty.T:
    """Convert an RGB image to grayscale via a weighted sum over the channel dim."""
    view = [1] * (x.ndim - 3) + [3, 1, 1]  # broadcast per-channel weights over (h, w)
    w = x.new_tensor(coeffs).view(view)
    return (x * w).sum(dim=1, keepdim=keepdim)
|
def mean_normalize(x: ty.T, /, dim: ty.U[(int, ty.S[int])]=(2, 3)) -> ty.T:
    """Normalize by the mean computed across the specified dimension(s).

    :param x: (Tensor) (*) Input tensor of any shape.
    :param dim: (int | ty.S[int]) Dimension(s) to average over.
    :return: (Tensor) (*) Mean-normalized tensor with the same shape.
    """
    denom = x.mean(dim=dim, keepdim=True).clamp(min=eps(x))  # clamp guards against /0
    return x / denom
|
def eye_like(x: ty.T, /) -> ty.T:
    """Create an identity matrix matching the input's dtype, device and shape.

    The input may have any leading dimensions, but the trailing two must be square.

    :param x: (Tensor) (*, n, n) Reference tensor.
    :return: (Tensor) (*, n, n) Batched identity matrices.
    :raises ValueError: If the input has fewer than two dims or is not square.
    """
    if x.ndim < 2:
        raise ValueError(f'Input must have at least two dimensions! Got "{x.ndim}"')
    n, m = x.shape[-2:]
    if n != m:
        raise ValueError(f'Input last two dimensions must be square (*, n, n)! Got "{x.shape}"')
    eye = torch.eye(n, dtype=x.dtype, device=x.device)
    view = [1] * (x.ndim - 2) + [n, n]
    return eye.view(view).expand_as(x).clone()
|
def interpolate_like(input: ty.T, /, other: ty.T, mode: str='nearest', align_corners: bool=False) -> ty.T:
    """Resize `input` spatially to match the (h, w) of `other`."""
    # `align_corners` is not a valid option for nearest interpolation.
    corners = None if mode == 'nearest' else align_corners
    return F.interpolate(input, size=other.shape[-2:], mode=mode, align_corners=corners)
|
def expand_dim(x: ty.T, /, num: ty.U[(int, ty.S[int])], dim: ty.U[(int, ty.S[int])]=0, insert: bool=False) -> ty.T:
    """Expand the given dimension(s) of a tensor, optionally inserting new ones first.

    >>> expand_dim(torch.rand(1, 1, 1), num=5, dim=1, insert=False)        # -> (1, 5, 1)
    >>> expand_dim(torch.rand(1, 1, 1), num=5, dim=1, insert=True)         # -> (1, 5, 1, 1)
    >>> expand_dim(torch.rand(1, 1, 1), num=(5, 3), dim=(0, 1), insert=False)  # -> (5, 3, 1)
    >>> expand_dim(torch.rand(1, 1, 1), num=(5, 3), dim=(0, 1), insert=True)   # -> (5, 3, 1, 1, 1)

    :param x: (Tensor) (*) Input tensor of any shape.
    :param num: (int|ty.S[int]) Expansion amount for the target dimension(s).
    :param dim: (int|ty.S[int]) Dimension(s) to expand.
    :param insert: (bool) If `True`, insert a new dimension at the specified location(s).
    :return: (Tensor) Expanded tensor.
    """
    if isinstance(num, int):
        if isinstance(dim, int):
            nums, dims = [num], [dim]
        else:
            nums, dims = [num] * len(dim), dim  # one expansion amount shared by all dims
    else:
        nums, dims = num, dim
        if len(nums) != len(dims):
            raise ValueError(f'Non-matching expansion and dims. ({len(nums)} vs. {len(dims)})')
    if insert:
        for d in dims:
            x = x.unsqueeze(d)
    sizes = [-1] * x.ndim  # -1 keeps the existing size
    for n, d in zip(nums, dims):
        sizes[d] = n
    return x.expand(sizes)
|
def min(x: ty.T, dim: ty.N[ty.U[(int, ty.S)]]=None, keepdim: bool=False):
    """Find the min values of the input tensor along the desired dimension(s).
    Wrapper around `torch.min` that returns only the min value and can be applied to multiple dimensions.

    NOTE: The previous `dim=None` path called `x.min(keepdim=keepdim)`, which is not a valid
    `torch.min` overload and always raised a TypeError. Reducing over all dims (honoring
    `keepdim`) matches the documented behavior.

    :param x: (Tensor) (*) Input tensor of any shape.
    :param dim: (None|int|ty.S) If `None`, compute min across all dims. Otherwise only specified.
    :param keepdim: (bool) If `True`, keep the reduced dimensions.
    :return: (Tensor) (*) Min values of input tensor. Number of dims depends on `keepdim`.
    """
    if dim is None:
        dim = list(range(x.ndim))  # full reduction over every dimension
    if isinstance(dim, int):
        dim = [dim]
    if not all((-x.ndim) <= d < x.ndim for d in dim):
        raise IndexError(f'Dimension out of range (expected to be in range [{(- x.ndim)}, {(x.ndim - 1)}], but got {dim})')
    # Canonicalize to positive dims and reduce from the highest dim down, so earlier
    # reductions (with keepdim=False) do not shift the remaining dims.
    dim = sorted({d if d >= 0 else x.ndim + d for d in dim}, reverse=True)
    for d in dim:
        x = x.min(d, keepdim=keepdim)[0]
    return x
|
def max(x: ty.T, dim: ty.N[ty.U[(int, ty.S)]]=None, keepdim: bool=False):
    """Find the max values of the input tensor along the desired dimension(s).
    Wrapper around `torch.max` that returns only the max value and can be applied to multiple dimensions.

    NOTE: The previous `dim=None` path called `x.max(keepdim=keepdim)`, which is not a valid
    `torch.max` overload and always raised a TypeError. Reducing over all dims (honoring
    `keepdim`) matches the documented behavior.

    :param x: (Tensor) (*) Input tensor of any shape.
    :param dim: (None|int|ty.S) If `None`, compute max across all dims. Otherwise only specified.
    :param keepdim: (bool) If `True`, keep the reduced dimensions.
    :return: (Tensor) (*) Max values of input tensor. Number of dims depends on `keepdim`.
    """
    if dim is None:
        dim = list(range(x.ndim))  # full reduction over every dimension
    if isinstance(dim, int):
        dim = [dim]
    if not all((-x.ndim) <= d < x.ndim for d in dim):
        raise IndexError(f'Dimension out of range (expected to be in range [{(- x.ndim)}, {(x.ndim - 1)}], but got {dim})')
    # Canonicalize to positive dims and reduce from the highest dim down, so earlier
    # reductions (with keepdim=False) do not shift the remaining dims.
    dim = sorted({d if d >= 0 else x.ndim + d for d in dim}, reverse=True)
    for d in dim:
        x = x.max(d, keepdim=keepdim)[0]
    return x
|
def get_cls(cls_dict: dict[(str, ty.Type[T])], /, *args, type: str, **kwargs) -> T:
    """Instantiate a class selected by key from a registry dict.

    Making `type` keyword-only both forces explicit call sites and lets callers forward a cfg
    dict whose `type` entry has been popped into this parameter.

    :param cls_dict: (dict[str, cls]) Mapping from key to candidate classes.
    :param args: (tuple) Args forwarded to the target class.
    :param type: (str) Key of the target class. Keyword-only argument.
    :param kwargs: (dict) Kwargs forwarded to the target class.
    :return: Target class instance.
    :raises ValueError: If the key is unknown or construction fails.
    """
    try:
        target = cls_dict[type]
        return target(*args, **kwargs)
    except Exception as err:
        raise ValueError(f'Error using "{type}" in {list(cls_dict)}') from err
|
def get_net(cfg: dict) -> nn.ModuleDict:
    """Instantiate the target networks from a cfg dict.

    Each key selects a network type from the registry (e.g. `depth`, `pose`) and maps to the
    kwargs used to build it. Setting an entry to `None` omits that network, which is useful
    when overriding the default cfg. See `cfg/defaults.yaml` for a full example.

    Example:
        ```
        cfg = {
            'depth': {'enc_name': 'convnext_base', 'pretrained': True, 'dec_name': 'monodepth', 'out_scales': [0, 1, 2, 3]},
            'pose': {'enc_name': 'resnet18', 'pretrained': True},
        }
        ```

    :param cfg: (NetCfg) Dict of dicts, containing the network `type` and kwargs for each network.
    :return: (nn.ModuleDict) Dict of instantiated networks.
    """
    reg.trigger_nets()
    reg.trigger_decoders()
    nets = OrderedDict()
    for name, kw in cfg.items():
        if kw is None:
            continue  # network disabled in cfg
        nets[name] = get_cls(reg.NET_REG, type=name, **kw)
    return nn.ModuleDict(nets)
|
def get_loss(cfg: dict) -> tuple[(nn.ModuleDict, nn.ParameterDict)]:
    """Instantiate the target losses from a cfg dict.

    Each entry additionally accepts a `weight` kwarg (default: 1) used to balance the losses
    when computing the final total; it is popped before constructing the loss itself.
    Setting an entry to `None` omits that loss. See `cfg/defaults.yaml` for a full example.

    Example:
        ```
        cfg = {
            'img_recon': {'weight': 1, 'loss_name': 'ssim', 'use_min': True},
            'disp_smooth': {'weight': 0.001, 'use_edges': True},
        }
        ```

    :param cfg: (LossDict) Dict of dicts, containing the loss `type`, `weight` and kwargs for each loss.
    :return: (tuple) Instantiated losses and their (non-trainable) weights.
    """
    reg.trigger_losses()
    losses = nn.ModuleDict()
    weights = nn.ParameterDict()
    for name, kw in cfg.items():
        if kw is None:
            continue  # loss disabled in cfg
        w = kw.pop('weight', 1)
        weights[name] = nn.Parameter(torch.as_tensor(w), requires_grad=False)
        losses[name] = reg.LOSS_REG[name](**kw)
    return losses, weights
|
def get_ds(cfg: dict, mode: ty.N[str]=None) -> dict[(str, Dataset)]:
    """Instantiate the target datasets from a cfg dict.

    Each entry holds the default kwargs for a dataset class, optionally containing
    `train`/`val`/`test` sub-dicts that override the defaults for the requested `mode`.
    Setting an entry to `None` omits that dataset. See `cfg/defaults.yaml` for a full example.

    Example:
        ```
        cfg = {
            'kitti_lmdb': {
                'split': 'eigen_zhou', 'shape': (192, 640), 'supp_idxs': [-1, 1, 0],
                'train': {'mode': 'train', 'use_aug': True},
                'val': {'mode': 'val', 'use_aug': False},
            },
        }
        ```

    :param cfg: (DataCfg) Dict of dicts, containing the dataset `type` and kwargs for each dataset.
    :param mode: (str) Mode to use for the dataset. If `None`, use the default cfg.
    :return: (dict[str, Dataset]) Dict of instantiated datasets.
    """
    reg.trigger_datas()
    datasets = {}
    modes = {'train', 'val', 'test'}
    for name, kw in cfg.items():
        if kw is None:
            continue  # dataset disabled in cfg
        assert isinstance(kw, dict), f"Expected dict of dicts. Got '{kw}'."
        base = {k: v for k, v in kw.items() if k not in modes}
        if mode:
            base.update(kw.get(mode, {}))  # mode-specific overrides win
        datasets[name] = get_cls(reg.DATA_REG, type=name, **base)
    return datasets
|
def get_dl(mode: str, cfg_ds: dict, cfg_dl: dict) -> DataLoader:
    """Instantiate the target dataloader(s) from cfg dicts.

    The dataloader cfg holds defaults, optionally overridden by a `mode` sub-dict
    (`train`/`val`/`test`). Datasets are expected to be `BaseDataset` subclasses providing a
    `collate_fn`. `pin_memory` defaults to `True`. With multiple datasets, loaders are wrapped
    in `ConcatDataLoader` so every batch contains samples from a single dataset (image shapes
    may differ across datasets). When `use_ddp` is set, a per-dataset `DistributedSampler` is
    created (which takes over `shuffle`/`drop_last`). See `cfg/defaults.yaml` for a full example.

    :param mode: (str) Mode used to select dataset/dataloader overrides.
    :param cfg_ds: (DataCfg) Dict of dicts, containing the dataset `type` and kwargs for each dataset.
    :param cfg_dl: (LoaderCfg) Dict of dicts, containing the dataloader kwargs.
    :return: (DataLoader) Instantiated dataloader (or `ConcatDataLoader` over several).
    """
    datasets = list(get_ds(cfg_ds, mode).values())
    base = {k: v for k, v in cfg_dl.items() if k not in {'train', 'val', 'test'}}
    cfg = base | cfg_dl.get(mode, {})
    cfg.setdefault('pin_memory', True)
    cfg['collate_fn'] = datasets[0].collate_fn
    use_ddp = cfg.pop('use_ddp', False)
    seed = cfg.pop('seed', 42)
    if use_ddp:
        # Samplers own shuffling/dropping in DDP; use a distinct seed per dataset.
        shuffle = cfg.pop('shuffle', False)
        drop_last = cfg.pop('drop_last', False)
        samplers = [
            DistributedSampler(d, shuffle=shuffle, drop_last=drop_last, seed=seed * (10 ** i))
            for i, d in enumerate(datasets)
        ]
    else:
        samplers = [None for _ in datasets]
    loaders = [DataLoader(d, sampler=s, **cfg) for d, s in zip(datasets, samplers)]
    return loaders[0] if len(loaders) == 1 else ConcatDataLoader(loaders)
|
def get_opt(parameters: ty.U[(ty.Iterable, nn.Module)], cfg: dict) -> optim.Optimizer:
    """Instantiate the target optimizer from a cfg dict. Wrapper for `timm` `create_optimizer_v2`.

    Optional cfg keys (popped before forwarding):
        - `frozen_bn`: disable grads on every `BatchNorm2d` (requires an `nn.Module`).
        - `backbone_lr`: use a separate LR for parameters whose name contains `encoder`
          (requires an `nn.Module`; must differ from the main `lr`).

    Example:
        ```
        cfg = {'type': 'adamw', 'lr': 1e-3, 'weight_decay': 1e-4, 'frozen_bn': True}
        ```

    :param parameters: (Iterable|nn.Module) Parameters to forward to the optimizer (in any `torch` format).
    :param cfg: (OptCfg) Target optimizer `type` and kwargs to forward to it.
    :return: (Optimizer) Instantiated optimizer.
    """
    # `timm` expects the key `opt`; accept `type` for consistency with other cfgs.
    if 'type' in cfg:
        cfg['opt'] = cfg.pop('type')
    elif 'opt' not in cfg:
        raise KeyError('Must provide a cfg key `type` or `opt` when instantiating an optimizer.')

    if cfg.pop('frozen_bn', False):
        if not isinstance(parameters, nn.Module):
            raise ValueError('Cannot freeze batch norm parameters unless given nn.Module')
        for mod in parameters.modules():
            if isinstance(mod, nn.BatchNorm2d):
                mod.requires_grad_(False)

    if (blr := cfg.pop('backbone_lr', False)):
        if not isinstance(parameters, nn.Module):
            raise ValueError('Cannot set backbone LR unless given nn.Module')
        if blr == cfg['lr']:
            raise ValueError('Backbone LR must be different from the main LR')
        LOGGER.info(f"Setting backbone LR to {blr} with base LR {cfg['lr']}...")
        named = list(parameters.named_parameters())
        parameters = [
            {'params': (p for n, p in named if 'encoder' not in n)},
            {'params': (p for n, p in named if 'encoder' in n), 'lr': blr},
        ]
    return create_optimizer_v2(parameters, **cfg)
|
def get_sched(opt: optim.Optimizer, cfg: dict[(str, dict)]) -> dict[(str, ty._LRScheduler)]:
    """Instantiate the target schedulers from a cfg dict.

    Entries set to `None` are skipped. See `cfg/defaults.yaml` for a full example.

    Example:
        ```
        cfg = {
            'steplr': {'step_size': 10, 'gamma': 0.1},
            'linear': {'start_factor': 0.1, 'total_iters': 4},
        }
        ```

    :param opt: (Optimizer) Optimizer to schedule.
    :param cfg: (SchedCfg) Dict of dicts, containing the scheduler `type` and kwargs for each scheduler.
    :return: (dict[str, _LRScheduler]) Dict of instantiated schedulers.
    """
    schedulers = {}
    for name, kw in cfg.items():
        if kw is None:
            continue  # scheduler disabled in cfg
        schedulers[name] = get_cls(reg.SCHED_REG, opt, type=name, **kw)
    return schedulers
|
def get_metrics() -> nn.ModuleDict:
    """Instantiate the collection of depth metrics to monitor."""
    collection = {
        'MAE': metrics.MAE(),
        'RMSE': metrics.RMSE(),
        'LogSI': metrics.ScaleInvariant(mode='log'),
        'AbsRel': metrics.AbsRel(),
        'Acc': metrics.DeltaAcc(delta=1.25),
    }
    return nn.ModuleDict(collection)
|
def _get_percentile(x: ty.A, p: int) -> float:
'Safe percentile to handle NaNs/Inf.'
try:
return np.percentile(x, p)
except IndexError:
return 0.0
|
@ops.allow_np(permute=True)
def rgb_from_disp(disp: ty.T, invert: bool=False, cmap: str='turbo', vmin: float=0, vmax: ty.N[ty.U[(float, ty.S[float])]]=None) -> ty.T:
    'Convert a disparity map into an RGB colormap visualization.\n\n :param disp: (Tensor) (*b, *1, h, w) Input disparity/depth map.\n :param invert: (bool) If `True` invert depth into disparity.\n :param cmap: (str) Matplotlib colormap name.\n :param vmin: (float) Minimum value to use when normalizing.\n :param vmax: (None|float|list) Maximum value to use when normalizing. If `None` use 95th percentile.\n :return: (Tensor) (*b, 3, h, w) Colourized disparity map.\n '
    # Tensor bounds are converted to plain Python numbers before reaching `apply_cmap`.
    if isinstance(vmin, ty.T):
        vmin = vmin.tolist()
    if isinstance(vmax, ty.T):
        vmax = vmax.tolist()
    n = disp.ndim
    # Normalize the input rank to (b, 1, h, w) by inserting batch/channel dims.
    if (n == 2):
        disp = disp[(None, None)]
    if (n == 3):
        disp = disp[None]
    if invert:
        disp = geo.to_inv(disp)  # depth -> disparity
    # `to_np` permutes to channels-last (b, h, w, 1); drop the trailing singleton channel.
    disp = ops.to_np(disp).squeeze((- 1))
    if (vmax is None):
        # Robust per-image maximum: 95th percentile of the strictly-positive values.
        vmax = [_get_percentile(d[(d > 0)], 95) for d in disp]
    elif isinstance(vmax, (int, float)):
        vmax = ([vmax] * disp.shape[0])  # broadcast a scalar bound to every batch item
    elif (len(vmax) != disp.shape[0]):
        raise ValueError(f'Non-matching vmax and disp. ({len(vmax)} vs. {disp.shape[0]})')
    # Colourize each image with its own bound, then re-stack into a batch.
    rgb = torch.stack(ops.to_torch([apply_cmap(d, cmap=cmap, vmin=vmin, vmax=v) for (d, v) in zip(disp, vmax)]))
    # Restore the caller's original (un-batched) rank.
    if ((n == 2) or (n == 3)):
        rgb = rgb.squeeze(0)
    return rgb
|
@ops.allow_np(permute=True)
def rgb_from_feat(feat: ty.T) -> ty.T:
    """Convert dense features into an RGB image via PCA.

    NOTE: PCA is fit on all features in the batch, so the visualization is batch dependent.

    :param feat: (Tensor) (*b, c, h, w) Dense feature representation.
    :return: (Tensor) (*b, 3, h, w) The PCAd features.
    """
    ndim = feat.ndim
    if ndim == 3:
        feat = feat[None]  # add a batch dim
    b, _, h, w = feat.shape
    # Flatten to (b*h*w, c) so each pixel is one PCA sample.
    flat = ops.to_np(feat.permute(0, 2, 3, 1).flatten(0, 2))
    proj = PCA(n_components=3).fit_transform(flat)
    # Min-max normalize each projected component to [0, 1].
    proj -= proj.min(0)
    proj /= proj.max(0)
    out = ops.to_torch(proj.reshape(b, h, w, 3))
    return out.squeeze(0) if ndim == 3 else out
|
class SuppImageNotFoundError(FileNotFoundError):
    'Raised when a supplementary ("supp") image file cannot be found.'
    pass
|
class Predictor(Protocol):
    'Structural interface for prediction drivers: load a network, run it over a dataloader and post-process its outputs.'
    @staticmethod
    def get_img_shape(data_type: str) -> N[tuple[(int, int)]]:
        'Image (h, w) associated with `data_type`; `None` presumably means no fixed shape - TODO confirm.'
        ...
    def __call__(self, net: nn.Module, dl: DataLoader, use_stereo_blend: bool, device: N[str]) -> NDArray:
        'Run `net` over the whole dataloader and return the predictions as an array.'
        ...
    def apply(self, net: nn.Module, dl: DataLoader, func: Callable, use_stereo_blend: bool, device: N[str], *args, **kwargs) -> None:
        'Run `net` over the dataloader, applying `func` (with `args`/`kwargs`); returns nothing.'
        ...
    def load_model(self, *args, **kwargs) -> nn.Module:
        'Instantiate and return the network to run.'
        ...
    def preprocess(self, imgs: T) -> T:
        'Transform raw input images into network-ready tensors.'
        ...
    def forward(self, net: nn.Module, imgs: T) -> T:
        'Run a forward pass of `net` on the (preprocessed) images.'
        ...
    def postprocess(self, pred: Tensor, imgs: T) -> T:
        'Convert raw network predictions into the final output.'
        ...
|
class DepthPred(TypedDict, total=False):
    'Outputs of a depth network (all keys optional; multi-scale outputs keyed by scale index).'
    depth_feats: S[T]  # intermediate feature maps
    disp: dict[(int, T)]  # disparity per scale
    disp_stereo: dict[(int, T)]  # disparity per scale - presumably from a stereo branch; confirm
    mask: dict[(int, T)]  # per-scale masks
    mask_stereo: dict[(int, T)]  # per-scale masks - presumably from a stereo branch; confirm
|
class PosePred(TypedDict, total=False):
    'Outputs of a pose network (all keys optional).'
    R: T  # rotation
    t: T  # translation
    fs: T  # presumably focal lengths - TODO confirm
    cs: T  # presumably principal points - TODO confirm
|
class AutoencoderPred(TypedDict, total=True):
    'Outputs of an autoencoder network (all keys required).'
    autoenc_feats: S[T]  # intermediate feature maps
    autoenc_imgs: dict[(int, T)]  # reconstructed images - presumably keyed by scale; confirm
|
class TQDMProgressBar(plc.TQDMProgressBar):
    'Progress bar that removes all `grad norms` from display.'

    def get_metrics(self, trainer, pl_module) -> dict:
        shown = super().get_metrics(trainer, pl_module)
        # Drop any metric whose key mentions `grad` (e.g. gradient norms).
        return {k: v for k, v in shown.items() if 'grad' not in k}
|
class RichProgressBar(plc.RichProgressBar):
    'Progress bar that removes all `grad norms` from display.'

    def get_metrics(self, trainer, pl_module) -> dict:
        shown = super().get_metrics(trainer, pl_module)
        # Drop any metric whose key mentions `grad` (e.g. gradient norms).
        return {k: v for k, v in shown.items() if 'grad' not in k}
|
class DetectAnomaly(plc.Callback):
    'Check for NaN/infinite loss at each core step. Replacement for `detect_anomaly=True`.'

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, unused=0) -> None:
        loss = outputs['loss']
        if not loss.isfinite():
            raise ValueError(f'Detected NaN/Infinite loss: "{loss}"')
|
class TrainingManager(plc.Callback):
    """Callback that drops marker files indicating when training has started/finished.

    A `training_<epoch>_<host>` file exists while a run is in progress and a `finished`
    file once it completes. Construction fails if either marker already exists, preventing
    two runs from sharing a checkpoint dir.
    """

    def __init__(self, ckpt_dir: Path):
        """:param ckpt_dir: (Path) Directory in which the marker files are created."""
        super().__init__()
        self.ckpt_dir = ckpt_dir
        self.ckpt_dir.mkdir(exist_ok=True, parents=True)
        self.host = socket.gethostname()
        self.ftrain = None  # current `training_*` marker file, if any
        self.fend = (ckpt_dir / 'finished')
        if self.is_training:
            raise ValueError(f'Training already in progress! ({self.ftrain})')
        if self.has_finished:
            raise ValueError(f'Training already finished! ({self.fend})')
        # Ensure SIGTERM (e.g. from a cluster scheduler) triggers `on_exception` cleanup.
        signal.signal(signal.SIGTERM, self._on_sigterm)

    @property
    def is_training(self) -> bool:
        """`True` if exactly one `training*` marker exists (cached in `self.ftrain`)."""
        fs = sorted(self.ckpt_dir.glob('training*'))
        n = len(fs)
        if n == 0:
            return False
        if n == 1:
            self.ftrain = fs[0]
            return True
        raise ValueError(f'Invalid number of training files! {fs}')

    @property
    def has_finished(self) -> bool:
        """`True` if the `finished` marker file exists."""
        return self.fend.is_file()

    def on_train_epoch_start(self, trainer, pl_module) -> None:
        """Replace the `training` marker with one naming the current epoch and host."""
        print('-> Creating "training" file...')  # NOTE: was a placeholder-less f-string
        if self.ftrain:
            self.ftrain.unlink(missing_ok=True)
        self.ftrain = (self.ckpt_dir / f'training_{trainer.current_epoch}_{self.host}')
        self.ftrain.touch()

    def on_fit_end(self, trainer, pl_module) -> None:
        """Remove the `training` marker and create the `finished` marker."""
        self._cleanup()
        print('-> Creating "finished" file...')  # FIX: removed stray `"` in the message
        self.fend.touch()

    def on_exception(self, trainer, pl_module, exception) -> None:
        self._cleanup()

    def _cleanup(self) -> None:
        """Delete the `training` marker file, if present."""
        print('-> Deleting "training" file...')
        if self.ftrain:
            self.ftrain.unlink(missing_ok=True)
        print('-> Done! Exiting...')

    def _on_sigterm(self, signum, frame) -> None:
        'Signature required by `signal.signal`.'
        raise SystemExit
|
def default_convert(data):
    """Recursively convert NumPy arrays/scalars into :class:`torch.Tensor`s.

    `Sequence`, `Collection` and `Mapping` containers are traversed and rebuilt with their
    elements converted; namedtuples keep their type; plain tuples become lists. Anything
    that is not a NumPy value (ints, strings, tensors, ...) is returned unchanged.
    String/object ndarrays are also left untouched since they have no tensor equivalent.
    Used as the default collation when neither `batch_sampler` nor `batch_size` is set in
    :class:`~torch.utils.data.DataLoader`.

    Args:
        data: a single data point to be converted

    Examples:
        >>> # xdoctest: +SKIP
        >>> default_convert(np.array([0, 1]))
        tensor([0, 1])
        >>> default_convert([np.array([0, 1]), np.array([2, 3])])
        [tensor([0, 1]), tensor([2, 3])]
    """
    elem_type = type(data)
    if isinstance(data, torch.Tensor):
        return data
    if (elem_type.__module__ == 'numpy') and (elem_type.__name__ not in ('str_', 'string_')):
        # String/object arrays cannot become tensors; pass them through.
        if (elem_type.__name__ == 'ndarray') and (np_str_obj_array_pattern.search(data.dtype.str) is not None):
            return data
        return torch.as_tensor(data)
    if isinstance(data, collections.abc.Mapping):
        converted = {key: default_convert(data[key]) for key in data}
        try:
            return elem_type(converted)
        except TypeError:
            # Mapping type cannot be built from a dict (e.g. requires extra args).
            return converted
    if isinstance(data, tuple) and hasattr(data, '_fields'):  # namedtuple
        return elem_type(*(default_convert(d) for d in data))
    if isinstance(data, tuple):
        return [default_convert(d) for d in data]
    if isinstance(data, collections.abc.Sequence) and (not isinstance(data, str)):
        converted = [default_convert(d) for d in data]
        try:
            return elem_type(converted)
        except TypeError:
            # Sequence type cannot be built from a list; fall back to a plain list.
            return converted
    return data
|
def collate(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    """Generic collation: recursively merge a batch of structured elements.

    If `collate_fn_map` is given, it is consulted first — by exact element type, then by
    subclass match in insertion order — and the matching function handles the batch.
    Otherwise mappings, namedtuples and sequences are traversed recursively; plain tuples
    collate to lists; `Timer`/`MultiLevelTimer` batches pass through unchanged. Any other
    element type raises a `TypeError`.

    Args:
        batch: a single batch to be collated
        collate_fn_map: Optional mapping from element type(s) to the collate function
            handling that type. Each such function takes the batch positionally and
            `collate_fn_map` as a keyword argument.
    """
    elem = batch[0]
    elem_type = type(elem)

    # Registry dispatch: exact type first, then subclass match in insertion order.
    if collate_fn_map is not None:
        if elem_type in collate_fn_map:
            return collate_fn_map[elem_type](batch, collate_fn_map=collate_fn_map)
        for registered in collate_fn_map:
            if isinstance(elem, registered):
                return collate_fn_map[registered](batch, collate_fn_map=collate_fn_map)

    if isinstance(elem, collections.abc.Mapping):
        try:
            return elem_type({key: collate([d[key] for d in batch], collate_fn_map=collate_fn_map) for key in elem})
        except TypeError:
            # Mapping type cannot be built from a dict; fall back to a plain dict.
            return {key: collate([d[key] for d in batch], collate_fn_map=collate_fn_map) for key in elem}
    if isinstance(elem, tuple) and hasattr(elem, '_fields'):  # namedtuple
        return elem_type(*(collate(group, collate_fn_map=collate_fn_map) for group in zip(*batch)))
    if isinstance(elem, collections.abc.Sequence):
        it = iter(batch)
        elem_size = len(next(it))
        if not all(len(other) == elem_size for other in it):
            raise RuntimeError('each element in list of batch should be of equal size')
        transposed = list(zip(*batch))
        if isinstance(elem, tuple):
            return [collate(group, collate_fn_map=collate_fn_map) for group in transposed]
        try:
            return elem_type([collate(group, collate_fn_map=collate_fn_map) for group in transposed])
        except TypeError:
            # Sequence type cannot be built from a list; fall back to a plain list.
            return [collate(group, collate_fn_map=collate_fn_map) for group in transposed]
    if isinstance(elem, (Timer, MultiLevelTimer)):
        return batch  # timers are carried through untouched
    raise TypeError(default_collate_err_msg_format.format(elem_type))
|
def collate_tensor_fn(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    """Stack a batch of tensors along a new leading batch dimension."""
    elem = batch[0]
    out = None
    if torch.utils.data.get_worker_info() is not None:
        # Inside a DataLoader worker: stack directly into shared memory so the
        # batch can be sent to the main process without an extra copy.
        numel = sum(t.numel() for t in batch)
        storage = elem._typed_storage()._new_shared(numel, device=elem.device)
        out = elem.new(storage).resize_(len(batch), *list(elem.size()))
    return torch.stack(batch, 0, out=out)
|
def collate_numpy_array_fn(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    """Convert a batch of ndarrays to tensors and delegate to `collate`."""
    elem = batch[0]
    # String/object arrays have no tensor equivalent.
    if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
        raise TypeError(default_collate_err_msg_format.format(elem.dtype))
    return collate([torch.as_tensor(arr) for arr in batch], collate_fn_map=collate_fn_map)
|
def collate_numpy_scalar_fn(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    'Collate a batch of NumPy scalars into a 1-D tensor (dtype inferred by torch).'
    return torch.as_tensor(batch)
|
def collate_float_fn(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    'Collate a batch of Python floats into a `float64` tensor.'
    return torch.tensor(batch, dtype=torch.float64)
|
def collate_int_fn(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    'Collate a batch of Python ints into an integer tensor.'
    return torch.tensor(batch)
|
def collate_str_fn(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    'Strings are not stackable; return the batch list unchanged.'
    return batch
|
def default_collate(batch):
    """Collate a batch of samples into batched tensors (vendored from
    ``torch.utils.data._utils.collate.default_collate``).

    Adds an outer batch dimension to the elements of ``batch``. Input-to-output
    type mapping (based on the element type):

    * ``torch.Tensor`` -> ``torch.Tensor`` (stacked with an added batch dim)
    * NumPy arrays / scalars -> ``torch.Tensor``
    * ``float`` -> ``torch.float64`` tensor; ``int`` -> tensor
    * ``str`` / ``bytes`` -> unchanged
    * ``Mapping[K, V_i]`` -> ``Mapping[K, default_collate([V_1, V_2, ...])]``
    * ``NamedTuple`` / ``Sequence`` -> same structure with each field collated

    This is the default collation function when `batch_size` or `batch_sampler`
    is given to a :class:`~torch.utils.data.DataLoader`. Custom types can be
    supported either by wrapping this function or by registering a handler in
    ``default_collate_fn_map``.

    Examples:
        >>> default_collate([0, 1, 2, 3])
        tensor([0, 1, 2, 3])
        >>> default_collate(['a', 'b', 'c'])
        ['a', 'b', 'c']
        >>> default_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}])
        {'A': tensor([  0, 100]), 'B': tensor([  1, 100])}

    :param batch: a single batch to be collated
    """
    return collate(batch, collate_fn_map=default_collate_fn_map)
|
def opt_args_deco(deco: ty.Callable) -> ty.Callable:
    """Meta-decorator so a decorator with keyword-only options works both bare and called.

    Allows both usages without the ugly mandatory-call form ``@deco()``:
    ```
    @stringify              # default options
    def f(): ...

    @stringify(prefix='>')  # custom options
    def g(): ...
    ```
    When invoked without a positional function, a `partial` binding the options
    is returned and applied to the function on the next call; otherwise `deco`
    is applied directly. Only plain functions/methods are accepted.

    :param deco: (Callable) Decorator function with optional parameters to wrap.
    :return: (Callable) If `func` is provided: decorated func, otherwise: decorator to apply to `func`.
    """
    @wraps(deco)
    def dispatch(f: ty.N[ty.Callable]=None, **kwargs) -> ty.Callable:
        if f is None:
            # Factory form: bind the options, wait for the function.
            return partial(deco, **kwargs)
        if not isinstance(f, (types.FunctionType, types.MethodType)):
            raise TypeError(f'Positional argument must be a function or method, got {f} of type {type(f)}')
        return deco(f, **kwargs)
    return dispatch
|
def delegates(to: ty.N[ty.Callable]=None, keep: bool=False):
    """From https://www.fast.ai/2019/08/06/delegation/.
    Decorator to replace `*args`/`**kwargs` in a signature with params from `to`.

    This can be used to decorate either a class
    ```
    @delegates()
    class Child(Parent): ...
    ```
    or a function
    ```
    @delegates(parent)
    def func(a, **kwargs): ...
    ```

    :param to: (Callable) Callable containing the params to copy
    :param keep: (bool) If `True`, keep `*args`/`**kwargs` in the signature.
    :return: (Callable) The decorated class or function with the updated signature.
    """
    def wrapper(f: ty.U[(type, ty.Callable)]) -> ty.Callable:
        # With no explicit target, assume `f` is a class: delegate from its
        # base-class __init__ into its own __init__.
        (to_f, from_f) = ((f.__base__.__init__, f.__init__) if (to is None) else (to, f))
        sig = inspect.signature(from_f)
        sigd = dict(sig.parameters)
        # `*args` is replaced by the *required* (no-default) params of the target
        # that the wrapped callable does not already declare.
        args = sigd.pop('args', None)
        if args:
            sigd2 = {k: v for (k, v) in inspect.signature(to_f).parameters.items() if ((v.default == inspect.Parameter.empty) and (k not in sigd))}
            sigd.update(sigd2)
        # `**kwargs` is replaced by the *optional* (defaulted) params of the target.
        kwargs = sigd.pop('kwargs', None)
        if kwargs:
            sigd2 = {k: v for (k, v) in inspect.signature(to_f).parameters.items() if ((v.default != inspect.Parameter.empty) and (k not in sigd))}
            sigd.update(sigd2)
        # Optionally re-append the original catch-alls at the end of the signature.
        if (keep and args):
            sigd['args'] = args
        if (keep and kwargs):
            sigd['kwargs'] = kwargs
        # Only the *visible* signature changes; runtime behavior is untouched.
        from_f.__signature__ = sig.replace(parameters=list(sigd.values()))
        return f
    return wrapper
|
def map_container(f: ty.Callable) -> ty.Callable:
    """Decorator that applies `f` through arbitrary nestings of `dict`, `list`, `tuple` & `set`.

    The wrapped callable takes the (possibly nested) structure as its first
    argument; containers are rebuilt with the same type, leaves are transformed
    by `f`, and any extra ``*args``/``**kwargs`` are forwarded to `f` untouched.

    Example:
    ```
    @map_container
    def square(n, bias=0):
        return (n ** 2) + bias

    square({'a': [1, 2, 3], 'b': 4})  ==>  {'a': [1, 4, 9], 'b': 16}
    ```
    """
    @wraps(f)
    def recurse(obj: T, *args, **kwargs) -> T:
        if isinstance(obj, dict):
            return {key: recurse(val, *args, **kwargs) for key, val in obj.items()}
        if isinstance(obj, list):
            return [recurse(val, *args, **kwargs) for val in obj]
        if isinstance(obj, tuple):
            return tuple(recurse(val, *args, **kwargs) for val in obj)
        if isinstance(obj, set):
            return {recurse(val, *args, **kwargs) for val in obj}
        return f(obj, *args, **kwargs)
    return recurse
|
def readlines(file: Path, /, encoding: str=None, split: bool=False, sep: ty.N[str]=None) -> ty.U[(list[str], list[list[str]])]:
    """Read a text file into a list of lines, optionally tokenizing each line.

    :param file: (Path) File to read.
    :param encoding: (str) Text encoding forwarded to `open`.
    :param split: (bool) If `True`, additionally split every line on `sep`.
    :param sep: (None|str) Separator forwarded to `str.split`.
    :return: (list) Lines as strings, or token lists when `split` is set.
    """
    with open(file, encoding=encoding) as fh:
        out = fh.read().splitlines()
    return splitlines(out, sep) if split else out
|
def splitlines(lines: list[str], sep: ty.N[str]=None) -> list[list[str]]:
    """Tokenize every line with `str.split(sep)` (whitespace when `sep` is None)."""
    return [line.split(sep) for line in lines]
|
def mymap(fn: str, iterable: ty.Iterable, type: ty.N[T]=list, **kwargs) -> ty.U[(ty.Generator, T)]:
    """Call the instance method named `fn` on every element of `iterable`.

    :param fn: (str) Method name looked up as an attribute of each item.
    :param iterable: (Iterable) Items providing the method.
    :param type: (None|type) Container to collect results into (list, tuple, set, ...);
        falsy keeps the lazy generator.
    :param kwargs: (dict) Extra keyword arguments forwarded to every method call.
    :return: (Iterable) Mapped results as `type`, or a generator when `type` is falsy.
    """
    if not isinstance(fn, str):
        raise TypeError(f'`fn` must be a str to search as an attribute of each item!')
    results = (getattr(obj, fn)(**kwargs) for obj in iterable)
    if not type:
        return results
    return type(results)
|
def _map(fn: ty.Callable, *iterables: ty.Iterable, type: T, star: bool=False) -> T:
'Map `fn` to each iterable item and convert to the specified container `type`.'
map_fn = (itertools.starmap if star else map)
return type(map_fn(fn, *iterables))
|
def iterdir(path: Path, key: ty.N[Key]=None) -> list[Path]:
    """Sorted entries of `path`, keeping only those accepted by `key` (if given)."""
    if not key:
        key = lambda f: True
    return sorted(p for p in path.iterdir() if key(p))
|
def get_dirs(path: Path, key: ty.N[Key]=None) -> list[Path]:
    """Sorted sub-directories of `path`, optionally filtered further by `key`."""
    def _filter(p):
        if not p.is_dir():
            return False
        return key(p) if key else True
    return iterdir(path, _filter)
|
def get_files(path: Path, key: ty.N[Key]=None) -> list[Path]:
    """Sorted files in `path`, optionally filtered further by `key`."""
    def _filter(p):
        if not p.is_file():
            return False
        return key(p) if key else True
    return iterdir(path, _filter)
|
def has_contents(path: Path) -> bool:
    """`True` when `path` is a directory holding at least one entry."""
    if not path.is_dir():
        return False
    return bool(iterdir(path))
|
def mkdirs(*paths: Path, exist_ok: bool=True, parents: bool=True, **kwargs) -> None:
    """`Path.mkdir` for each argument, defaulting to `mkdir -p` semantics."""
    for directory in paths:
        directory.mkdir(exist_ok=exist_ok, parents=parents, **kwargs)
|
def pil2np(img: Image, /) -> ty.A:
    """Convert a PIL image in [0, 255] to a float32 numpy array in [0, 1]."""
    arr = np.array(img, dtype=np.float32)
    return arr / 255.0
|
def np2pil(arr: ty.A, /) -> Image:
    """Convert a numpy image in [0, 1] (or already uint8 in [0, 255]) to a PIL image."""
    if arr.dtype == np.uint8:
        # Already byte-valued: no rescaling needed.
        return Image.fromarray(arr)
    assert arr.max() <= 1
    scaled = (arr * 255).astype(np.uint8)
    return Image.fromarray(scaled)
|
def write_yaml(file: Path, data: dict, mkdir: bool=False, sort_keys: bool=False) -> None:
    """Serialize `data` to `<file>.yaml`, optionally creating parent directories."""
    target = Path(file).with_suffix('.yaml')
    if mkdir:
        mkdirs(target.parent)
    with open(target, 'w') as fh:
        yaml.dump(data, fh, sort_keys=sort_keys)
|
def load_yaml(file: Path, loader: ty.N[yaml.Loader]=yaml.FullLoader) -> dict:
    """Parse a single yaml file into a dict."""
    with open(file) as fh:
        contents = yaml.load(fh, Loader=loader)
    return contents
|
def load_merge_yaml(*files: Path) -> dict:
    """Load several YAML configs and fold them into a single config.

    Merging follows `_merge_yaml`: the first file is the "base" and each
    subsequent file overrides it in turn, i.e. ((((1 <- 2) <- 3) <- 4) ... <- n).

    :param files: (Sequence[PathLike]) YAML config files, from "oldest" to "newest".
    :return: (dict) The merged config from all given files.
    """
    merged, *overlays = [load_yaml(f) for f in files]
    for overlay in overlays:
        merged = _merge_yaml(merged, overlay)
    return merged
|
def _merge_yaml(old: dict, new: dict) -> dict:
'Recursively merge two YAML cfg.\n Dictionaries are recursively merged. All other types simply update the current value.\n\n NOTE: This means that a "list of dicts" will simply be updated to whatever the new value is,\n not appended to or recursively checked!\n\n :param old: (dict) Base dictionary containing default keys.\n :param new: (dict) New dictionary containing keys to overwrite in `old`.\n :return: (dict) The merge config.\n '
d = old.copy()
for (k, v) in new.items():
d[k] = (_merge_yaml(d[k], v) if ((k in d) and isinstance(v, dict)) else v)
return d
|
class ConcatDataLoader():
    """Round-robin concatenation of several DataLoaders.

    Iteration yields one batch from each loader in turn; once a loader is
    exhausted it simply drops out of the rotation, e.g.:
        dl1 = [0, 1, 2, 3], dl2 = ['a', 'b']  ->  [0, 'a', 1, 'b', 2, 3]

    :param dls: (Sequence[DataLoader]) Dataloaders to interleave.
    """
    def __init__(self, dls: ty.S[DataLoader]):
        self.dls = dls
        print(f'-> Created Concat DataLoader with lengths: {[len(dl) for dl in self.dls]}')
    def __len__(self) -> int:
        'Total number of batches across every dataloader.'
        return sum(len(dl) for dl in self.dls)
    def __iter__(self) -> ty.BatchData:
        'Yield batches from each dataloader in turn, skipping exhausted ones.'
        for item in chain.from_iterable(zip_longest(*self.dls)):
            if item is not None:
                yield item
    def set_epoch(self, epoch: int) -> None:
        'Forward the epoch to any DistributedSampler (required for multi-GPU shuffling).'
        for dl in self.dls:
            if isinstance(dl.sampler, DistributedSampler):
                dl.sampler.set_epoch(epoch)
|
class BaseMetric(Metric):
    """Common scaffolding for depth-estimation metrics.

    Depths are compared raw, in log-space or as (clipped) disparities depending
    on `mode`; per-batch errors are accumulated (distributed-reduce: sum) so
    `compute` yields the running average over all samples seen.
    """
    higher_is_better = False
    full_state_update = False
    def __init__(self, mode: str='raw', **kwargs):
        super().__init__(**kwargs)
        if mode not in _MODES:
            raise ValueError(f'Invalid mode! ({mode} vs. {_MODES})')
        self.mode: str = mode
        # Scale factor applied when accumulating, chosen per representation.
        self.sf: int = {'raw': 1, 'log': 100, 'inv': 1000}[self.mode]
        self.add_state('metric', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('total', default=torch.tensor(0), dist_reduce_fx='sum')
    def _preprocess(self, input: ty.T, /):
        'Map depths into the working representation (raw, log or inverse).'
        if self.mode == 'log':
            return input.log()
        if self.mode == 'inv':
            # Clip to avoid blow-up when dividing by (near-)zero depths.
            return 1 / input.clip(min=0.001)
        return input
    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        '''Compute the per-sample error for one batch (implemented by subclasses).

        :param pred: (Tensor) (b, n) Predicted depth.
        :param target: (Tensor) (b, n) Target depth.
        :return: (Tensor) (b,) Computed metric.
        '''
        raise NotImplementedError
    def update(self, pred: ty.T, target: ty.T) -> None:
        '''Accumulate the (scaled) error over a whole batch.

        :param pred: (Tensor) (b, n) Predicted depths masked with NaNs.
        :param target: (Tensor) (b, n) Target depths masked with NaNs.
        '''
        err = self._compute(self._preprocess(pred), self._preprocess(target))
        self.metric += self.sf * err.sum()
        self.total += pred.shape[0]
    def compute(self) -> ty.T:
        'Average accumulated metric over all samples seen so far.'
        return self.metric / self.total
|
class MAE(BaseMetric):
    'Mean absolute error between predicted and target depth.'
    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        diff = pred - target
        return diff.abs().nanmean(dim=1)
|
class RMSE(BaseMetric):
    'Root mean squared error between predicted and target depth.'
    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        sq_err = (pred - target).pow(2)
        return sq_err.nanmean(dim=1).sqrt()
|
class ScaleInvariant(BaseMetric):
    'Scale-invariant error: std-dev form (second moment minus squared mean of the error).'
    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        diff = pred - target
        second_moment = diff.pow(2).nanmean(dim=1)
        mean_sq = diff.nanmean(dim=1).pow(2)
        return (second_moment - mean_sq).sqrt()
|
class AbsRel(BaseMetric):
    'Absolute error relative to the target depth, reported as a percentage.'
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.sf = 100  # report as a percentage regardless of mode
    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        rel = (pred - target).abs() / target
        return rel.nanmean(dim=1)
|
class SqRel(BaseMetric):
    'Squared error relative to the squared target depth, reported as a percentage.'
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.sf = 100  # report as a percentage regardless of mode
    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        rel = (pred - target).pow(2) / target.pow(2)
        return rel.nanmean(dim=1)
|
class DeltaAcc(BaseMetric):
    '''Threshold accuracy: fraction of valid pixels with max(pred/gt, gt/pred) < delta.

    :param delta: (float) Ratio threshold (commonly 1.25, 1.25**2, 1.25**3).
    '''
    higher_is_better = True
    def __init__(self, delta: float, **kwargs):
        super().__init__(**kwargs)
        if (self.mode != 'raw'):
            raise ValueError('DeltaAcc should only be computed using raw depths.')
        self.delta: float = delta
        self.sf = 100  # report as a percentage
    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        # Symmetric ratio; NaN-masked pixels stay NaN and compare False below.
        thresh = torch.max((target / pred), (pred / target))
        # FIX: normalize by the number of valid (non-NaN) pixels instead of the
        # sum of the ratio values (`thresh.nansum`), so the result is a true
        # fraction in [0, 1] rather than count / sum-of-ratios.
        n_valid = (~thresh.isnan()).sum(dim=1)
        return (thresh < self.delta).sum(dim=1) / n_valid
|
class Timer():
    """Context manager that measures the wall-clock time of a block of code.

    Attributes:
        :param name: (str) Label used when printing the timer.
        :param as_ms: (bool) If `True`, report `milliseconds`, otherwise `seconds`.
        :param sync_gpu: (bool) If `True`, synchronize CUDA on enter and exit.
        :param precision: (int) Number of decimal places in the reported time.

    Example:
    ```
    with Timer('MyTimer') as t:
        time.sleep(1)
    print(t)  # ==> MyTimer: 1.003 s
    ```
    """
    def __init__(self, name: str='Timer', as_ms: bool=False, sync_gpu: bool=False, precision: int=6) -> None:
        self.name: str = name
        self.as_ms: bool = as_ms
        self.sync_gpu: bool = sync_gpu
        self.precision: int = precision
        self._sf: int = 1000 if self.as_ms else 1
        self._units: str = 'ms' if self.as_ms else 's'
        self._sync_fn: ty.N[ty.Callable] = None
        self._start: ty.N[float] = None
        self._end: ty.N[float] = None
        if self.sync_gpu:
            # Imported lazily so torch is only required when GPU syncing is requested.
            import torch
            self._sync_fn = torch.cuda.synchronize
    def __repr__(self) -> str:
        'Constructor-style representation rebuilt from the __init__ signature.'
        params = inspect.signature(self.__init__).parameters
        pieces = [f'{key}={getattr(self, key)}' for key in params if hasattr(self, key)]
        joined = ', '.join(pieces)
        return f'{self.__class__.__qualname__}({joined})'
    def __str__(self) -> str:
        'Human-readable elapsed time.'
        return f'{self.name}: {self.elapsed} {self._units}'
    def __enter__(self) -> 'Timer':
        'Sync GPU (if requested) and record the start time.'
        if self.sync_gpu:
            self._sync_fn()
        self._start = time.perf_counter()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        'Sync GPU (if requested) and record the end time.'
        if self.sync_gpu:
            self._sync_fn()
        self._end = time.perf_counter()
    @property
    def elapsed(self) -> float:
        'Time between enter and exit, scaled to the chosen units and rounded.'
        assert self._start, '`Timer` has not begun'
        assert self._end, '`Timer` has not finished'
        return round(self._sf * (self._end - self._start), self.precision)
|
class MultiLevelTimer():
    """Nestable context-manager timer.

    NOTE: The *instance* (not the class) is used as the context manager.
    Calling the instance assigns the label for the level opened by the next
    `with`; omitting the call auto-names levels `Level{depth}`. Results are
    kept as `{label: {depth, start, end, elapsed}}`, and an internal stack of
    open labels lets exits close the most recently opened level.

    Attributes:
        :param name: (str) Global timer name.
        :param as_ms: (bool) If `True`, report `milliseconds`, otherwise `seconds`.
        :param sync_gpu: (bool) If `True`, synchronize CUDA on enter and exit.
        :param precision: (int) Number of decimal places in the reported times.

    Example:
    ```
    timer = MultiLevelTimer(name='MyTimer', as_ms=True, precision=4)
    with timer('OuterLevel'):
        time.sleep(2)
        with timer('InnerLevel'):
            time.sleep(1)
    print(timer)
    # ==> MyTimer
    #     	OuterLevel: 3002.3414 ms
    #     		InnerLevel: 1000.7601 ms
    ```
    """
    def __init__(self, name: str='Timer', as_ms: bool=False, sync_gpu: bool=False, precision: int=6) -> None:
        self.name: str = name
        self.as_ms: bool = as_ms
        self.sync_gpu: bool = sync_gpu
        self.precision: int = precision
        self.depth: int = 0
        self._sf: int = 1000 if self.as_ms else 1
        self._units: str = 'ms' if self.as_ms else 's'
        self._sync_fn: ty.N[ty.Callable] = None
        self._label: ty.N[str] = None
        self._active: list[str] = []
        self._data: dict[(str, ty.TimerData)] = {}
        if self.sync_gpu:
            # Imported lazily so torch is only required when GPU syncing is requested.
            import torch
            self._sync_fn = torch.cuda.synchronize
    def __repr__(self) -> str:
        'Constructor-style representation rebuilt from the __init__ signature.'
        params = inspect.signature(self.__init__).parameters
        pieces = [f'{key}={getattr(self, key)}' for key in params if hasattr(self, key)]
        joined = ', '.join(pieces)
        return f'{self.__class__.__qualname__}({joined})'
    def __str__(self) -> str:
        'Timer name followed by one tab-indented line per recorded label.'
        lines = [self.name]
        for label, info in self:
            lines.append(('\t' * info['depth']) + f"{label}: {info['elapsed']} {self._units}")
        return '\n'.join(lines)
    def __getitem__(self, label: str) -> ty.TimerData:
        'Timer info for the given label.'
        return self._data[label]
    def __iter__(self) -> ty.Generator[(tuple[(str, ty.TimerData)], None, None)]:
        'Yield `(label, timer-info)` pairs in insertion order.'
        yield from ((label, self._data[label]) for label in self._data)
    def __call__(self, label: str) -> 'MultiLevelTimer':
        'Stash the label for the level opened by the next `with` statement.'
        self._label = label
        return self
    def __enter__(self) -> 'MultiLevelTimer':
        'Open a new (possibly auto-named) timing level.'
        self.depth += 1
        label = self._label or f'Level{self.depth}'
        self._label = None
        if label in self._data:
            raise KeyError(f'Duplicate Timer key: {label}')
        if self.sync_gpu:
            self._sync_fn()
        self._active.append(label)
        # `start` is recorded last so the setup above is not counted.
        self._data[label] = {'depth': self.depth, 'start': time.perf_counter(), 'end': None, 'elapsed': None}
        return self
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        'Close the most recently opened level and record its elapsed time.'
        assert self._active, 'What are you doing here??'
        label = self._active.pop()
        record = self._data[label]
        if self.sync_gpu:
            self._sync_fn()
        record['end'] = time.perf_counter()
        record['elapsed'] = round(self._sf * (record['end'] - record['start']), self.precision)
        self.depth -= 1
    def reset(self) -> None:
        'Drop all recorded data (only allowed when no level is open).'
        if self._active:
            raise RuntimeError(f'Attempt to reset Timer while active: {self._active}')
        self._data = {}
    def copy(self) -> 'MultiLevelTimer':
        'Deep copy of this timer.'
        return copy.deepcopy(self)
    def to_dict(self, key: str='elapsed') -> dict:
        'Map each label to a single field of its record.'
        return {label: record[key] for label, record in self}
    @staticmethod
    def mean_elapsed(timers: ty.S['MultiLevelTimer']) -> ty.U[(ty.S, ty.FloatDict)]:
        'Average elapsed time per label across several timers (empty input is returned as-is).'
        if not timers:
            return timers
        pooled = {}
        for timer in timers:
            for label, record in timer:
                pooled.setdefault(label, []).append(record['elapsed'])
        return {label: sum(vals) / len(vals) for label, vals in pooled.items()}
|
class EnvWrapper():
    '''Thin wrapper exposing a gym-like `action_space` for a Unity task.

    :param task: (object) Task exposing a `brain` with `vector_action_space_size`.
    '''
    def __init__(self, task):
        # BUG FIX: the original read `self.brain` before it was ever assigned,
        # raising AttributeError on construction; take the brain from the
        # wrapped task instead (tasks in this module expose `.brain`).
        self.task = task
        self.brain = task.brain
        self.action_space = self.brain.vector_action_space_size
|
class CountScore():
    '''Fixed-size ring buffer of episode scores used for a running mean.

    Keeps the last `total_episode` scores; `mean_score` averages the whole
    buffer (zeros are included until the buffer has filled once).
    '''
    def __init__(self):
        self.total_episode = 100  # window size of the running average
        self.episode_rewards = np.zeros(self.total_episode)
        self.current_episode = 0  # next write position in the ring buffer
    def add_score(self, score):
        'Store `score` and advance the write cursor, wrapping at the window size.'
        self.episode_rewards[self.current_episode] = score
        self.current_episode += 1
        # FIX: wrap with the configured window size instead of a hard-coded 100,
        # so changing `total_episode` can never index out of bounds.
        self.current_episode = (self.current_episode % self.total_episode)
    def mean_score(self):
        'Mean over the whole buffer (zeros count until it first fills).'
        return np.mean(self.episode_rewards)
|
class UnityTask():
    '''Adapter around the Tennis UnityEnvironment exposing a gym-like reset/step API.

    :param name: (str) Task name (informational only).
    '''
    def __init__(self, name):
        self.brain = None
        self.brain_name = None
        self.env = self.create_unity_env()
        self.action_space = self.brain.vector_action_space_size
        self.observation_space = self.brain.vector_observation_space_size
        print(f'Action space {self.action_space}')
        print(f'State space {self.observation_space}')
        self.name = name
        self.action_dim = self.action_space
        self.state_dim = int(np.prod(self.observation_space))
        self.train_mode = True
    def extract_env_details(self, env_info):
        'Unpack (observations, rewards, done flags) from a BrainInfo step result.'
        next_state = env_info.vector_observations
        reward = env_info.rewards
        done = env_info.local_done
        return (next_state, reward, done)
    def create_unity_env(self):
        'Launch the Tennis executable and cache its default brain.'
        env = UnityEnvironment(file_name='Env\\Tennis_Windows_x86_64\\Tennis.exe')
        self.brain_name = env.brain_names[0]
        self.brain = env.brains[self.brain_name]
        return env
    def reset(self):
        'Reset the environment and return the initial observations.'
        env_info = self.env.reset(train_mode=self.train_mode)[self.brain_name]
        return self.extract_env_details(env_info)[0]
    def step(self, actions):
        'Rescale actions from [0, 1] to [-1, 1], step once and return the transition.'
        actions = ((actions - 0.5) * 2.0)
        # BUG FIX: the environment was stepped twice per call (the first result
        # was discarded), silently skipping every other transition.
        env_info = self.env.step(actions)[self.brain_name]
        (next_states, rewards, dones) = self.extract_env_details(env_info)
        return (next_states, rewards, np.array(dones))
|
class PPOAgent_Unity():
    '''PPO agent driving a Unity task.

    Rolls out trajectories with the current policy, computes GAE advantages and
    optimizes the clipped PPO objective. Relies on project helpers defined
    elsewhere (`tensor`, `Storage`, `random_sample`, `PPONetwork`, `UnityTask`).

    :param config: (Config) Hyper-parameters and runtime settings.
    '''
    def __init__(self, config):
        self.config = config
        self.task = UnityTask('reacher')
        self.network = PPONetwork(self.config.state_dim, self.config.action_dim, 1000).to('cuda:0')
        self.opt = torch.optim.Adam(self.network.parameters(), config.lr, amsgrad=True)
        self.total_steps = 0
        # Per-worker running reward for the current episode.
        self.online_rewards = np.zeros(config.num_workers)
        self.episode_rewards = []
        self.states = self.task.reset()
        self.state_normalizer = None
        self.min_lr = (self.config.lr * 0.3)
        self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.opt, gamma=0.8, step_size=200)
        self.max_score = 0
        self.episode_score = CountScore()
        # NOTE(review): `build_trajectory` appends to `self.score_data`, which is
        # never initialized here — first episode end will raise AttributeError.
        if config.play_only:
            self.load_model()
    def load_model(self):
        'Restore network weights from the configured checkpoint (CUDA only).'
        self.network.load_state_dict(torch.load(self.config.saved_checkpoint))
        self.network.to('cuda:0')
    def update_lr(self):
        'Anneal the PPO clip ratio and entropy bonus as training progresses.'
        if (self.total_steps < 1000):
            return
        # NOTE(review): this condition is truthy for every step NOT divisible by
        # 40000 — it was probably meant to be `(self.total_steps % 40000) == 0`.
        if (self.total_steps % 40000):
            if (self.config.ppo_ratio_clip > 0.1):
                self.config.ppo_ratio_clip = 0.07
        if (((self.total_steps % 30000) == 0) and (self.config.entropy_weight > 0)):
            self.config.entropy_weight -= 0.04
            if (self.config.entropy_weight < 0):
                self.config.entropy_weight = 0.0
    def build_trajectory(self, memory_buffer):
        '''Roll out `rollout_length` steps with the current policy.

        Stores predictions, rewards, termination masks and states into
        `memory_buffer`, and tracks episode scores when any worker terminates.

        :param memory_buffer: (Storage) Buffer to fill with rollout data.
        :return: (Storage) The same buffer, filled.
        '''
        states = self.states
        episode = 0
        for _ in range(self.config.rollout_length):
            states = tensor(states)
            prediction = self.network(states)
            (next_states, rewards, terminals) = self.task.step(prediction['a'].cpu().numpy())
            self.online_rewards += rewards
            if np.any(terminals):
                # Episode finished for at least one worker: log its score and reset.
                self.episode_score.add_score(np.max(self.online_rewards))
                self.score_data.append(self.episode_score.mean_score())
                self.episode_rewards.append(self.online_rewards[(- 1)])
                self.online_rewards[:] = 0
            memory_buffer.add(prediction)
            # `m` = 1 - done is a mask zeroing bootstrapped returns across episode ends.
            memory_buffer.add({'r': tensor(rewards).unsqueeze((- 1)), 'm': tensor((1 - terminals)).unsqueeze((- 1)), 's': tensor(states)})
            states = next_states
        current_score = self.episode_score.mean_score()
        print(f'Ep={episode}s current score {current_score} online rewards {self.online_rewards.mean()}')
        # NOTE(review): compares against the mean online reward but stores
        # `current_score` — confirm which quantity `max_score` should track.
        if (self.max_score < self.online_rewards.mean()):
            self.max_score = current_score
        return memory_buffer
    def step(self):
        'One training iteration: rollout, GAE/return computation and PPO update.'
        self.update_lr()
        if self.config.play_only:
            self.validate(False)
            return
        config = self.config
        memory_buffer = Storage(config.rollout_length)
        states = self.states
        self.network.eval()
        memory_buffer = self.build_trajectory(memory_buffer)
        current_score = self.online_rewards.mean()
        print(f'Current score {current_score}')
        if (not config.play_only):
            # Snapshot the network whenever the mean score improves.
            if ((self.max_score < current_score) and (not config.play_only)):
                torch.save(self.network, '/checkpoint/PPO-{}.pth'.format(int(current_score)))
                self.max_score = current_score
        self.states = states
        states = tensor(states)
        prediction = self.network(states)
        memory_buffer.add(prediction)
        memory_buffer.placeholder()
        # Backward pass over the rollout: accumulate discounted returns and
        # GAE advantages, masking across episode boundaries with `m`.
        advantages = tensor(np.zeros((config.num_workers, 1)))
        returns = prediction['v'].detach()
        for i in reversed(range(config.rollout_length)):
            returns = (memory_buffer.r[i] + ((config.discount * memory_buffer.m[i]) * returns))
            td_error = ((memory_buffer.r[i] + ((config.discount * memory_buffer.m[i]) * memory_buffer.v[(i + 1)])) - memory_buffer.v[i])
            advantages = ((((advantages * config.gae_tau) * config.discount) * memory_buffer.m[i]) + td_error)
            memory_buffer.adv[i] = advantages.detach()
            memory_buffer.ret[i] = returns.detach()
        batch_steps = self.train_agent(memory_buffer)
        steps = batch_steps
        self.total_steps += steps
        self.lr_scheduler.step()
        # NOTE(review): both branches call validate(False); the condition is redundant.
        if ((self.total_steps % 50000) == 0):
            self.validate(False)
        else:
            self.validate(False)
    def train_agent(self, memory_buffer):
        '''Optimize the clipped PPO objective over minibatches of the rollout.

        :param memory_buffer: (Storage) Rollout with states, actions, old
            log-probs, returns and advantages already filled in.
        :return: (int) Number of minibatch updates performed.
        '''
        (states, actions, log_probs_old, returns, advantages) = memory_buffer.cat(['s', 'a', 'log_pi_a', 'ret', 'adv'])
        actions = actions.detach()
        log_probs_old = log_probs_old.detach()
        # Normalize advantages for optimization stability.
        advantages = ((advantages - advantages.mean()) / advantages.std())
        sum_returns = 0
        sum_advantage = 0
        sum_policy_loss = 0
        sum_critic_loss = 0
        sum_entropy = 0
        batch_steps = 0
        self.network.train()
        config = self.config
        for ep in range(config.optimization_epochs):
            sampler = random_sample(np.arange(states.size(0)), config.mini_batch_size)
            for batch_indices in sampler:
                batch_indices = tensor(batch_indices).long()
                sampled_states = states[batch_indices]
                sampled_actions = actions[batch_indices]
                sampled_log_probs_old = log_probs_old[batch_indices]
                sampled_returns = returns[batch_indices]
                sampled_advantages = advantages[batch_indices]
                prediction = self.network(sampled_states.cuda(), sampled_actions.cuda())
                # Importance ratio between current policy and rollout-time policy.
                ratio = (prediction['log_pi_a'] - sampled_log_probs_old).exp()
                obj = (ratio * sampled_advantages)
                obj_clipped = (ratio.clamp((1.0 - self.config.ppo_ratio_clip), (1.0 + self.config.ppo_ratio_clip)) * sampled_advantages)
                # Maximized quantity: clipped surrogate plus entropy bonus.
                policy_loss = (torch.min(obj, obj_clipped).mean() + (config.entropy_weight * prediction['ent'].mean()))
                value_loss = F.smooth_l1_loss(prediction['v'], sampled_returns.view((- 1), 1))
                (sum_returns, sum_advantage, sum_policy_loss, sum_critic_loss, sum_entropy) = self.log_stats(sampled_returns, sampled_advantages, policy_loss, value_loss, prediction['ent'].mean(), batch_steps, sum_returns, sum_advantage, sum_critic_loss, sum_policy_loss, sum_entropy)
                batch_steps += 1
                self.opt.zero_grad()
                # Minimize -(policy objective) + value loss.
                (- (policy_loss - value_loss)).backward()
                nn.utils.clip_grad_norm_(self.network.parameters(), config.gradient_clip)
                self.opt.step()
        return batch_steps
    def get_lr(self):
        'Current learning rate of the first optimizer param group.'
        for param_group in self.opt.param_groups:
            return param_group['lr']
    def log_stats(self, returns, advantage, loss, critic_loss, entropy, batch_step, sum_returns, sum_advantage, sum_critic, sum_loss, sum_entropy):
        '''Accumulate running sums and push per-update averages to the logger.

        Note the argument order at the call site: `loss`/`sum_loss` track the
        policy loss and `critic_loss`/`sum_critic` the value loss.
        '''
        sum_returns += returns.mean()
        sum_advantage += advantage.mean()
        sum_critic += critic_loss
        sum_loss += loss
        sum_entropy += entropy.mean()
        logger = self.config.logger
        frame_idx = self.total_steps
        # NOTE(review): `batch_count` is computed but never used.
        batch_count = (self.config.optimization_epochs * (self.config.rollout_length / self.config.mini_batch_size))
        step_idx = (batch_step + frame_idx)
        batch_step += 1
        logger.add_scalar('returns', (sum_returns / batch_step), step_idx)
        logger.add_scalar('advantage', (sum_advantage / batch_step), step_idx)
        logger.add_scalar('loss_critic', (sum_critic / batch_step), step_idx)
        logger.add_scalar('entropy', (sum_entropy / batch_step), step_idx)
        logger.add_scalar('loss_total', (sum_loss / batch_step), step_idx)
        logger.add_scalar('lr', self.get_lr(), step_idx)
        return (sum_returns, sum_advantage, sum_loss, sum_critic, sum_entropy)
    def validate(self, fast_test=True):
        '''Run 10 evaluation episodes with the current policy and print the mean score.

        :param fast_test: (bool) Forwarded as the env `train_mode` (fast simulation).
        '''
        score = np.zeros(self.config.num_workers)
        self.network.eval()
        self.task.train_mode = fast_test
        actual_score = 0
        for i in range(10):
            print(f'Testing {i} score={np.mean(score)}')
            terminals = np.zeros(2)
            states = self.task.reset()
            ep_scores = []
            while (not all(terminals)):
                states = tensor(states)
                prediction = self.network(states)
                (next_states, rewards, terminals) = self.task.step(prediction['a'].cpu().numpy())
                score += rewards
                states = next_states
        self.task.train_mode = False
        actual_score = np.mean(score)
        print(f'Ep: 100 {actual_score}')
|
class UnityEnv():
    '''Generic adapter around a UnityEnvironment with a gym-like reset/step API.

    :param env_path: (str) Path to the Unity executable.
    :param train_mode: (bool) If `True`, reset the env in fast (training) mode.
    '''
    def __init__(self, env_path, train_mode=True):
        self.brain = None
        self.brain_name = None
        self.train_mode = train_mode
        self.env = self.create_unity_env(env_path)
        self.action_space = self.brain.vector_action_space_size
        self.observation_space = self.brain.vector_observation_space_size
        print(f'Action space {self.action_space}')
        print(f'State space {self.observation_space}')
        self.action_dim = self.action_space
        self.state_dim = int(np.prod(self.observation_space))
    def extract_env_details(self, env_info):
        'Unpack (observations, rewards, done flags) from a BrainInfo step result.'
        next_state = env_info.vector_observations
        reward = env_info.rewards
        done = env_info.local_done
        return (next_state, reward, done)
    def create_unity_env(self, env_path):
        'Launch the executable and cache its default brain.'
        env = UnityEnvironment(file_name=env_path)
        self.brain_name = env.brain_names[0]
        self.brain = env.brains[self.brain_name]
        return env
    def reset(self):
        'Reset the environment and return the initial observations.'
        env_info = self.env.reset(train_mode=self.train_mode)[self.brain_name]
        return self.extract_env_details(env_info)[0]
    def step(self, actions):
        'Clip actions to [-1, 1], step the environment once and return the transition.'
        actions = np.clip(actions, (- 1), 1)
        # BUG FIX: the environment was stepped twice per call (the first result
        # was discarded), silently skipping every other transition.
        env_info = self.env.step(actions)[self.brain_name]
        (next_states, rewards, dones) = self.extract_env_details(env_info)
        return (next_states, rewards, np.array(dones))
|
class Config():
    'Hyper-parameter and runtime settings for the PPO agent.'
    # Class-level torch device; instances additionally expose a string `device`.
    DEVICE = (torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu'))
    def __init__(self):
        self.device = ('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.action_size = 2
        self.state_dim = 8  # per-agent observation size
        self.action_dim = 2  # continuous action dimensions per agent
        self.play_only = True  # skip training and only run evaluation
        self.lr = 0.0001  # initial Adam learning rate
        self.discount = 0.97  # reward discount factor (gamma)
        self.gae_tau = 0.95  # GAE smoothing parameter (lambda)
        self.gradient_clip = 4.7  # max gradient norm during optimization
        self.rollout_length = 2000  # environment steps per rollout
        self.optimization_epochs = 10  # PPO epochs per rollout
        self.mini_batch_size = 200
        self.ppo_ratio_clip = 0.1  # PPO clipped-surrogate epsilon
        self.log_interval = 100
        self.max_steps = 400000.0
        self.entropy_weight = 0.09  # entropy bonus coefficient
        self.num_workers = 2  # parallel agents in the environment
        self.saved_checkpoint = 'checkpoint/ppo.pth'  # weights loaded when play_only
|
class Env_store():
    """Per-rollout container for environment transition tensors."""

    def __init__(self, dim, state_dim):
        # Seed every field with a tensor built from the given shape arguments.
        self.actions = tensor(dim)
        self.rewards = tensor(dim)
        self.advantage = tensor(dim)
        self.dones = tensor(dim)
        self.states = tensor(state_dim)
        self.network_output = None

    def populate(self, states, rewards, dones, model_output):
        # Overwrite the container with the most recent transition.
        self.states = tensor(states)
        self.rewards = rewards
        # Inverted done mask, shaped (n, 1) for broadcasting against values.
        self.not_dones = tensor(1 - dones).unsqueeze(-1)
        self.network_output = model_output
|
class PPONetwork(nn.Module):
    'Actor (Policy) Model.'

    def __init__(self, state_size, action_size, hidden_size):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state (per frame)
            action_size (int): Dimension of each action
            hidden_size (int): Width of the first hidden layer
        """
        super(PPONetwork, self).__init__()
        second_hidden_size = 500
        third = (second_hidden_size - 100)
        frames = 3  # the network consumes a stack of 3 observation frames
        agents = 2
        self.input_size = (state_size * frames)
        self.input = nn.Linear(self.input_size, hidden_size)
        self.hidden = nn.Linear(hidden_size, second_hidden_size)
        self.actor_body = nn.Linear(third, third)
        self.actor_head = nn.Linear(third, action_size)
        self.critic_body = nn.Linear(third, third)
        self.critic_head = nn.Linear(third, 1)
        self.policy_body = nn.Linear(second_hidden_size, third)
        self.policy_head = nn.Linear(third, third)
        init_layers = [self.input, self.hidden, self.actor_body, self.critic_body, self.policy_body]
        self.init_weights(init_layers)
        self.batch_norm = nn.BatchNorm1d(second_hidden_size)
        self.batch_norm_input = nn.BatchNorm1d(hidden_size)
        # Heads producing the Beta distribution parameters (2 action dims).
        self.alpha = nn.Linear(third, 2, bias=False)
        self.beta = nn.Linear(third, 2, bias=False)
        self.alpha.weight.data.mul_(0.125)
        self.beta.weight.data.mul_(0.125)
        self.std = nn.Parameter(torch.zeros(2))
        self.state_size = state_size
        # fix: fall back to CPU when CUDA is unavailable (device was hard-coded
        # to 'cuda:0', which crashed on CPU-only machines).
        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        self.to(device)
        summary(self, (1, self.input_size))

    def init_weights(self, layers):
        # Kaiming init suits the leaky-ReLU activations used in forward().
        for layer in layers:
            nn.init.kaiming_normal_(layer.weight)
            layer.bias.data.mul_(0.1)

    def forward(self, state, action=None):
        """Run a forward pass.

        :param state: (torch.Tensor) Batch of stacked observations.
        :param action: (torch.Tensor, optional) If given, evaluate its log-prob
            under the current policy instead of sampling a new action.
        :return: (dict) sampled action, log-prob, entropy, deterministic mean
            and state value.
        """
        x = state.view((- 1), self.input_size)
        x = F.leaky_relu(self.batch_norm_input(self.input(x)))
        x = F.leaky_relu(self.batch_norm(self.hidden(x)))
        x = F.leaky_relu(self.policy_body(x))
        # fix: F.tanh is deprecated; use torch.tanh instead
        act_x = torch.tanh(self.actor_body(x))
        mean = torch.tanh(self.actor_head(act_x))
        # softplus(.) + 1 keeps both Beta parameters > 1 (unimodal distribution)
        alpha = (F.softplus(self.alpha(act_x)) + 1)
        beta = (F.softplus(self.beta(act_x)) + 1)
        policy_dist = torch.distributions.Beta(alpha, beta)
        if (action is None):
            action = policy_dist.sample()
        log_prob = policy_dist.log_prob(action).sum((- 1)).unsqueeze((- 1))
        entropy = policy_dist.entropy().sum((- 1)).unsqueeze((- 1))
        critic_x = F.leaky_relu(self.critic_body(x))
        value = self.critic_head(critic_x)
        return {'a': action, 'log_pi_a': log_prob, 'ent': entropy, 'mean': mean, 'v': value}
|
class Storage():
    """Rollout storage: one append-only list per key, logically capped at
    `size` steps when concatenated."""

    def __init__(self, size, keys=None):
        extra_keys = [] if keys is None else keys
        default_keys = ['s', 'a', 'r', 'm', 'v', 'q', 'pi', 'log_pi', 'ent', 'adv', 'ret', 'q_a', 'log_pi_a', 'mean']
        self.keys = extra_keys + default_keys
        self.size = size
        self.reset()

    def add(self, data):
        # Append each value to the list registered under its key.
        for name, value in data.items():
            assert (name in self.keys)
            getattr(self, name).append(value)

    def placeholder(self):
        # Pad any still-empty key with None entries so indexing stays uniform.
        for name in self.keys:
            if not getattr(self, name):
                setattr(self, name, [None] * self.size)

    def reset(self):
        # Start every key off as a fresh empty list.
        for name in self.keys:
            setattr(self, name, [])

    def cat(self, keys):
        # Concatenate the first `size` entries of each requested key along dim 0.
        return map(
            lambda entries: torch.cat(entries, dim=0),
            (getattr(self, k)[:self.size] for k in keys),
        )
|
def random_sample(indices, batch_size):
    """Yield shuffled mini-batches of `batch_size` indices.

    A final, smaller batch is yielded when the number of indices is not a
    multiple of `batch_size`.
    """
    shuffled = np.asarray(np.random.permutation(indices))
    # Full batches first: trim to a multiple of batch_size and reshape.
    num_full = (len(shuffled) // batch_size) * batch_size
    yield from shuffled[:num_full].reshape(-1, batch_size)
    # Then the leftover tail, if any.
    remainder = len(shuffled) % batch_size
    if remainder:
        yield shuffled[-remainder:]
|
def tensor(x):
    """Coerce `x` to a float32 tensor on the configured device.

    Tensors are returned unchanged (no copy, no device move).
    """
    if isinstance(x, torch.Tensor):
        return x
    return torch.tensor(x, device=Config.DEVICE, dtype=torch.float32)
|
def skip_submodules(app, what, name, obj, skip, options):
    """Sphinx 'autoapi-skip-member' hook: exclude package __init__ modules
    and the diart.console / diart.argdoc subpackages from generated docs."""
    excluded_prefixes = ('diart.console', 'diart.argdoc')
    return name.endswith('__init__') or name.startswith(excluded_prefixes)
|
def setup(sphinx):
    # Sphinx extension entry point: register the autoapi skip hook so
    # __init__ and console/argdoc modules are excluded from generated docs.
    sphinx.connect('autoapi-skip-member', skip_submodules)
|
class AudioLoader():
    """Loads audio files as torch tensors at a fixed target sample rate."""

    def __init__(self, sample_rate: int, mono: bool = True):
        self.sample_rate = sample_rate
        self.mono = mono

    def load(self, filepath: FilePath) -> torch.Tensor:
        """Load an audio file into a torch.Tensor.

        Parameters
        ----------
        filepath : FilePath
            Path to an audio file

        Returns
        -------
        waveform : torch.Tensor, shape (channels, samples)
        """
        waveform, file_sample_rate = torchaudio.load(filepath)
        # Down-mix multi-channel audio by averaging channels when mono is requested.
        if self.mono and waveform.shape[0] > 1:
            waveform = waveform.mean(dim=0, keepdim=True)
        # Resample only when the file's rate differs from the target.
        if file_sample_rate != self.sample_rate:
            waveform = resample(waveform, file_sample_rate, self.sample_rate)
        return waveform

    @staticmethod
    def get_duration(filepath: FilePath) -> float:
        """Get audio file duration in seconds.

        Parameters
        ----------
        filepath : FilePath
            Path to an audio file.

        Returns
        -------
        duration : float
            Duration in seconds.
        """
        metadata = torchaudio.info(filepath)
        return metadata.num_frames / metadata.sample_rate
|
class AggregationStrategy(ABC):
    """Abstract strategy for aggregating overlapping buffers.

    Parameters
    ----------
    cropping_mode: ("strict", "loose", "center"), optional
        Defines the mode to crop buffer chunks as in pyannote.core.
        See https://pyannote.github.io/pyannote-core/reference.html#pyannote.core.SlidingWindowFeature.crop
        Defaults to "loose".
    """

    def __init__(self, cropping_mode: Literal[('strict', 'loose', 'center')] = 'loose'):
        assert (cropping_mode in ['strict', 'loose', 'center']), f'Invalid cropping mode `{cropping_mode}`'
        self.cropping_mode = cropping_mode

    @staticmethod
    def build(name: Literal[('mean', 'hamming', 'first')], cropping_mode: Literal[('strict', 'loose', 'center')] = 'loose') -> 'AggregationStrategy':
        'Build an AggregationStrategy instance based on its name'
        assert (name in ('mean', 'hamming', 'first'))
        # Resolve the concrete strategy class, then instantiate it once.
        if name == 'mean':
            strategy_class = AverageStrategy
        elif name == 'hamming':
            strategy_class = HammingWeightedAverageStrategy
        else:
            strategy_class = FirstOnlyStrategy
        return strategy_class(cropping_mode)

    def __call__(self, buffers: List[SlidingWindowFeature], focus: Segment) -> SlidingWindowFeature:
        """Aggregate chunks over a specific region.

        Parameters
        ----------
        buffers: list of SlidingWindowFeature, shapes (frames, speakers)
            Buffers to aggregate
        focus: Segment
            Region to aggregate that is shared among the buffers

        Returns
        -------
        aggregation: SlidingWindowFeature, shape (cropped_frames, speakers)
            Aggregated values over the focus region
        """
        aggregated_data = self.aggregate(buffers, focus)
        # Frame resolution of the output: focus duration split over its frames.
        frame_duration = focus.duration / aggregated_data.shape[0]
        window = SlidingWindow(start=focus.start, duration=frame_duration, step=frame_duration)
        return SlidingWindowFeature(aggregated_data, window)

    @abstractmethod
    def aggregate(self, buffers: List[SlidingWindowFeature], focus: Segment) -> np.ndarray:
        pass
|
class HammingWeightedAverageStrategy(AggregationStrategy):
    'Compute the average weighted by the corresponding Hamming-window aligned to each buffer'
    def aggregate(self, buffers: List[SlidingWindowFeature], focus: Segment) -> np.ndarray:
        # All buffers are assumed to share the same frame count and speaker
        # dimension as the first one — TODO confirm against callers.
        (num_frames, num_speakers) = buffers[0].data.shape
        (hamming, intersection) = ([], [])
        for buffer in buffers:
            # Crop the buffer to the focus region; `fixed` keeps output length constant.
            b = buffer.crop(focus, mode=self.cropping_mode, fixed=focus.duration)
            # Build a Hamming window aligned to this buffer's own sliding window,
            # then crop it identically so each weight matches its cropped frame.
            h = np.expand_dims(np.hamming(num_frames), axis=(- 1))
            h = SlidingWindowFeature(h, buffer.sliding_window)
            h = h.crop(focus, mode=self.cropping_mode, fixed=focus.duration)
            hamming.append(h.data)
            intersection.append(b.data)
        (hamming, intersection) = (np.stack(hamming), np.stack(intersection))
        # Weighted average across buffers: frames near a buffer's center weigh more.
        return (np.sum((hamming * intersection), axis=0) / np.sum(hamming, axis=0))
|
class AverageStrategy(AggregationStrategy):
    'Compute a simple average over the focus region'

    def aggregate(self, buffers: List[SlidingWindowFeature], focus: Segment) -> np.ndarray:
        # Crop every buffer to the same fixed-length focus region, then
        # take the unweighted mean across buffers.
        cropped = [
            buffer.crop(focus, mode=self.cropping_mode, fixed=focus.duration)
            for buffer in buffers
        ]
        return np.mean(np.stack(cropped), axis=0)
|
class FirstOnlyStrategy(AggregationStrategy):
    'Instead of aggregating, keep the first focus region in the buffer list'

    def aggregate(self, buffers: List[SlidingWindowFeature], focus: Segment) -> np.ndarray:
        first_buffer = buffers[0]
        return first_buffer.crop(focus, mode=self.cropping_mode, fixed=focus.duration)
|
class DelayedAggregation():
    """Aggregate aligned overlapping windows of the same duration
    across sliding buffers with a specific step and latency.

    Parameters
    ----------
    step: float
        Shift between two consecutive buffers, in seconds.
    latency: float, optional
        Desired latency, in seconds. Defaults to step.
        The higher the latency, the more overlapping windows to aggregate.
    strategy: ("mean", "hamming", "first"), optional
        Specifies how to aggregate overlapping windows. Defaults to "hamming".
        "mean": simple average
        "hamming": average weighted by the Hamming window values (aligned to the buffer)
        "first": no aggregation, pick the first overlapping window
    cropping_mode: ("strict", "loose", "center"), optional
        Defines the mode to crop buffer chunks as in pyannote.core.
        See https://pyannote.github.io/pyannote-core/reference.html#pyannote.core.SlidingWindowFeature.crop
        Defaults to "loose".
    """

    def __init__(self, step: float, latency: Optional[float]=None, strategy: Literal[('mean', 'hamming', 'first')]='hamming', cropping_mode: Literal[('strict', 'loose', 'center')]='loose'):
        self.step = step
        self.latency = latency
        self.strategy = strategy
        assert (cropping_mode in ['strict', 'loose', 'center']), f'Invalid cropping mode `{cropping_mode}`'
        self.cropping_mode = cropping_mode
        # Default to the minimum possible latency: one step.
        if (self.latency is None):
            self.latency = self.step
        assert (self.step <= self.latency), 'Invalid latency requested'
        # Number of consecutive buffers whose windows overlap the aggregation region.
        self.num_overlapping_windows = int(round((self.latency / self.step)))
        self.aggregate = AggregationStrategy.build(self.strategy, self.cropping_mode)

    def _prepend(self, output_window: SlidingWindowFeature, output_region: Segment, buffers: List[SlidingWindowFeature]):
        # At the very start of a stream (a single buffer that begins at t=0)
        # there is no accumulated context, so emit everything from t=0 up to
        # the output region's end instead of only the step-long aggregated window.
        last_buffer = buffers[(- 1)].extent
        if ((len(buffers) == 1) and (last_buffer.start == 0)):
            num_frames = output_window.data.shape[0]
            first_region = Segment(0, output_region.end)
            first_output = buffers[0].crop(first_region, mode=self.cropping_mode, fixed=first_region.duration)
            # Overwrite the trailing frames with the aggregated output values.
            first_output[(- num_frames):] = output_window.data
            resolution = (output_region.end / first_output.shape[0])
            output_window = SlidingWindowFeature(first_output, SlidingWindow(start=0, duration=resolution, step=resolution))
        return output_window

    def __call__(self, buffers: List[SlidingWindowFeature]) -> SlidingWindowFeature:
        # Aggregate the step-long region ending `latency` seconds before the
        # end of the most recent buffer.
        start = (buffers[(- 1)].extent.end - self.latency)
        region = Segment(start, (start + self.step))
        return self._prepend(self.aggregate(buffers, region), region, buffers)
|
@dataclass
class HyperParameter():
    'Represents a pipeline hyper-parameter that can be tuned by diart'

    name: Text
    'Name of the hyper-parameter (e.g. tau_active)'
    low: float
    'Lowest value that this parameter can take'
    high: float
    'Highest value that this parameter can take'

    @staticmethod
    def from_name(name: Text) -> 'HyperParameter':
        """Create a HyperParameter object given its name.

        Parameters
        ----------
        name: str
            Name of the hyper-parameter

        Returns
        -------
        HyperParameter
        """
        if name == 'tau_active':
            return TauActive
        elif name == 'rho_update':
            return RhoUpdate
        elif name == 'delta_new':
            return DeltaNew
        else:
            raise ValueError(f"Hyper-parameter '{name}' not recognized")
|
class PipelineConfig(ABC):
    """Configuration containing the required
    parameters to build and run a pipeline"""

    @property
    @abstractmethod
    def duration(self) -> float:
        'The duration of an input audio chunk (in seconds)'
        pass

    @property
    @abstractmethod
    def step(self) -> float:
        'The step between two consecutive input audio chunks (in seconds)'
        pass

    @property
    @abstractmethod
    def latency(self) -> float:
        """The algorithmic latency of the pipeline (in seconds).
        At time `t` of the audio stream, the pipeline will
        output predictions for time `t - latency`.
        """
        pass

    @property
    @abstractmethod
    def sample_rate(self) -> int:
        'The sample rate of the input audio stream'
        pass

    def get_file_padding(self, filepath: FilePath) -> Tuple[(float, float)]:
        # Compute the (left, right) zero-padding, in seconds, needed so that a
        # streaming run at this step/latency/duration covers the whole file.
        file_duration = AudioLoader(self.sample_rate, mono=True).get_duration(filepath)
        right = utils.get_padding_right(self.latency, self.step)
        left = utils.get_padding_left((file_duration + right), self.duration)
        return (left, right)
|
class Pipeline(ABC):
    'Represents a streaming audio pipeline'

    @staticmethod
    @abstractmethod
    def get_config_class() -> type:
        # Concrete pipelines return their companion PipelineConfig subclass.
        pass

    @staticmethod
    @abstractmethod
    def suggest_metric() -> BaseMetric:
        # Default evaluation metric for this pipeline.
        pass

    @staticmethod
    @abstractmethod
    def hyper_parameters() -> Sequence[HyperParameter]:
        # The tunable hyper-parameters exposed by this pipeline.
        pass

    @property
    @abstractmethod
    def config(self) -> PipelineConfig:
        # The configuration object this pipeline was built from.
        pass

    @abstractmethod
    def reset(self):
        # Restore internal state so a new stream can be processed.
        pass

    @abstractmethod
    def set_timestamp_shift(self, shift: float):
        # Shift (in seconds) to be added to all output timestamps.
        pass

    @abstractmethod
    def __call__(self, waveforms: Sequence[SlidingWindowFeature]) -> Sequence[Tuple[(Any, SlidingWindowFeature)]]:
        """Runs the next steps of the pipeline
        given a list of consecutive audio chunks.

        Parameters
        ----------
        waveforms: Sequence[SlidingWindowFeature]
            Consecutive chunk waveforms for the pipeline to ingest

        Returns
        -------
        Sequence[Tuple[Any, SlidingWindowFeature]]
            For each input waveform, a tuple containing
            the pipeline output and its respective audio
        """
        pass
|
class SpeakerDiarizationConfig(base.PipelineConfig):
    """Configuration for the streaming speaker diarization pipeline."""

    def __init__(self, segmentation: (m.SegmentationModel | None)=None, embedding: (m.EmbeddingModel | None)=None, duration: float=5, step: float=0.5, latency: ((float | Literal[('max', 'min')]) | None)=None, tau_active: float=0.6, rho_update: float=0.3, delta_new: float=1, gamma: float=3, beta: float=10, max_speakers: int=20, normalize_embedding_weights: bool=False, device: (torch.device | None)=None, sample_rate: int=16000, **kwargs):
        # Fall back to the default pyannote models when none are supplied.
        if not segmentation:
            segmentation = m.SegmentationModel.from_pyannote('pyannote/segmentation')
        self.segmentation = segmentation
        if not embedding:
            embedding = m.EmbeddingModel.from_pyannote('pyannote/embedding')
        self.embedding = embedding
        self._duration = duration
        self._sample_rate = sample_rate
        self._step = step
        # Resolve symbolic latency values: None/'min' -> step, 'max' -> duration.
        if latency is None or latency == 'min':
            resolved_latency = step
        elif latency == 'max':
            resolved_latency = duration
        else:
            resolved_latency = latency
        self._latency = resolved_latency
        # Clustering and embedding hyper-parameters.
        self.tau_active = tau_active
        self.rho_update = rho_update
        self.delta_new = delta_new
        self.gamma = gamma
        self.beta = beta
        self.max_speakers = max_speakers
        self.normalize_embedding_weights = normalize_embedding_weights
        # Prefer CUDA when available unless a device was given explicitly.
        if not device:
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.device = device

    @property
    def duration(self) -> float:
        return self._duration

    @property
    def step(self) -> float:
        return self._step

    @property
    def latency(self) -> float:
        return self._latency

    @property
    def sample_rate(self) -> int:
        return self._sample_rate
|
class SpeakerDiarization(base.Pipeline):
    # Online speaker diarization pipeline: per-chunk segmentation + overlap-aware
    # embedding + incremental clustering, with delayed aggregation of the
    # overlapping chunk predictions.

    def __init__(self, config: (SpeakerDiarizationConfig | None)=None):
        self._config = (SpeakerDiarizationConfig() if (config is None) else config)
        msg = f'Latency should be in the range [{self._config.step}, {self._config.duration}]'
        assert (self._config.step <= self._config.latency <= self._config.duration), msg
        self.segmentation = SpeakerSegmentation(self._config.segmentation, self._config.device)
        self.embedding = OverlapAwareSpeakerEmbedding(self._config.embedding, self._config.gamma, self._config.beta, norm=1, normalize_weights=self._config.normalize_embedding_weights, device=self._config.device)
        # Predictions use hamming-weighted aggregation; audio keeps the first
        # overlapping chunk (center-cropped) since raw audio needs no averaging.
        self.pred_aggregation = DelayedAggregation(self._config.step, self._config.latency, strategy='hamming', cropping_mode='loose')
        self.audio_aggregation = DelayedAggregation(self._config.step, self._config.latency, strategy='first', cropping_mode='center')
        self.binarize = Binarize(self._config.tau_active)
        self.timestamp_shift = 0
        self.clustering = None
        (self.chunk_buffer, self.pred_buffer) = ([], [])
        self.reset()

    @staticmethod
    def get_config_class() -> type:
        return SpeakerDiarizationConfig

    @staticmethod
    def suggest_metric() -> BaseMetric:
        # Standard DER without collar or overlap exclusion.
        return DiarizationErrorRate(collar=0, skip_overlap=False)

    @staticmethod
    def hyper_parameters() -> Sequence[base.HyperParameter]:
        return [base.TauActive, base.RhoUpdate, base.DeltaNew]

    @property
    def config(self) -> SpeakerDiarizationConfig:
        return self._config

    def set_timestamp_shift(self, shift: float):
        self.timestamp_shift = shift

    def reset(self):
        # Fresh clustering state and empty buffers for a new audio stream.
        self.set_timestamp_shift(0)
        self.clustering = OnlineSpeakerClustering(self.config.tau_active, self.config.rho_update, self.config.delta_new, 'cosine', self.config.max_speakers)
        (self.chunk_buffer, self.pred_buffer) = ([], [])

    def __call__(self, waveforms: Sequence[SlidingWindowFeature]) -> Sequence[tuple[(Annotation, SlidingWindowFeature)]]:
        """Diarize the next audio chunks of an audio stream.

        Parameters
        ----------
        waveforms: Sequence[SlidingWindowFeature]
            A sequence of consecutive audio chunks from an audio stream.

        Returns
        -------
        Sequence[tuple[Annotation, SlidingWindowFeature]]
            Speaker diarization of each chunk alongside their corresponding audio.
        """
        batch_size = len(waveforms)
        msg = 'Pipeline expected at least 1 input'
        assert (batch_size >= 1), msg
        # Stack the chunk waveforms into a single batch for the models.
        batch = torch.stack([torch.from_numpy(w.data) for w in waveforms])
        expected_num_samples = int(np.rint((self.config.duration * self.config.sample_rate)))
        msg = f'Expected {expected_num_samples} samples per chunk, but got {batch.shape[1]}'
        assert (batch.shape[1] == expected_num_samples), msg
        segmentations = self.segmentation(batch)
        embeddings = self.embedding(batch, segmentations)
        # Seconds per segmentation frame for this chunk duration.
        seg_resolution = (waveforms[0].extent.duration / segmentations.shape[1])
        outputs = []
        for (wav, seg, emb) in zip(waveforms, segmentations, embeddings):
            sw = SlidingWindow(start=wav.extent.start, duration=seg_resolution, step=seg_resolution)
            seg = SlidingWindowFeature(seg.cpu().numpy(), sw)
            # Map local chunk speakers to globally-consistent cluster identities.
            permuted_seg = self.clustering(seg, emb)
            self.chunk_buffer.append(wav)
            self.pred_buffer.append(permuted_seg)
            # Aggregate the overlapping buffered chunks into the delayed output.
            agg_waveform = self.audio_aggregation(self.chunk_buffer)
            agg_prediction = self.pred_aggregation(self.pred_buffer)
            agg_prediction = self.binarize(agg_prediction)
            # Apply the configured timestamp shift, if any, to all output segments.
            if (self.timestamp_shift != 0):
                shifted_agg_prediction = Annotation(agg_prediction.uri)
                for (segment, track, speaker) in agg_prediction.itertracks(yield_label=True):
                    new_segment = Segment((segment.start + self.timestamp_shift), (segment.end + self.timestamp_shift))
                    shifted_agg_prediction[(new_segment, track)] = speaker
                agg_prediction = shifted_agg_prediction
            outputs.append((agg_prediction, agg_waveform))
            # Drop the oldest buffered chunk once the overlap window is full.
            if (len(self.chunk_buffer) == self.pred_aggregation.num_overlapping_windows):
                self.chunk_buffer = self.chunk_buffer[1:]
                self.pred_buffer = self.pred_buffer[1:]
        return outputs
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.