code
stringlengths
17
6.64M
class DenseL1Error(nn.Module):
    """Per-pixel L1 error, averaged over the channel dimension.

    Returns one error value per pixel: (b, c, h, w) -> (b, 1, h, w).
    """

    def forward(self, pred: ty.T, target: ty.T) -> ty.T:
        diff = pred - target
        return diff.abs().mean(dim=1, keepdim=True)
class DenseL2Error(nn.Module):
    """Per-pixel Euclidean (L2) distance across the channel dimension."""

    def forward(self, pred: ty.T, target: ty.T) -> ty.T:
        sq_dist = (pred - target).pow(2).sum(dim=1, keepdim=True)
        # Clamp before the sqrt so the gradient at zero distance stays finite.
        return sq_dist.clamp(min=ops.eps(pred)).sqrt()
class SSIMError(nn.Module):
    """Structural similarity (SSIM) based error between two images."""

    def __init__(self):
        super().__init__()
        self.pool: nn.Module = nn.AvgPool2d(kernel_size=3, stride=1)
        self.refl: nn.Module = nn.ReflectionPad2d(padding=1)
        # Standard SSIM stabilizing constants (C1, C2).
        self.eps1: float = 0.01 ** 2
        self.eps2: float = 0.03 ** 2

    def forward(self, pred: ty.T, target: ty.T) -> ty.T:
        """Compute the structural similarity error between two images.

        :param pred: (Tensor) (b, c, h, w) Predicted reconstructed images.
        :param target: (Tensor) (b, c, h, w) Target images to reconstruct.
        :return: (Tensor) (b, c, h, w) Structural similarity error in [0, 1].
        """
        x, y = self.refl(pred), self.refl(target)
        mu_x, mu_y = self.pool(x), self.pool(y)

        # Local (co)variances via E[z^2] - E[z]^2 over the 3x3 windows.
        var_x = self.pool(x ** 2) - mu_x ** 2
        var_y = self.pool(y ** 2) - mu_y ** 2
        cov_xy = self.pool(x * y) - mu_x * mu_y

        num = (2 * mu_x * mu_y + self.eps1) * (2 * cov_xy + self.eps2)
        den = (mu_x ** 2 + mu_y ** 2 + self.eps1) * (var_x + var_y + self.eps2)

        # Map similarity in [-1, 1] to an error in [0, 1].
        return ((1 - num / den) / 2).clamp(min=0, max=1)
class PhotoError(nn.Module):
    """Photometric error blending SSIM and L1 terms.
    From Monodepth (https://arxiv.org/abs/1609.03677)

    `weight_ssim=0` disables the SSIM term, `weight_ssim=1` disables the L1
    term; anything in between is a weighted combination of both.

    Attributes:
    :param weight_ssim: (float) Weight controlling the contribution of the SSIMLoss. L1 weight is `1 - ssim_weight`.
    """

    def __init__(self, weight_ssim: float = 0.85):
        super().__init__()
        if not 0 <= weight_ssim <= 1:
            raise ValueError(f'Invalid SSIM weight. ({weight_ssim} vs. [0, 1])')

        self.weight_ssim: float = weight_ssim
        self.weight_l1: float = 1 - self.weight_ssim
        # Only instantiate the criteria that actually contribute.
        self.ssim: Optional[nn.Module] = SSIMError() if self.weight_ssim > 0 else None
        self.l1: Optional[nn.Module] = DenseL1Error() if self.weight_l1 > 0 else None

    def forward(self, pred: ty.T, target: ty.T) -> ty.T:
        """Compute the photometric error between two images.

        :param pred: (Tensor) (b, c, h, w) Predicted reconstructed images.
        :param target: (Tensor) (b, c, h, w) Target images to reconstruct.
        :return: (Tensor) (b, 1, h, w) Photometric error.
        """
        b, _, h, w = pred.shape
        total = pred.new_zeros((b, 1, h, w))
        if self.ssim:
            total += self.weight_ssim * self.ssim(pred, target).mean(dim=1, keepdim=True)
        if self.l1:
            total += self.weight_l1 * self.l1(pred, target)
        return total
@register(('img_recon', 'feat_recon', 'autoenc_recon'))
class ReconstructionLoss(nn.Module):
    """Class to compute the reconstruction loss when synthesising new views.

    Contributions:
        - Min reconstruction error: From Monodepth2 (https://arxiv.org/abs/1806.01260)
        - Static pixel automasking: From Monodepth2 (https://arxiv.org/abs/1806.01260)
        - Explainability mask: From SfM-Learner (https://arxiv.org/abs/1704.07813)
        - Uncertainty mask: From Klodt (https://openaccess.thecvf.com/content_ECCV_2018/papers/Maria_Klodt_Supervising_the_new_ECCV_2018_paper.pdf)

    :param loss_name: (str) Loss type to use. {ssim, l1, l2}
    :param use_min: (bool) If `True`, take the final loss as the minimum across all available views.
    :param use_automask: (bool) If `True`, mask pixels where the original support image has a lower loss than the warped counterpart.
    :param mask_name: (None|str) Weighting mask used. {'explainability', 'uncertainty', None}
    """

    def __init__(self, loss_name: str='ssim', use_min: bool=False, use_automask: bool=False, mask_name: ty.U[str]=None):
        super().__init__()
        self.loss_name = loss_name
        self.use_min = use_min
        self.use_automask = use_automask
        self.mask_name = mask_name
        if (self.mask_name not in {'explainability', 'uncertainty', None}):
            raise ValueError(f'Invalid mask type: {self.mask_name}')

        # Dense photometric criterion selected by name (KeyError on an invalid name).
        self._photo = {'ssim': PhotoError(weight_ssim=0.85), 'l1': DenseL1Error(), 'l2': DenseL2Error()}[self.loss_name]
        # Reduction over the "views" dim (dim 1): per-pixel min (Monodepth2) or mean.
        self._reduce = ((lambda x: x.min(dim=1, keepdim=True)[0]) if self.use_min else (lambda x: x.mean(dim=1, keepdim=True)))

    def apply_mask(self, err: ty.T, mask: ty.N[ty.T]=None) -> ty.T:
        """Apply a weighting mask to a photometric loss error.

        :param err: (Tensor) (b, n, h, w) Photometric error to mask.
        :param mask: (None|ty.T) (b, n, h, w) Optional weighting mask to apply.
        :return: (Tensor) (b, n, h, w) The weighted photometric error.
        """
        if (self.mask_name and (mask is None)):
            raise ValueError("Must provide a 'mask' when masking...")

        if (self.mask_name == 'explainability'):
            # Simple per-pixel weighting (modifies `err` in place).
            err *= mask
        elif (self.mask_name == 'uncertainty'):
            # Heteroscedastic-style weighting: attenuate by exp(-mask) and add
            # `mask` as a regularizer so the mask cannot grow unboundedly.
            err = ((err * (- mask).exp()) + mask)
        return err

    def apply_automask(self, err: ty.T, source: ty.T, target: ty.T, mask: ty.N[ty.T]=None) -> tuple[(ty.T, ty.T)]:
        """Compute and apply an automask based on the identity reconstruction error.

        :param err: (Tensor) (b, 1, h, w) The photometric error between target and warped support frames.
        :param source: (Tensor) (*n, b, 3, h, w) Original (unwarped) support images.
        :param target: (Tensor) (b, 3, h, w) Target image to reconstruct.
        :param mask: (None|Tensor) (b, n, h, w) Optional weighting mask for the photometric error.
        :return: (
            err: (Tensor) (b, 1, h, w) The automasked photometric error.
            automask: (Tensor) (b, 1, h, w) Boolean mask indicating valid pixels after automasking.
        )
        """
        # Identity ("static") error: target vs. the un-warped support images.
        err_static = self.compute_photo(source, target, mask=mask)
        # Tiny random jitter breaks ties between warped/static errors (Monodepth2).
        err_static += (ops.eps(err_static) * torch.randn_like(err_static))

        err = torch.cat((err, err_static), dim=1)
        (err, idxs) = torch.min(err, dim=1, keepdim=True)
        # `True` where the warped error (index 0) beat the identity error.
        automask = (idxs == 0)
        return (err, automask)

    def compute_photo(self, pred: ty.T, target: ty.T, mask: ty.N[ty.T]=None) -> ty.T:
        """Compute the dense photometric error between multiple predictions and a single target.

        :param pred: (Tensor) (*n, b, 3, h, w) Synthesized warped support images.
        :param target: (Tensor) (b, 3, h, w) Target image to reconstruct.
        :param mask: (None|Tensor) (b, n, h, w) Optional weighting mask for the photometric error.
        :return: (Tensor) (b, 1, h, w) The reduced photometric error.
        """
        if (pred.ndim == 4):
            # Single view: criterion already returns (b, 1, h, w).
            err = self._photo(pred, target)
        else:
            # Multiple views: fold the view dim into the batch, compute per-view
            # errors, then restore views as dim 1 -> (b, n, h, w).
            target = target[None].expand_as(pred)
            err = self._photo(pred.flatten(0, 1), target.flatten(0, 1))
            err = err.squeeze(1).unflatten(0, pred.shape[:2]).permute(1, 0, 2, 3)

        err = self.apply_mask(err, mask)
        err = self._reduce(err)
        return err

    def forward(self, pred: ty.T, target: ty.T, source: ty.N[ty.T]=None, mask: ty.N[ty.T]=None) -> ty.LossData:
        """Compute the reconstruction loss between two images.

        :param pred: (Tensor) (*n, b, 3, h, w) Synthesized warped support images.
        :param target: (Tensor) (b, 3, h, w) Target image to reconstruct.
        :param source: (None|Tensor) (*n, b, 3, h, w) Original support images (required when automasking).
        :param mask: (None|Tensor) (b, n, h, w) Optional weighting mask for the photometric error.
        :return: (
            loss: (Tensor) (,) Scalar loss.
            loss_dict: {
                (Optional) (If using automasking)
                automask: (Tensor) (b, 1, h, w) Boolean mask indicating valid pixels after automasking.
            }
        )
        """
        ld = {}
        err = self.compute_photo(pred, target, mask)

        if self.use_automask:
            if (source is None):
                raise ValueError("Must provide the original 'source' images when automasking...")
            (err, automask) = self.apply_automask(err, source, target, mask)
            ld['automask'] = automask

        loss = err.mean()
        return (loss, ld)
def l1_loss(pred: ty.T, target: ty.T) -> ty.T:
    """Dense element-wise L1 loss, |pred - target|."""
    return (pred - target).abs()
def log_l1_loss(pred: ty.T, target: ty.T) -> ty.T:
    """Dense Log-L1 loss: log(1 + |pred - target|)."""
    # Inlined absolute difference (equivalent to `l1_loss(pred, target)`).
    return (1 + (pred - target).abs()).log()
def berhu_loss(pred: ty.T, target: ty.T, delta: float=0.2, dynamic: bool=True) -> ty.T:
    """Dense berHu (reverse Huber) loss.

    Below the threshold `delta` the loss is L1; above it, it switches to a
    scaled quadratic term.

    :param pred: (Tensor) (*) Network prediction.
    :param target: (Tensor) (*) Ground-truth target.
    :param delta: (float) Threshold above which the loss switches from L1.
    :param dynamic: (bool) If `True`, set threshold dynamically, using `delta` as the max percentage error.
    :return: (Tensor) (*) The computed `berhu` loss.
    """
    diff = l1_loss(pred, target)
    # With `dynamic=True`, `delta` becomes a 0-dim tensor scaled by the max error.
    delta = delta if not dynamic else delta * diff.max()
    # FIX: use `delta ** 2` instead of `delta.pow(2)` — with `dynamic=False`,
    # `delta` is a plain float and `.pow(2)` raised an AttributeError.
    diff_delta = (diff.pow(2) + delta ** 2) / (2 * delta + ops.eps(pred))
    loss = torch.where(diff <= delta, diff, diff_delta)
    return loss
@register(('depth_regr', 'stereo_const'))
class RegressionLoss(nn.Module):
    """Supervised regression loss against (proxy) depth labels.

    NOTE: The DepthHints automask is not computed here. Instead, we rely on the `MonoDepthModule` to compute it.
    Probably not the best way of doing it, but it keeps this loss clean...

    Contributions:
        - Virtual stereo consistency: From Monodepth (https://arxiv.org/abs/1609.03677)
        - Proxy berHu regression: From Kuznietsov (https://arxiv.org/abs/1702.02706)
        - Proxy LogL1 regression: From Depth Hints (https://arxiv.org/abs/1909.09051)
        - Proxy loss automasking: From Depth Hints/Monodepth2 (https://arxiv.org/abs/1909.09051)

    :param loss_name: (str) Loss type to use. {l1, log_l1, berhu}
    :param invert: (bool) If `True`, convert depth inputs into disparity.
    :param use_automask: (bool) If `True`, use DepthHints automask based on the pred/hints errors.
    """

    def __init__(self, loss_name: str='berhu', invert: bool=False, use_automask: bool=False):
        super().__init__()
        self.loss_name = loss_name
        self.use_automask = use_automask
        self.invert = invert
        # Dense criterion selected by name (KeyError on an invalid name).
        self.criterion = {'l1': l1_loss, 'log_l1': log_l1_loss, 'berhu': berhu_loss}[self.loss_name]

    def forward(self, pred: ty.T, target: ty.T, mask: ty.N[ty.T]=None) -> ty.LossData:
        """Masked regression loss; returns the scalar loss plus the dense error/mask."""
        if self.invert:
            pred, target = to_inv(pred), to_inv(target)

        if mask is None:
            mask = torch.ones_like(target)

        dense_err = mask * self.criterion(pred, target)
        # Normalize by the number of valid (unmasked) elements.
        loss = dense_err.sum() / mask.sum()
        return loss, {'err_regr': dense_err, 'mask_regr': mask}
@register('autoencoder')
class AutoencoderNet(nn.Module):
    """Image autoencoder network. From FeatDepth (https://arxiv.org/abs/2007.10603).

    Heavily based on the Depth network with some changes:
        - Does not accept DPT encoders
        - Single decoder
        - Produces 3 sigmoid channels (RGB)
        - No skip connections, it's an autoencoder!

    :param enc_name: (str) `timm` encoder key (check `timm.list_models()`).
    :param pretrained: (bool) If `True`, load an encoder pretrained on ImageNet.
    :param dec_name: (str) Custom decoder class to use.
    :param out_scales: (int|list[int]) Multi-scale outputs to return as `2**s`.
    """

    def __init__(self, enc_name: str='resnet18', pretrained: bool=True, dec_name: str='monodepth', out_scales: ty.U[(int, ty.S[int])]=(0, 1, 2, 3)):
        super().__init__()
        self.enc_name = enc_name
        self.pretrained = pretrained
        self.dec_name = dec_name
        # Normalize a single scale into a list for uniform handling downstream.
        self.out_scales = ([out_scales] if isinstance(out_scales, int) else out_scales)

        if (self.dec_name not in DEC_REG):
            raise KeyError(f'Invalid decoder key. ({self.dec_name} vs. {DEC_REG.keys()}')

        # `features_only=True` makes timm return the multi-scale feature pyramid.
        self.encoder = timm.create_model(self.enc_name, features_only=True, pretrained=pretrained)
        self.num_ch_enc = self.encoder.feature_info.channels()  # Channels per encoder stage.
        self.enc_sc = self.encoder.feature_info.reduction()  # Downsampling factor per stage.

        # `use_skip=False`: no skip connections, since this is an autoencoder.
        self.decoder = DEC_REG[self.dec_name](num_ch_enc=self.num_ch_enc, enc_sc=self.enc_sc, upsample_mode='nearest', use_skip=False, out_sc=self.out_scales, out_ch=3, out_act='sigmoid')

    def forward(self, x: ty.T) -> ty.AutoencoderPred:
        """Run image autoencoder forward pass.

        :param x: (Tensor) (b, 3, h, w) Input image.
        :return: {
            autoenc_feats: (list[Tensor]) Autoencoder encoder multi-scale features.
            autoenc_imgs: (TensorDict) (b, 3, h/2**s, w/2**s) Multi-scale image reconstructions.
        }
        """
        feat = self.encoder(x)
        out = {'autoenc_feats': feat, 'autoenc_imgs': sort_dict(self.decoder(feat))}
        return out
class StructurePerception(nn.Module):
    """Self-attention Structure Perception Module (channel-wise attention)."""

    def forward(self, x: ty.T) -> ty.T:
        b, c, h, w = x.shape
        v = x.flatten(2)                          # (b, c, h*w)
        energy = v @ v.permute(0, 2, 1)           # (b, c, c) channel affinities.
        # Rescale each row as (row-max - energy) before the softmax,
        # as done in CADepth's structure perception module.
        energy = energy.max(dim=-1, keepdim=True)[0] - energy
        attended = energy.softmax(dim=-1) @ v     # (b, c, h*w)
        # Residual connection back onto the input features.
        return x + attended.view(b, c, h, w)
class DetailEmphasis(nn.Module):
    """Detail Emphasis Module: conv refinement followed by an SE-style channel gate.

    :param ch: (int) Number of input/output channels.
    """

    def __init__(self, ch: int):
        super().__init__()
        # 3x3 conv + BN + ReLU feature refinement.
        self.conv = nn.Sequential(conv3x3(ch, ch), nn.BatchNorm2d(ch), nn.ReLU(inplace=True))
        # Squeeze-and-excitation gate producing per-channel weights in (0, 1).
        self.att = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(ch, ch, kernel_size=1, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.Conv2d(ch, ch, kernel_size=1, stride=1, padding=0),
            nn.Sigmoid(),
        )

    def forward(self, x: ty.T) -> ty.T:
        feat = self.conv(x)
        # Residual channel re-weighting: feat + feat * gate(feat).
        return feat + (feat * self.att(feat))
@register('cadepth')
class CaDepthDecoder(nn.Module):
    """From CADepth (https://arxiv.org/abs/2112.13047)

    :param num_ch_enc: (list[int]) List of channels per encoder stage.
    :param enc_sc: (list[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (list[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """

    def __init__(self, num_ch_enc: ty.S[int], enc_sc: ty.S[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: ty.S[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')

        self.num_ch_dec = [16, 32, 64, 128, 256]
        self.convs = OrderedDict()
        # Build decoder stages from the deepest (i=4) to the shallowest (i=0).
        for i in range(4, (- 1), (- 1)):
            num_ch_in = (self.num_ch_enc[(- 1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{0}'] = conv_block(num_ch_in, num_ch_out)

            num_ch_in = self.num_ch_dec[i]
            scale_factor = (2 ** i)
            # Widen the second conv input if a skip connection exists at this scale.
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                num_ch_in += self.num_ch_enc[idx]
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{1}'] = conv_block(num_ch_in, num_ch_out)
            # Detail Emphasis attention over the (possibly concatenated) features.
            self.convs[f'detail_emphasis_{i}'] = DetailEmphasis(num_ch_in)

        for i in self.out_sc:
            self.convs[f'outconv_{i}'] = conv3x3(self.num_ch_dec[i], self.out_ch)

        self.structure_perception = StructurePerception()
        # ModuleList registers the OrderedDict convs so they appear in parameters().
        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.activation = ACT[self.out_act]

    def forward(self, feat: ty.S[ty.T]) -> dict[(int, ty.T)]:
        """Decode multi-scale predictions; dict keys are scale exponents `s` (2**s)."""
        out = {}
        # Channel self-attention applied to the deepest encoder features.
        x = self.structure_perception(feat[(- 1)])
        for i in range(4, (- 1), (- 1)):
            x = self.convs[f'upconv_{i}_{0}'](x)
            x = [F.interpolate(x, scale_factor=2, mode=self.upsample_mode)]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                x += [feat[idx]]
            x = torch.cat(x, 1)
            # Detail emphasis runs before the fusing conv at each stage.
            x = self.convs[f'detail_emphasis_{i}'](x)
            x = self.convs[f'upconv_{i}_{1}'](x)
            if (i in self.out_sc):
                out[i] = self.activation(self.convs[f'outconv_{i}'](x))
        return out
def get_discrete_bins(n: int, mode: str='linear') -> ty.T:
    """Get the discretized disparity value depending on number of bins and quantization mode.

    All modes assume that we are quantizing sigmoid disparity, and therefore are in range [0, 1].
    Quantization modes:
        - linear: Evenly spaces out all bins.
        - exp: Spaces bins out exponentially, providing finer detail at low disparity values, ie higher depth values.

    :param n: (int) Number of bins to use.
    :param mode: (str) Quantization mode. {linear, exp}
    :return: (ty.T) (1, n, 1, 1) Computed discrete disparity bins.
    """
    bins = torch.arange(n) / n
    if mode == 'exp':
        # Map linear bins x -> exp(log(d_max) * (x - 1)) == d_max**(x - 1),
        # i.e. values from 1/d_max upwards with finer spacing near zero.
        max_depth = torch.tensor(200)
        bins = torch.exp(torch.log(max_depth) * (bins - 1))
    elif mode != 'linear':
        raise ValueError(f"Invalid discretization mode. '{mode}'")
    return bins.view(1, n, 1, 1)
class SelfAttentionBlock(nn.Module):
    """Self-attention over spatial locations with 1x1 Q/K/V projections.

    :param ch: (int) Number of input (and output) channels.
    """

    def __init__(self, ch: int):
        super().__init__()
        # Channel-preserving 1x1 projections, each followed by a ReLU.
        self.query_conv = nn.Sequential(nn.Conv2d(ch, ch, kernel_size=1, padding=0), nn.ReLU(inplace=True))
        self.key_conv = nn.Sequential(nn.Conv2d(ch, ch, kernel_size=1, padding=0), nn.ReLU(inplace=True))
        self.value_conv = nn.Sequential(nn.Conv2d(ch, ch, kernel_size=1, padding=0), nn.ReLU(inplace=True))

    def forward(self, x):
        b, c, h, w = x.shape
        q = self.query_conv(x).flatten(2)                 # (b, c, h*w)
        k = self.key_conv(x).flatten(2).permute(0, 2, 1)  # (b, h*w, c)
        v = self.value_conv(x).flatten(2)                 # (b, c, h*w)
        weights = (q @ k).softmax(dim=-1)                 # (b, c, c)
        return (weights @ v).view(b, c, h, w)
@register('ddvnet')
class DDVNetDecoder(nn.Module):
    """From DDVNet (https://arxiv.org/abs/2003.13951)

    :param num_ch_enc: (list[int]) List of channels per encoder stage.
    :param enc_sc: (list[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (list[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """

    def __init__(self, num_ch_enc: ty.S[int], enc_sc: ty.S[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: ty.S[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')

        self.num_ch_dec = [16, 32, 64, 128, 256]
        # Discrete disparity volume: each output pixel predicts logits over bins.
        self.num_bins = 128
        # Frozen bin values; registered as a Parameter so they move with the module.
        self.bins = nn.Parameter(get_discrete_bins(self.num_bins, mode='linear'), requires_grad=False)

        self.convs = OrderedDict()
        # Self-attention on the deepest encoder features.
        self.convs['att'] = SelfAttentionBlock(self.num_ch_enc[(- 1)])
        # Build decoder stages from the deepest (i=4) to the shallowest (i=0).
        for i in range(4, (- 1), (- 1)):
            num_ch_in = (self.num_ch_enc[(- 1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{0}'] = conv_block(num_ch_in, num_ch_out)

            num_ch_in = self.num_ch_dec[i]
            scale_factor = (2 ** i)
            # Widen the second conv input if a skip connection exists at this scale.
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                num_ch_in += self.num_ch_enc[idx]
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{1}'] = conv_block(num_ch_in, num_ch_out)

        for i in self.out_sc:
            # Each output channel gets its own group of `num_bins` logits.
            self.convs[f'outconv_{i}'] = conv3x3(self.num_ch_dec[i], (self.num_bins * self.out_ch))

        # ModuleList registers the OrderedDict convs so they appear in parameters().
        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.activation = ACT[self.out_act]
        # Cache of the last forward's raw logits per scale.
        # NOTE(review): kept alive across forward calls — holds the last graph's tensors.
        self.logits = {}

    def expected_disparity(self, logits: ty.T) -> ty.T:
        """Maps discrete disparity logits into the expected weighted disparity.

        :param logits: (ty.T) (b, n, h, w) Raw unnormalized predicted probabilities.
        :return: (ty.T) (b, 1, h, w) Expected disparity map.
        """
        probs = logits.softmax(dim=1)
        # Soft (differentiable) expectation over the discrete bins.
        disp = (probs * self.bins).sum(dim=1, keepdim=True)
        return disp

    def argmax_disparity(self, logits: ty.T) -> ty.T:
        """Maps discrete disparity logits to the single most likely bin value (hard, non-differentiable)."""
        idx = logits.argmax(dim=1)
        one_hot = F.one_hot(idx, self.num_bins).permute(0, 3, 1, 2)
        disp = (one_hot * self.bins).sum(dim=1, keepdim=True)
        return disp

    def forward(self, feat: ty.S[ty.T]) -> dict[(int, ty.T)]:
        """Decode multi-scale disparity maps; dict keys are scale exponents `s` (2**s)."""
        out = {}
        x = self.convs['att'](feat[(- 1)])
        for i in range(4, (- 1), (- 1)):
            x = self.convs[f'upconv_{i}_{0}'](x)
            x = [F.interpolate(x, scale_factor=2, mode=self.upsample_mode)]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                x += [feat[idx]]
            x = torch.cat(x, 1)
            x = self.convs[f'upconv_{i}_{1}'](x)
            if (i in self.out_sc):
                logits = self.convs[f'outconv_{i}'](x)
                self.logits[i] = logits
                # One expected-disparity map per `num_bins`-sized chunk of logits.
                out[i] = torch.cat([self.expected_disparity(l) for l in logits.chunk(self.out_ch, dim=1)], dim=1)
        return out
def upsample_block(in_ch: int, out_ch: int, upsample_mode: str='nearest') -> nn.Module:
    """Conv block, x2 upsample, conv block — for stages without skip connections."""
    layers = [
        conv_block(in_ch, out_ch),
        nn.Upsample(scale_factor=2, mode=upsample_mode),
        conv_block(out_ch, out_ch),
    ]
    return nn.Sequential(*layers)
class ChannelAttention(nn.Module):
    """Channel Attention Module incorporating Squeeze & Excitation.

    :param in_ch: (int) Number of input channels.
    :param ratio: (int) Channel reduction ratio in the bottleneck.
    """

    def __init__(self, in_ch: int, ratio: int=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(in_ch, in_ch // ratio, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(in_ch // ratio, in_ch, bias=False),
        )
        self.init_weights()

    def init_weights(self):
        """Kaiming weight initialization of the SE bottleneck.

        FIX: the original checked `isinstance(m, nn.Conv2d)`, but this module
        only contains `nn.Linear` layers, so the init was silently a no-op.
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')

    def forward(self, x):
        att = self.avg_pool(x)  # (b, c, 1, 1)
        # FIX: `flatten(1)` (not `.squeeze()`) keeps the batch dim when b == 1.
        att = self.fc(att.flatten(1)).sigmoid()  # (b, c)
        return x * att[..., None, None]
class AttentionBlock(nn.Module):
    """Upsampling decoder block applying channel attention to the fused features.

    :param in_ch: (int) Number of input channels.
    :param skip_ch: (int) Number of channels in skip connection features.
    :param out_ch: (None|int) Number of output channels.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    """

    def __init__(self, in_ch: int, skip_ch: int, out_ch: ty.N[int]=None, upsample_mode: str='nearest'):
        super().__init__()
        self.in_ch = in_ch + skip_ch
        self.out_ch = out_ch or in_ch
        self.upsample_mode = upsample_mode
        self.layers = nn.Sequential(
            ChannelAttention(self.in_ch),
            conv3x3(self.in_ch, self.out_ch),
            nn.ReLU(inplace=True),
        )

    def forward(self, x, x_skip):
        up = F.interpolate(x, scale_factor=2, mode=self.upsample_mode)
        fused = torch.cat((up, x_skip), dim=1)
        return self.layers(fused)
@register('diffnet')
class DiffNetDecoder(nn.Module):
    """From DiffNet (https://arxiv.org/abs/2110.09482)

    :param num_ch_enc: (list[int]) List of channels per encoder stage.
    :param enc_sc: (list[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (list[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """

    def __init__(self, num_ch_enc: ty.S[int], enc_sc: ty.S[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: ty.S[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')

        self.num_ch_dec = [16, 32, 64, 128, 256]
        self.convs = nn.ModuleDict()
        # Build decoder stages from the deepest (i=4) to the shallowest (i=0).
        for i in range(4, (- 1), (- 1)):
            num_ch_in = (self.num_ch_enc[(- 1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                # Attention-based fusion when a skip connection exists at this scale.
                idx = self.enc_sc.index(scale_factor)
                num_ch_skip = self.num_ch_enc[idx]
                self.convs[f'upconv_{i}'] = AttentionBlock(num_ch_in, num_ch_skip, num_ch_out, self.upsample_mode)
            else:
                # Plain upsampling block otherwise.
                self.convs[f'upconv_{i}'] = upsample_block(num_ch_in, num_ch_out, self.upsample_mode)

        # NOTE(review): output convs are built for scales 0-3 regardless of `out_sc`;
        # `out_sc` only controls which outputs are returned in `forward`.
        for i in range(4):
            self.convs[f'outconv_{i}'] = conv3x3(self.num_ch_dec[i], self.out_ch)

        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.activation = ACT[self.out_act]

    def forward(self, feat: ty.S[ty.T]) -> dict[(int, ty.T)]:
        """Decode multi-scale predictions; dict keys are scale exponents `s` (2**s)."""
        out = {}
        x = feat[(- 1)]
        for i in range(4, (- 1), (- 1)):
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                x = self.convs[f'upconv_{i}'](x, feat[idx])
            else:
                x = self.convs[f'upconv_{i}'](x)
            if (i in self.out_sc):
                out[i] = self.activation(self.convs[f'outconv_{i}'](x))
        return out
class FSEBlock(nn.Module):
    """Feature Squeeze-and-Excitation fusion block (used by the HRDepth decoder).

    Upsamples `x` by 2, concatenates the skip features, re-weights the
    concatenated channels with an SE gate, then projects with a 1x1 conv.

    :param in_ch: (int) Number of input channels.
    :param skip_ch: (int) Total number of channels in the skip connection features.
    :param out_ch: (None|int) Number of output channels (defaults to `in_ch`).
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    """

    def __init__(self, in_ch: int, skip_ch: int, out_ch: ty.N[int]=None, upsample_mode: str='nearest'):
        super().__init__()
        self.in_ch = (in_ch + skip_ch)
        self.out_ch = (out_ch or in_ch)
        self.upsample_mode = upsample_mode
        self.reduction = 16  # SE bottleneck reduction ratio.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.se = nn.Sequential(
            nn.Linear(self.in_ch, (self.in_ch // self.reduction), bias=False),
            nn.ReLU(inplace=True),
            nn.Linear((self.in_ch // self.reduction), self.in_ch, bias=False),
        )
        self.conv = nn.Sequential(conv1x1(self.in_ch, self.out_ch, bias=True), nn.ReLU(inplace=True))

    def forward(self, x: ty.T, xs_skip: ty.S[ty.T]) -> ty.T:
        x = F.interpolate(x, scale_factor=2, mode=self.upsample_mode)
        x = torch.cat([x, *xs_skip], dim=1)
        # FIX: `flatten(1)` instead of `.squeeze()` — squeeze dropped the batch
        # dim when b == 1 and only worked via implicit broadcasting afterwards.
        y = self.avg_pool(x).flatten(1)   # (b, c)
        y = self.se(y).sigmoid()
        y = y[..., None, None].expand_as(x)
        return self.conv(x * y)
@register('hrdepth')
class HRDepthDecoder(nn.Module):
    """From HRDepth (https://arxiv.org/pdf/2012.07356.pdf)

    Densely-connected decoder: nodes are indexed `f'{row}{col}'`, where `row`
    is the scale (0 = finest) and `col` is the nesting depth; `col == 0` holds
    the raw encoder features.

    :param num_ch_enc: (ty.S[int]) List of channels per encoder stage.
    :param enc_sc: (ty.S[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) Must be `True`; HRDepth relies on skip connections.
    :param out_sc: (ty.S[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """

    def __init__(self, num_ch_enc: ty.S[int], enc_sc: ty.S[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: ty.S[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        if (not self.use_skip):
            raise ValueError('HRDepth decoder must use skip connections.')

        if (len(self.enc_sc) == 4):
            # 4-scale backbones get a duplicated, upsampled first stage (see `forward`).
            warnings.warn('HRDepth requires 5 scales, but the provided backbone has only 4. The first scale will be duplicated and upsampled!')
            self.enc_sc = ([(self.enc_sc[0] // 2)] + self.enc_sc)
            self.num_ch_enc = ([self.num_ch_enc[0]] + self.num_ch_enc)

        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')
        self.activation = ACT[self.out_act]

        # Decoder channels: half the encoder channels, with an extra halved entry at scale 0.
        self.num_ch_dec = [(ch // 2) for ch in self.num_ch_enc[1:]]
        self.num_ch_dec = ([(self.num_ch_dec[0] // 2)] + self.num_ch_dec)

        # Node ids of the dense-connection grid, in computation order.
        self.all_idx = ['01', '11', '21', '31', '02', '12', '22', '03', '13', '04']
        # Nodes fused with the FSE attention block vs. plain nested convs.
        self.att_idx = ['31', '22', '13', '04']
        self.non_att_idx = ['01', '11', '21', '02', '12', '03']

        self.convs = nn.ModuleDict()
        # Per-node channel-halving conv pairs (`_conv_0`, and `_conv_1` for node 04).
        for j in range(5):
            for i in range((5 - j)):
                ch_in = self.num_ch_enc[i]
                if ((i == 0) and (j != 0)):
                    ch_in //= 2
                if ((i == 0) and (j == 4)):
                    ch_in = (self.num_ch_enc[(i + 1)] // 2)
                ch_out = (ch_in // 2)
                self.convs[f'{i}{j}_conv_0'] = conv_block(ch_in, ch_out)
                if ((i == 0) and (j == 4)):
                    ch_in = ch_out
                    ch_out = self.num_ch_dec[i]
                    self.convs[f'{i}{j}_conv_1'] = conv_block(ch_in, ch_out)

        # FSE attention fusion for the attention nodes.
        for idx in self.att_idx:
            (row, col) = (int(idx[0]), int(idx[1]))
            self.convs[f'{idx}_att'] = FSEBlock(in_ch=(self.num_ch_enc[(row + 1)] // 2), skip_ch=(self.num_ch_enc[row] + (self.num_ch_dec[(row + 1)] * (col - 1))), upsample_mode=self.upsample_mode)

        # Plain nested-conv fusion (with a 1x1 channel-reduction for col > 1).
        for idx in self.non_att_idx:
            (row, col) = (int(idx[0]), int(idx[1]))
            if (col == 1):
                self.convs[f'{(row + 1)}{(col - 1)}_conv_1'] = conv_block(in_ch=((self.num_ch_enc[(row + 1)] // 2) + self.num_ch_enc[row]), out_ch=self.num_ch_dec[(row + 1)])
            else:
                self.convs[f'{idx}_down'] = conv1x1(in_ch=(((self.num_ch_enc[(row + 1)] // 2) + self.num_ch_enc[row]) + (self.num_ch_dec[(row + 1)] * (col - 1))), out_ch=(2 * self.num_ch_dec[(row + 1)]), bias=False)
                self.convs[f'{(row + 1)}{(col - 1)}_conv_1'] = conv_block(in_ch=(2 * self.num_ch_dec[(row + 1)]), out_ch=self.num_ch_dec[(row + 1)])

        # Output heads, one per requested scale.
        channels = self.num_ch_dec
        for (i, c) in enumerate(channels):
            if (i in self.out_sc):
                self.convs[f'outconv_{i}'] = nn.Sequential(conv3x3(c, self.out_ch), self.activation)

        self.decoder = nn.ModuleList(list(self.convs.values()))

    def nested_conv(self, convs: ty.S[nn.Module], x: ty.T, xs_skip: ty.S[ty.T]) -> ty.T:
        """Upsample via `convs[0]`, concat skips, optionally reduce (`convs[2]`), then fuse (`convs[1]`)."""
        x = F.interpolate(convs[0](x), scale_factor=2, mode=self.upsample_mode)
        x = torch.cat([x, *xs_skip], dim=1)
        if (len(convs) == 3):
            # 1x1 `_down` conv reduces the concatenated channels first.
            x = convs[2](x)
        x = convs[1](x)
        return x

    def forward(self, enc_features: ty.S[ty.T]) -> dict[(int, ty.T)]:
        """Decode multi-scale predictions; dict keys are scale exponents `s` (2**s)."""
        if (len(enc_features) == 4):
            # Synthesize the missing finest scale by upsampling the first stage.
            enc_features = ([F.interpolate(enc_features[0], scale_factor=2, mode=self.upsample_mode)] + enc_features)

        # Grid nodes: start with the encoder features in column 0.
        feat = {f'{i}0': f for (i, f) in enumerate(enc_features)}
        for idx in self.all_idx:
            (row, col) = (int(idx[0]), int(idx[1]))
            # All previously computed nodes in this row act as skip inputs.
            xs_skip = [feat[f'{row}{i}'] for i in range(col)]
            if (idx in self.att_idx):
                feat[f'{idx}'] = self.convs[f'{idx}_att'](self.convs[f'{(row + 1)}{(col - 1)}_conv_0'](feat[f'{(row + 1)}{(col - 1)}']), xs_skip)
            elif (idx in self.non_att_idx):
                conv = [self.convs[f'{(row + 1)}{(col - 1)}_conv_0'], self.convs[f'{(row + 1)}{(col - 1)}_conv_1']]
                if (col != 1):
                    conv.append(self.convs[f'{idx}_down'])
                feat[f'{idx}'] = self.nested_conv(conv, feat[f'{(row + 1)}{(col - 1)}'], xs_skip)

        # Final full-resolution branch from node 04.
        x = feat['04']
        x = self.convs['04_conv_0'](x)
        x = self.convs['04_conv_1'](F.interpolate(x, scale_factor=2, mode=self.upsample_mode))

        out_feat = [x, feat['04'], feat['13'], feat['22']]
        out = {i: self.convs[f'outconv_{i}'](f) for (i, f) in enumerate(out_feat) if (i in self.out_sc)}
        return out
def main():
    """Smoke test: run HRDepthDecoder on random encoder-shaped features and print output shapes."""
    num_enc_ch = [64, 64, 128, 256, 512]
    enc_sc = [2, 4, 8, 16, 32]
    b, h, w = 4, 256, 512
    # One random feature map per encoder stage, at its downsampled resolution.
    enc_features = [torch.rand((b, c, h // s, w // s)) for s, c in zip(enc_sc, num_enc_ch)]

    net = HRDepthDecoder(num_ch_enc=num_enc_ch, enc_sc=enc_sc, out_sc=range(4), out_ch=1)
    out = net(enc_features)
    # FIX: plain loop instead of a list comprehension used only for its
    # `print` side effects (built and discarded a useless list of Nones).
    for key, val in out.items():
        print(key, val.shape)
@register('monodepth')
class MonodepthDecoder(nn.Module):
    """From Monodepth(2) (https://arxiv.org/abs/1806.01260)

    Generic convolutional decoder incorporating multi-scale predictions and skip connections.

    :param num_ch_enc: (ty.S[int]) List of channels per encoder stage.
    :param enc_sc: (ty.S[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (ty.S[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """

    def __init__(self, num_ch_enc: ty.S[int], enc_sc: ty.S[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: ty.S[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')
        self.act = ACT[self.out_act]

        self.num_ch_dec = [16, 32, 64, 128, 256]
        self.convs = OrderedDict()
        # Build decoder stages from the deepest (i=4) to the shallowest (i=0).
        for i in range(4, (- 1), (- 1)):
            num_ch_in = (self.num_ch_enc[(- 1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{0}'] = conv_block(num_ch_in, num_ch_out)

            num_ch_in = self.num_ch_dec[i]
            sf = (2 ** i)
            # Widen the second conv input if a skip connection exists at this scale.
            if (self.use_skip and (sf in self.enc_sc)):
                idx = self.enc_sc.index(sf)
                num_ch_in += self.num_ch_enc[idx]
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{1}'] = conv_block(num_ch_in, num_ch_out)

        for i in self.out_sc:
            self.convs[f'outconv_{i}'] = conv3x3(self.num_ch_dec[i], self.out_ch)

        # ModuleList registers the OrderedDict convs so they appear in parameters().
        self.decoder = nn.ModuleList(list(self.convs.values()))

    def forward(self, feat: ty.S[ty.T]) -> dict[(int, ty.T)]:
        """Decode multi-scale predictions; dict keys are scale exponents `s` (2**s)."""
        out = {}
        x = feat[(- 1)]
        for i in range(4, (- 1), (- 1)):
            x = self.convs[f'upconv_{i}_{0}'](x)
            x = [F.interpolate(x, scale_factor=2, mode=self.upsample_mode)]
            sf = (2 ** i)
            if (self.use_skip and (sf in self.enc_sc)):
                idx = self.enc_sc.index(sf)
                x += [feat[idx]]
            x = torch.cat(x, 1)
            x = self.convs[f'upconv_{i}_{1}'](x)
            if (i in self.out_sc):
                out[i] = self.act(self.convs[f'outconv_{i}'](x))
        return out
class SubPixelConv(nn.Module): def __init__(self, ch_in: int, up_factor: int): super().__init__() ch_out = (ch_in * (up_factor ** 2)) self.conv = nn.Conv2d(ch_in, ch_out, kernel_size=(3, 3), groups=ch_in, padding=1) self.shuffle = nn.PixelShuffle(up_factor) self.init_weights() def init_weights(self): nn.init.zeros_(self.conv.bias) self.conv.weight = nn.Parameter(self.conv.weight[::4].repeat_interleave(4, 0)) def forward(self, x): return self.shuffle(self.conv(x))
@register('superdepth')
class SuperdepthDecoder(nn.Module):
    """From SuperDepth (https://arxiv.org/abs/1806.01260)

    Generic convolutional decoder incorporating multi-scale predictions and skip connections.
    Unlike `MonodepthDecoder`, upsampling is learned via sub-pixel convolutions rather than
    fixed interpolation, and each output head upsamples its prediction to full resolution.

    :param num_ch_enc: (ty.S[int]) List of channels per encoder stage.
    :param enc_sc: (ty.S[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...} (unused here; kept for API parity)
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (ty.S[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """
    def __init__(self, num_ch_enc: ty.S[int], enc_sc: ty.S[int], upsample_mode: str = 'nearest',
                 use_skip: bool = True, out_sc: ty.S[int] = (0, 1, 2, 3), out_ch: int = 1,
                 out_act: str = 'sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        if self.out_act not in ACT:
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. \n{tuple(ACT.keys())}')
        self.activation = ACT[self.out_act]

        # Fixed decoder channels, indexed by stage (0 = full resolution).
        self.num_ch_dec = [16, 32, 64, 128, 256]
        self.convs = OrderedDict()
        # Build decoder stages from deepest (i=4) to shallowest (i=0).
        for i in range(4, -1, -1):
            # First conv + learned 2x sub-pixel upsampling.
            num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{0}'] = nn.Sequential(
                conv_block(num_ch_in, num_ch_out),
                SubPixelConv(num_ch_out, up_factor=2),
                nn.ReLU(inplace=True),
            )

            # Second conv: optionally concatenated with the matching skip feature.
            num_ch_in = self.num_ch_dec[i]
            scale_factor = 2 ** i
            if self.use_skip and (scale_factor in self.enc_sc):
                idx = self.enc_sc.index(scale_factor)
                num_ch_in += self.num_ch_enc[idx]
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{1}'] = conv_block(num_ch_in, num_ch_out)

        # Output heads: scale 0 predicts directly; deeper scales sub-pixel upsample to full res.
        for i in self.out_sc:
            if i == 0:
                self.convs[f'outconv_{i}'] = nn.Sequential(
                    conv3x3(self.num_ch_dec[i], self.out_ch),
                    self.activation,
                )
            else:
                self.convs[f'outconv_{i}'] = nn.Sequential(
                    conv_block(self.num_ch_dec[i], self.out_ch),
                    SubPixelConv(self.out_ch, up_factor=2 ** i),
                    self.activation,
                )

        # Register all convs as submodules so they are tracked by autograd/optimizers.
        self.decoder = nn.ModuleList(list(self.convs.values()))

    def forward(self, feat: ty.S[ty.T]) -> dict[int, ty.T]:
        """Decode multi-scale encoder features into per-scale activated outputs.

        :param feat: (ty.S[Tensor]) Encoder features, ordered shallow -> deep.
        :return: (dict[int, Tensor]) Activated outputs keyed by scale in `out_sc`.
        """
        out = {}
        x = feat[-1]
        for i in range(4, -1, -1):
            x = [self.convs[f'upconv_{i}_{0}'](x)]  # Upsampling is inside the conv block here.
            sf = 2 ** i
            if self.use_skip and (sf in self.enc_sc):
                idx = self.enc_sc.index(sf)
                x += [feat[idx]]
            x = torch.cat(x, 1)
            x = self.convs[f'upconv_{i}_{1}'](x)
            if i in self.out_sc:
                out[i] = self.convs[f'outconv_{i}'](x)
        return out
def _load_roots() -> tuple[(Paths, Paths)]: 'Helper to load the additional model & data roots from the repo config.' file = (REPO_ROOT / 'PATHS.yaml') if (not file.is_file()): warnings.warn(_msg.format(file=file)) return ([], []) paths = io.load_yaml(file) return (io.lmap(Path, paths['MODEL_ROOTS']), io.lmap(Path, paths['DATA_ROOTS']))
def _build_paths(names: ty.StrDict, roots: Paths, key: str='') -> ty.PathDict: 'Helper to build the paths from a list of possible `roots`.\n NOTE: This returns the FIRST found path given by the order of roots. I.e. ordered by priority.\n ' paths = {} for (k, v) in names.items(): try: paths[k] = next((p for r in roots if (p := (r / v)).exists())) logging.debug(f"Found {key} path '{k}': {paths[k]}") except StopIteration: logging.warning(f"No valid {key} path found for '{k}:{v}'!") return paths
def find_model_file(name: str) -> Path: 'Helper to find a model file in the available roots.' if (p := Path(name)).is_file(): return p try: return next((p for r in MODEL_ROOTS if (p := (r / name)).is_file())) except StopIteration: raise FileNotFoundError(f'No valid path found for {name} in {MODEL_ROOTS}...')
def find_data_dir(name: str) -> Path: 'Helper to find a dataset directory in the available roots.' if (p := Path(name)).is_dir(): return p try: return next((p for r in DATA_ROOTS if (p := (r / name)).is_file())) except StopIteration: raise FileNotFoundError(f'No valid path found for {name} in {DATA_ROOTS}...')
def trigger_nets() -> None: 'Trigger adding all networks to the registry.' with Timer(as_ms=True) as t: from src import networks logger.debug(f'Triggered registry networks in {t.elapsed}ms...')
def trigger_datas() -> None: 'Trigger adding all datasets to the registry.' with Timer(as_ms=True) as t: from src import datasets logger.debug(f'Triggered registry datasets in {t.elapsed}ms...')
def trigger_losses() -> None: 'Trigger adding all losses to the registry.' with Timer(as_ms=True) as t: from src import losses, regularizers logger.debug(f'Triggered registry losses in {t.elapsed}ms...')
def trigger_preds() -> None: 'Trigger adding all predictors to the registry.' with Timer(as_ms=True) as t: from src.core import predictors logger.debug(f'Triggered registry predictors in {t.elapsed}ms...')
def trigger_decoders() -> None: 'Trigger adding all predictors to the registry.' with Timer(as_ms=True) as t: from src.networks import decoders logger.debug(f'Triggered registry decoders in {t.elapsed}ms...')
def register(name: ty.U[(str, tuple[str])], type: ty.N[str]=None, overwrite: bool=False) -> CLS: "Class decorator to build a registry of networks, losses & data available during training.\n\n Example:\n ```\n # Register using default naming conventions. See `_NAME2TYPE`.\n @register('my_net')\n class MyNet(nn.Module): ...\n\n # Register to specific type.\n @register('my_loss', type='loss')\n class MyClass(nn.Module): ...\n\n # Register multiple names for the same class.\n @register(('my_dataset1', 'my_dataset2'))\n class MyDataset(Dataset): ...\n ```\n\n :param name: (str|Sequence[str]) Key(s) used to access class in the registry.\n :param type: (None|str) Registry to use. If `None`, guess from class name. {None, net, loss, data, pred}\n :param overwrite: (bool) If `True`, overwrite class `name` in registry `type`.\n :return:\n " def _guess_type(cls: CLS) -> str: 'Helper to identify registry `type` from class name.' try: return next((v for (k, v) in _NAME2TYPE.items() if cls.__name__.endswith(k))) except StopIteration: raise ValueError(f'Class matched no known patterns. ({cls.__name__} vs. {set(_NAME2TYPE)})') def wrapper(cls: CLS) -> CLS: 'Decorator adding `cls` to the specified registry.' if (cls.__module__ == '__main__'): logger.warning(f"Ignoring class '{cls.__name__}' created in the '__main__' module.") return cls ns = ((name,) if isinstance(name, str) else name) t = (type or _guess_type(cls)) if (t not in _REG): raise TypeError(f'Invalid `type`. ({t} vs. {set(_REG)})') reg = _REG[t] for n in ns: if ((not overwrite) and (tgt := reg.get(n))): raise ValueError(f"'{n}' already in '{t}' registry ({tgt} vs. {cls}). Set `overwrite=True` to overwrite.") logger.debug(f"Added '{n}' to the '{t}' registry...") reg[n] = cls return cls return wrapper
@register('disp_mask') class MaskReg(nn.Module): 'Class implementing photometric loss masking regularization.\n From SfM-Learner (https://arxiv.org/abs/1704.07813)\n\n Based on the `explainability` mask, which predicts a weighting factor for each pixel in the photometric loss.\n To avoid the degenerate solution where all pixels are ignored, this regularization pushes all values towards 1\n using binary cross-entropy.\n ' def forward(self, x: ty.T) -> ty.LossData: 'Mask regularization forward pass.\n\n :param x: (Tensor) (*) Input sigmoid explainability mask.\n :return: {\n loss: (Tensor) (,) Computed loss.\n loss_dict: (TensorDict) {}.\n }\n ' loss = F.binary_cross_entropy(x, torch.ones_like(x)) return (loss, {})
@register('disp_occ') class OccReg(nn.Module): 'Class implementing disparity occlusion regularization.\n From DVSO (https://arxiv.org/abs/1807.02570)\n\n This regularization penalizes the overall disparity in the image, encouraging the network to select background\n disparities.\n\n NOTE: In this case we CANNOT apply mean normalization to the input disparity. By definition, this fixes the mean of\n all elements to 1, meaning the loss is impossible to minimize.\n\n NOTE: The benefits of applying this regularization to purely monocular supervision are unclear,\n since the loss could simply be optimized by making all disparities smaller.\n\n :param invert: (bool) If `True`, encourage foreground disparities instead of background.\n ' def __init__(self, invert: bool=False): super().__init__() self.invert = invert self._sign = ((- 1) if self.invert else 1) def forward(self, x: ty.T) -> ty.LossData: 'Occlusion regularization forward pass.\n\n :param x: (Tensor) (*) Input sigmoid disparities.\n :return: {\n loss: (Tensor) (,) Computed loss.\n loss_dict: (TensorDict) {}.\n }\n ' loss = (self._sign * x.mean()) return (loss, {})
def get_device(device: ty.N[ty.U[(str, torch.device)]]=None, /) -> torch.device: 'Create torch device from str or device. Defaults to CUDA if available.' if isinstance(device, torch.device): return device device = (device or ('cuda' if torch.cuda.is_available() else 'cpu')) return torch.device(device)
def get_latest_ckpt(path: PathLike, ignore: ty.S[str]=None, reverse: bool=False, suffix: str='.ckpt') -> ty.N[Path]: 'Return latest or earliest checkpoint in the directory. Assumes files can be sorted in a meaningful way.\n\n :param path: (PathLike) Directory to search in.\n :param ignore: (ty.S[str]) Filenames to ignore, e.g. corrupted?\n :param reverse: (bool) If `True`, return earliest checkpoint.\n :param suffix: (str) Expected checkpoint file extension.\n :return: (Path) Latest checkpoint file or `None`.\n ' path = Path(path) ignore = (ignore or []) if (('last' not in ignore) and (last_file := (path / ('last' + suffix))).is_file()): return last_file files = filter((lambda f: ((f.suffix == suffix) and (f.name not in ignore))), sorted(path.iterdir(), reverse=(not reverse))) try: file = next(files) except StopIteration: file = None return file
def eps(x: ty.N[torch.Tensor]=None, /) -> float: 'Return the `eps` value for the given `input` dtype. (default=float32 ~= 1.19e-7)' dtype = (torch.float32 if (x is None) else x.dtype) return torch.finfo(dtype).eps
def freeze(net: nn.Module, /) -> nn.Module: 'Fix all model parameters and prevent training.' for p in net.parameters(): p.requires_grad = False return net
def unfreeze(net: nn.Module, /) -> nn.Module: 'Make all model parameters trainable.' for p in net.parameters(): p.requires_grad = True return net
def allclose(net1: nn.Module, net2: nn.Module, /) -> bool: 'Check if two networks have the exact same parameters.' for (p1, p2) in zip(net1.parameters(), net2.parameters()): try: if (not p1.allclose(p2)): return False except RuntimeError: return False return True
def num_parameters(net: nn.Module, /, requires_grad: ty.N[bool]=None) -> int: 'Get number of parameters in a network. By default, all parameters are counted.' if (requires_grad is None): key = (lambda p: True) elif requires_grad: key = (lambda p: p.requires_grad) else: key = (lambda p: (not p.requires_grad)) return sum((p.numel() for p in net.parameters() if key(p)))
@map_container def to_torch(x: ty.Any, /, permute: bool=True, device: ty.N[torch.device]=None) -> ty.Any: 'Convert given input to `torch.Tensors`.\n\n :param x: (ty.Any) Arbitrary structure to convert to tensors (see `map_container`).\n :param permute: (bool) If `True`, permute to PyTorch convention (b, h, w, c) -> (b, c, h, w).\n :param device: (torch.device) Device to send tensors to.\n :return: (ty.Any) Input structure, converted to tensors.\n ' if isinstance(x, (str, Timer, MultiLevelTimer)): return x x = torch.as_tensor(x, device=device) if (permute and (x.ndim > 2)): dim = [(- 1), (- 3), (- 2)] dim = (list(range((x.ndim - 3))) + dim) x = x.permute(dim) return x
@map_container def to_np(x: ty.Any, /, permute: bool=True) -> ty.Any: 'Convert given input to `numpy.ndarrays`.\n\n :param x: (ty.Any) Arbitrary structure to convert to ndarrays (see map_container).\n :param permute: (bool) If `True`, permute from PyTorch convention (b, c, h, w) -> (b, h, w, c).\n :return: (ty.Any) Input structure, converted to ndarrays.\n ' if isinstance(x, (np.ndarray, str, Timer, MultiLevelTimer)): return x if (permute and (x.ndim > 2)): dim = [(- 2), (- 1), (- 3)] dim = (list(range((x.ndim - 3))) + dim) x = x.permute(dim) return x.detach().cpu().numpy()
@map_container def op(_x: ty.Any, /, *args, fn: ty.U[(str, ty.Callable)], **kwargs) -> ty.Any: "Apply a function to an arbitrary input structure. `fn` can be either a function or a method to search on `_x`.\n\n Example:\n >>> out = fn(input, device, op='to') # Apply x.to(device) to each item in `input`\n >>> out = fn(input, func=torch.softmax, dim=1) # Apply torch.softmax(x, dim=1) to each item in `input`\n\n :param _x: (ty.Any) Arbitrary structure to convert to tensors (see map_container).\n :param args: (tuple) `Args` to forward to the given `func`.\n :param fn: (str|Callable) Function to apply. If given a string, search as an attribute of `_x`.\n :param kwargs: (dict) `Kwargs` forwarded to `op`.\n :return:\n " if isinstance(_x, (str, Timer, MultiLevelTimer)): return _x if isinstance(fn, str): fn = getattr(_x, fn) else: args = (_x, *args) return fn(*args, **kwargs)
@opt_args_deco def allow_np(fn: ty.N[ty.Callable], permute: bool=False) -> ty.Callable: "Decorator to allow for `np.ndarray` inputs into a torch function.\n\n Objective is to implement the function using torch ops and apply this decorator to also make it numpy friendly.\n Since `numpy.ndarray` and `torch.Tensor` share memory (when on CPU), there shouldn't be any overhead.\n\n The decorated function can have an arbitrary signature. We enforce that there should only be either `np.ndarray`\n or `torch.Tensor` inputs. Args of any other type (int, float, str...) are left unchanged.\n\n :param fn: (callable) Function to decorate.\n :param permute: (bool) If `True`, permute from Numpy inputs into PyTorch convention (b, h, w, c) -> (b, c, h, w).\n " ann = fn.__annotations__ for (k, type) in ann.items(): if (type == ty.T): ann[k] = ty.U[(ty.A, type)] @wraps(fn) def wrapper(*args, **kwargs): all_args = (args + tuple(kwargs.values())) any_np = any((isinstance(arg, np.ndarray) for arg in all_args)) any_torch = any((isinstance(arg, torch.Tensor) for arg in all_args)) if (any_torch and any_np): raise ValueError('Must pass only `np.ndarray` or `torch.Tensor`!') if any_np: (args, kwargs) = to_torch((args, kwargs), permute=permute) out = fn(*args, **kwargs) if any_np: out = to_np(out, permute=permute) return out return wrapper
def dilate_mask(mask: ty.T, kernel_size: int=3) -> ty.T: 'Apply morphological dilation to the input binary mask.\n\n If any pixel within the kernel is a valid pixel (`True`), the central point is added to the mask.\n\n :param mask: (Tensor) (b, 1, h, w) Boolean mask indicating valid pixels.\n :param kernel_size: (int) Kernel size used for dilation.\n :return: (Tensor) (b, 1, h, w) Dilated boolean mask indicating valid pixels.\n ' mask = F.max_pool2d(mask.float(), kernel_size=kernel_size, padding=(kernel_size // 2), stride=1).bool() return mask
def erode_mask(mask: ty.T, kernel_size: int=3) -> ty.T: 'Apply morphological erosion to the given binary mask.\n\n If any pixel within the kernel is not a valid pixel (`False`), the central point is removed from the mask.\n Since PyTorch does not provide `min_pool` we simply invert the dilation process.\n\n :param mask: (Tensor) (b, 1, h, w) Boolean mask indicating valid pixels.\n :param kernel_size: (int) Kernel size used for erosion.\n :return: (Tensor) (b, 1, h, w) Eroded boolean mask indicating valid pixels.\n ' mask = (~ dilate_mask((~ mask), kernel_size=kernel_size)) return mask
@allow_np(permute=True) def standardize(x: ty.T, /, mean: StatsRGB=_mean, std: StatsRGB=_std) -> ty.T: 'Apply standardization. Default uses ImageNet statistics.' shape = (([1] * (x.ndim - 3)) + [3, 1, 1]) mean = x.new_tensor(mean).view(shape) std = x.new_tensor(std).view(shape) x = ((x - mean) / std) return x
@allow_np(permute=True) def unstandardize(x: ty.T, /, mean: StatsRGB=_mean, std: StatsRGB=_std) -> ty.T: 'Remove standardization. Default uses ImageNet statistics.' shape = (([1] * (x.ndim - 3)) + [3, 1, 1]) mean = x.new_tensor(mean).view(shape) std = x.new_tensor(std).view(shape) x = ((x * std) + mean) return x
@allow_np(permute=True) def to_gray(x: ty.T, /, coeffs: StatsRGB=_coeffs, keepdim: bool=False) -> ty.T: 'Convert image to grayscale.' shape = (([1] * (x.ndim - 3)) + [3, 1, 1]) coeffs = x.new_tensor(coeffs).view(shape) x = (x * coeffs).sum(dim=1, keepdim=keepdim) return x
def mean_normalize(x: ty.T, /, dim: ty.U[(int, ty.S[int])]=(2, 3)) -> ty.T: 'Apply mean normalization across the specified dimensions.\n\n :param x: (Tensor) (*) Input tensor to normalize of any shape.\n :param dim: (int | ty.S[int]) Dimension(s) to compute the mean across.\n :return: (Tensor) (*) Mean normalized input with the same shape.\n ' return (x / x.mean(dim=dim, keepdim=True).clamp(min=eps(x)))
def eye_like(x: ty.T, /) -> ty.T: 'Create an Identity matrix of the same dtype and size as the input.\n\n NOTE: The input can be of any shape, except the final two dimensions, which must be square.\n\n :param x: (Tensor) (*, n, n) Input reference tensor, where `*` can be any size (including zero).\n :return: (Tensor) (*, n, n) Identity matrix with the same dtype and size as the input.\n ' ndim = x.ndim if (ndim < 2): raise ValueError(f'Input must have at least two dimensions! Got "{ndim}"') (n, n2) = (x.shape[(- 2)], x.shape[(- 1)]) if (n != n2): raise ValueError(f'Input last two dimensions must be square (*, n, n)! Got "{x.shape}"') view = (([1] * (ndim - 2)) + [n, n]) I = torch.eye(n, dtype=x.dtype, device=x.device).view(view).expand_as(x).clone() return I
def interpolate_like(input: ty.T, /, other: ty.T, mode: str='nearest', align_corners: bool=False) -> ty.T: 'Interpolate to match the size of `other` tensor.' if (mode == 'nearest'): align_corners = None return F.interpolate(input, size=other.shape[(- 2):], mode=mode, align_corners=align_corners)
def expand_dim(x: ty.T, /, num: ty.U[(int, ty.S[int])], dim: ty.U[(int, ty.S[int])]=0, insert: bool=False) -> ty.T: 'Expand the specified input tensor dimensions, inserting new ones if required.\n\n >>> expand_dim(torch.rand(1, 1, 1), num=5, dim=1, insert=False) # (1, 1, 1) -> (1, 5, 1)\n >>> expand_dim(torch.rand(1, 1, 1), num=5, dim=1, insert=True) # (1, 1, 1) -> (1, 5, 1, 1)\n >>> expand_dim(torch.rand(1, 1, 1), num=(5, 3), dim=(0, 1), insert=False) # (1, 1, 1) -> (5, 3, 1)\n >>> expand_dim(torch.rand(1, 1, 1), num=(5, 3), dim=(0, 1), insert=True) # (1, 1, 1) -> (5, 3, 1, 1, 1)\n\n :param x: (Tensor) (*) Input tensor of any shape.\n :param num: (int|ty.S[int]) Expansion amount for the target dimension(s).\n :param dim: (int|ty.S[int]) Dimension(s) to expand.\n :param insert: (bool) If `True`, insert a new dimension at the specified location(s).\n :return: (Tensor) (*, num, *) Expanded tensor at the given location(s).\n ' if isinstance(num, int): if isinstance(dim, int): (num, dim) = ([num], [dim]) else: num = ([num] * len(dim)) elif (len(num) != len(dim)): raise ValueError(f'Non-matching expansion and dims. ({len(num)} vs. {len(dim)})') for d in (dim if insert else ()): x = x.unsqueeze(d) sizes = ([(- 1)] * x.ndim) for (n, d) in zip(num, dim): sizes[d] = n return x.expand(sizes)
def min(x: ty.T, dim: ty.N[ty.U[(int, ty.S)]]=None, keepdim: bool=False): 'Find the min values of the input tensor along the desired dimension(s).\n Wrapper around `torch.min` that returns only the min value and can be applied to multiple dimensions.\n\n :param x: (Tensor) (*) Input tensor of any shape.\n :param dim: (None|int|ty.S) If `None`, compute min across all dims. Otherwise only specified.\n :param keepdim: (bool) If `True`, keep the reduced dimensions.\n :return: (Tensor) (*) Min values of input tensor. Number of dims depends on `keepdim`.\n ' if (dim is None): return x.min(keepdim=keepdim) if isinstance(dim, int): dim = [dim] if (not all((((- x.ndim) <= d < x.ndim) for d in dim))): raise IndexError(f'Dimension out of range (expected to be in range [{(- x.ndim)}, {(x.ndim - 1)}], but got {dim})') dim = sorted(set(((d if (d >= 0) else (x.ndim + d)) for d in dim)), reverse=True) for d in dim: x = x.min(d, keepdim=keepdim)[0] return x
def max(x: ty.T, dim: ty.N[ty.U[(int, ty.S)]]=None, keepdim: bool=False): 'Find the max values of the input tensor along the desired dimension(s).\n Wrapper around `torch.max` that returns only the max value and can be applied to multiple dimensions.\n\n :param x: (Tensor) (*) Input tensor of any shape.\n :param dim: (None|int|ty.S) If `None`, compute max across all dims. Othwerise only specified.\n :param keepdim: (bool) If `True`, keep the reduced dimensions.\n :return: (Tensor) (*) Max values of input tensor. Number of dims depends on `keepdim`.\n ' if (dim is None): return x.max(keepdim=keepdim) if isinstance(dim, int): dim = [dim] if (not all((((- x.ndim) <= d < x.ndim) for d in dim))): raise IndexError(f'Dimension out of range (expected to be in range [{(- x.ndim)}, {(x.ndim - 1)}], but got {dim})') dim = sorted(set(((d if (d >= 0) else (x.ndim + d)) for d in dim)), reverse=True) for d in dim: x = x.max(d, keepdim=keepdim)[0] return x
def get_cls(cls_dict: dict[(str, ty.Type[T])], /, *args, type: str, **kwargs) -> T: 'Instantiate an arbitrary class from a collection.\n\n Including `type` makes it a keyword-only argument. This has the double benefit of forcing the user to pass it as a\n keyword argument, as well as popping it from the cfg kwargs.\n\n :param cls_dict: (dict[str, cls]) Dict containing mappings to the classes to choose from.\n :param args: (tuple) Args to forward to target class.\n :param type: (str) Key of the target class. Keyword-only argument.\n :param kwargs: (dict) Kwargs to forward to target class.\n :return: Target class instance.\n ' try: return cls_dict[type](*args, **kwargs) except Exception as e: raise ValueError(f'Error using "{type}" in {list(cls_dict)}') from e
def get_net(cfg: dict) -> nn.ModuleDict: "Instantiate the target networks from a cfg dict.\n\n Depth estimation typically consists of multiple networks, commonly `depth` and `pose`.\n We assume that, within a given category, we can use different classes interchangeably.\n For instance, all `depth` networks take a single image as input and produce a multi-scale output, while all\n `pose` networks take multiple images and produce relative poses for each pair.\n\n Networks can be omitted by setting their cfg to `None`. Useful when overriding the default cfg.\n See `cfg/defaults.yaml` for a full example.\n\n Example:\n ```\n cfg = {\n 'depth': {\n 'enc_name': 'convnext_base',\n 'pretrained': True,\n 'dec_name': 'monodepth',\n 'out_scales': [0, 1, 2, 3],\n },\n 'pose': {\n 'enc_name': 'resnet18',\n 'pretrained': True,\n },\n }\n ```\n\n :param cfg: (NetCfg) Dict of dicts, containing the network `type` and kwargs for each network.\n :return: (nn.ModuleDict) Dict of instantiated networks.\n " reg.trigger_nets() reg.trigger_decoders() nets = {k: get_cls(reg.NET_REG, type=k, **kw) for (k, kw) in cfg.items() if (kw is not None)} return nn.ModuleDict(OrderedDict(nets))
def get_loss(cfg: dict) -> tuple[(nn.ModuleDict, nn.ParameterDict)]: "Instantiate the target losses from a cfg dict.\n\n In addition to the kwargs required to instantiate the loss, we also expect a `weight` kwarg, used to\n balance the various losses when computing the final loss. (Default: 1)\n\n Losses can be omitted by setting their cfg to `None`. Useful when overriding the default cfg.\n See `cfg/defaults.yaml` for a full example.\n\n Example:\n ```\n cfg = {\n 'img_recon': {\n 'weight': 1,\n 'loss_name': 'ssim',\n 'use_min': True,\n }\n\n 'disp_smooth': {\n 'weight': 0.001,\n 'use_edges': True,\n }\n ```\n\n :param cfg: (LossDict) Dict of dicts, containing the loss `type`, `weight` and kwargs for each loss.\n :return: (nn.ModuleDict) Dict of instantiated losses.\n " reg.trigger_losses() (losses, weights) = (nn.ModuleDict(), nn.ParameterDict()) for (k, kw) in cfg.items(): if (kw is None): continue weights[k] = nn.Parameter(torch.as_tensor(kw.pop('weight', 1)), requires_grad=False) losses[k] = reg.LOSS_REG[k](**kw) return (losses, weights)
def get_ds(cfg: dict, mode: ty.N[str]=None) -> dict[(str, Dataset)]: "Instantiate the target datasets from a cfg dict.\n\n Datasets consist of a default cfg for each class, which can be overriden based on a `mode` sub-dict.\n\n Datasets can be omitted by setting their cfg to `None`. Useful when overriding the default cfg.\n See `cfg/defaults.yaml` for a full example.\n\n Example:\n ```\n cfg = {\n 'kitti_lmdb': {\n 'split': 'eigen_zhou',\n 'shape': (192, 640),\n 'supp_idxs': [-1, 1, 0],\n\n 'train': {'mode': 'train', 'use_aug': True},\n 'val': {'mode': 'val', 'use_aug': False},\n }\n\n 'slow_tv_lmdb': {\n 'split': 'all',\n 'shape': (384, 640),\n 'supp_idxs': [-1, 1],\n\n 'train': {'mode': 'train', 'use_aug': True},\n 'val': {'mode': 'val', 'use_aug': False},\n }\n ```\n\n :param cfg: (DataCfg) Dict of dicts, containing the dataset `type` and kwargs for each dataset.\n :param mode: (str) Mode to use for the dataset. If `None`, use the default cfg.\n :return: (dict[str, Dataset]) Dict of instantiated datasets.\n " reg.trigger_datas() ds = {} for (t, kw) in cfg.items(): if (kw is None): continue assert isinstance(kw, dict), f"Expected dict of dicts. Got '{kw}'." c = {k: v for (k, v) in kw.items() if (k not in {'train', 'val', 'test'})} if mode: c.update(kw.get(mode, {})) ds[t] = get_cls(reg.DATA_REG, type=t, **c) return ds
def get_dl(mode: str, cfg_ds: dict, cfg_dl: dict) -> DataLoader: "Instantiate the target dataloader from a cfg dict.\n\n Dataloaders consist of a default cfg, which can be overriden based on a `mode` sub-dict.\n The datasets are expected to be a subclass of `BaseDataset`, which provides a `collate_fn` method.\n By default, we use `pin_memory=True`.\n\n If training with multiple datasets, we use the custom `ConcatDataset` class, which concatenates all datasets\n such that each batch contains samples from only one dataset. This is due to each dataset potentially having\n different images shapes.\n\n See `cfg/defaults.yaml` for a full example.\n\n Example:\n ```\n cfg = {\n 'batch_size': 4,\n 'num_workers': 4,\n 'drop_last': True,\n\n 'train': { 'shuffle': True },\n 'val': { 'shuffle': False },\n }\n ```\n\n :param mode: (str) Mode to use for the dataloader. If `None`, use the default cfg.\n :param cfg_ds: (DataCfg) Dict of dicts, containing the dataset `type` and kwargs for each dataset.\n :param cfg_dl: (LoaderCfg) Dict of dicts, containing the dataloader kwargs.\n :return: (DataLoader) Instantiated dataloader.\n " ds = get_ds(cfg_ds, mode) ds = list(ds.values()) cfg = ({k: v for (k, v) in cfg_dl.items() if (k not in {'train', 'val', 'test'})} | cfg_dl.get(mode, {})) cfg['pin_memory'] = cfg.get('pin_memory', True) cfg['collate_fn'] = ds[0].collate_fn use_ddp = cfg.pop('use_ddp', False) seed = cfg.pop('seed', 42) if use_ddp: (shuffle, drop_last) = (cfg.pop('shuffle', False), cfg.pop('drop_last', False)) seeds = [(seed * (10 ** i)) for (i, _) in enumerate(ds)] samplers = [DistributedSampler(d, shuffle=shuffle, drop_last=drop_last, seed=s) for (d, s) in zip(ds, seeds)] else: samplers = [None for _ in ds] dl = [DataLoader(d, sampler=s, **cfg) for (d, s) in zip(ds, samplers)] return (dl[0] if (len(dl) == 1) else ConcatDataLoader(dl))
def get_opt(parameters: ty.U[(ty.Iterable, nn.Module)], cfg: dict) -> optim.Optimizer: "Instantiate the target optimizer from a cfg dict. Wrapper for `timm` `create_optimizer_v2`.\n\n Example:\n ```\n cfg = {\n 'type': 'adamw',\n 'lr': 1e-3,\n 'weight_decay': 1e-4,\n 'frozen_bn': True,\n }\n ```\n\n :param parameters: (Iterable|nn.Module) Parameters to forward to the optimizer (in any `torch` format).\n :param cfg: (OptCfg) Target optimizer `type` and kwargs to forward to it.\n :return: (Optimizer) Instantiated optimizer.\n " if ('type' in cfg): cfg['opt'] = cfg.pop('type') elif ('opt' not in cfg): raise KeyError('Must provide a cfg key `type` or `opt` when instantiating an optimizer.') if cfg.pop('frozen_bn', False): if (not isinstance(parameters, nn.Module)): raise ValueError('Cannot freeze batch norm parameters unless given nn.Module') for m in parameters.modules(): if isinstance(m, nn.BatchNorm2d): m.requires_grad_(False) if (blr := cfg.pop('backbone_lr', False)): if (not isinstance(parameters, nn.Module)): raise ValueError('Cannot set backbone LR unless given nn.Module') if (blr == cfg['lr']): raise ValueError('Backbone LR must be different from the main LR') LOGGER.info(f"Setting backbone LR to {blr} with base LR {cfg['lr']}...") parameters = [{'params': (p for (n, p) in parameters.named_parameters() if ('encoder' not in n))}, {'params': (p for (n, p) in parameters.named_parameters() if ('encoder' in n)), 'lr': blr}] return create_optimizer_v2(parameters, **cfg)
def get_sched(opt: optim.Optimizer, cfg: dict[(str, dict)]) -> dict[(str, ty._LRScheduler)]: "Instantiate the target schedulers from a cfg dict. Wrapper for `timm` `create_scheduler_v2`.\n\n Example:\n ```\n cfg = {\n 'steplr': {\n 'step_size': 10,\n 'gamma': 0.1,\n },\n\n 'linear': {\n 'start_factor: 0.1,\n 'total_iters': 4,\n },\n }\n ```\n\n :param opt: (Optimizer) Optimizer schedule for.\n :param cfg: (SchedCfg) Dict of dicts, containing the scheduler `type` and kwargs for each scheduler.\n :return: (dict[str, _LRScheduler]) Dict of instantiated schedulers.\n " sch = {k: get_cls(reg.SCHED_REG, opt, type=k, **kw) for (k, kw) in cfg.items() if (kw is not None)} return sch
def get_metrics() -> nn.ModuleDict: 'Instantiate the collection of depth metrics to monitor.' return nn.ModuleDict({'MAE': metrics.MAE(), 'RMSE': metrics.RMSE(), 'LogSI': metrics.ScaleInvariant(mode='log'), 'AbsRel': metrics.AbsRel(), 'Acc': metrics.DeltaAcc(delta=1.25)})
def _get_percentile(x: ty.A, p: int) -> float: 'Safe percentile to handle NaNs/Inf.' try: return np.percentile(x, p) except IndexError: return 0.0
@ops.allow_np(permute=True) def rgb_from_disp(disp: ty.T, invert: bool=False, cmap: str='turbo', vmin: float=0, vmax: ty.N[ty.U[(float, ty.S[float])]]=None) -> ty.T: 'Convert a disparity map into an RGB colormap visualization.\n\n :param disp: (Tensor) (*b, *1, h, w) Input disparity/depth map.\n :param invert: (bool) If `True` invert depth into disparity.\n :param cmap: (str) Matplotlib colormap name.\n :param vmin: (float) Minimum value to use when normalizing.\n :param vmax: (None|float|list) Maximum value to use when normalizing. If `None` use 95th percentile.\n :return: (Tensor) (*b, 3, h, w) Colourized disparity map.\n ' if isinstance(vmin, ty.T): vmin = vmin.tolist() if isinstance(vmax, ty.T): vmax = vmax.tolist() n = disp.ndim if (n == 2): disp = disp[(None, None)] if (n == 3): disp = disp[None] if invert: disp = geo.to_inv(disp) disp = ops.to_np(disp).squeeze((- 1)) if (vmax is None): vmax = [_get_percentile(d[(d > 0)], 95) for d in disp] elif isinstance(vmax, (int, float)): vmax = ([vmax] * disp.shape[0]) elif (len(vmax) != disp.shape[0]): raise ValueError(f'Non-matching vmax and disp. ({len(vmax)} vs. {disp.shape[0]})') rgb = torch.stack(ops.to_torch([apply_cmap(d, cmap=cmap, vmin=vmin, vmax=v) for (d, v) in zip(disp, vmax)])) if ((n == 2) or (n == 3)): rgb = rgb.squeeze(0) return rgb
@ops.allow_np(permute=True) def rgb_from_feat(feat: ty.T) -> ty.T: 'Convert dense features into an RGB image via PCA.\n\n NOTE: PCA is computed using all features in the batch, i.e. the representation is batch dependent.\n\n :param feat: (Tensor) (*b, c, h, w) Dense feature representation.\n :return: (Tensor) (*b, 3, h, w) The PCAd features.\n ' n = feat.ndim if (n == 3): feat = feat[None] (b, _, h, w) = feat.shape feat = ops.to_np(feat.permute(0, 2, 3, 1).flatten(0, 2)) proj = PCA(n_components=3).fit_transform(feat) proj -= proj.min(0) proj /= proj.max(0) proj = ops.to_torch(proj.reshape(b, h, w, 3)) if (n == 3): proj = proj.squeeze(0) return proj
class SuppImageNotFoundError(FileNotFoundError):
    """`FileNotFoundError` specialization for missing supplementary images."""
class Predictor(Protocol):
    """Structural (duck-typed) interface a prediction backend must implement.

    Covers the full lifecycle: model loading, image pre-processing, the forward
    pass and post-processing of raw network outputs.
    """

    @staticmethod
    def get_img_shape(data_type: str) -> N[tuple[(int, int)]]:
        # Image shape associated with `data_type`, or `None` (presumably when unconstrained).
        ...

    def __call__(self, net: nn.Module, dl: DataLoader, use_stereo_blend: bool, device: N[str]) -> NDArray:
        # Run `net` over the dataloader and return predictions as a numpy array.
        ...

    def apply(self, net: nn.Module, dl: DataLoader, func: Callable, use_stereo_blend: bool, device: N[str], *args, **kwargs) -> None:
        # Like `__call__`, but forwards each result to `func` instead of returning them.
        ...

    def load_model(self, *args, **kwargs) -> nn.Module: ...

    def preprocess(self, imgs: T) -> T:
        # Prepare raw images for the network (implementation-defined).
        ...

    def forward(self, net: nn.Module, imgs: T) -> T: ...

    def postprocess(self, pred: Tensor, imgs: T) -> T:
        # Convert raw network outputs back into the caller-facing format (implementation-defined).
        ...
class DepthPred(TypedDict, total=False):
    """Depth network outputs. `total=False`: every key is optional."""
    depth_feats: S[T]  # Sequence of feature tensors produced by the depth network.
    disp: dict[(int, T)]  # Disparity maps keyed by int (presumably scale level) — confirm at producer.
    disp_stereo: dict[(int, T)]  # As `disp`, for the stereo branch.
    mask: dict[(int, T)]  # Masks keyed like `disp`.
    mask_stereo: dict[(int, T)]  # As `mask`, for the stereo branch.
class PosePred(TypedDict, total=False):
    """Pose network outputs. `total=False`: every key is optional."""
    R: T  # Rotation (parameterization not visible here — see producer).
    t: T  # Translation.
    fs: T  # Presumably focal lengths — confirm at producer.
    cs: T  # Presumably principal points — confirm at producer.
class AutoencoderPred(TypedDict, total=True):
    """Autoencoder outputs. `total=True`: both keys are required."""
    autoenc_feats: S[T]  # Sequence of encoder feature tensors.
    autoenc_imgs: dict[(int, T)]  # Reconstructed images keyed by int (presumably scale level) — confirm.
class TQDMProgressBar(plc.TQDMProgressBar):
    """Progress bar that removes all `grad norms` from display."""

    def get_metrics(self, trainer, pl_module) -> dict:
        """Return the parent metrics with any `grad`-related entries dropped."""
        shown = {}
        for key, val in super().get_metrics(trainer, pl_module).items():
            if 'grad' in key:
                continue  # Hide gradient-norm logging from the progress bar.
            shown[key] = val
        return shown
class RichProgressBar(plc.RichProgressBar):
    """Progress bar that removes all `grad norms` from display."""

    def get_metrics(self, trainer, pl_module) -> dict:
        """Return the parent metrics with any `grad`-related entries dropped."""
        metrics = super().get_metrics(trainer, pl_module)
        # Rebuild (rather than mutate) so the parent's dict is left untouched.
        return {key: metrics[key] for key in metrics if 'grad' not in key}
class DetectAnomaly(plc.Callback):
    """Check for NaN/infinite loss at each core step. Replacement for `detect_anomaly=True`."""

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, unused=0) -> None:
        """Raise if the training loss for this batch is NaN or infinite."""
        loss = outputs['loss']
        if loss.isfinite():
            return
        raise ValueError(f'Detected NaN/Infinite loss: "{loss}"')
class TrainingManager(plc.Callback):
    """Callback to save a dummy file as an indicator when training has started/finished.

    A `training_<epoch>_<host>` marker file is (re)created at the start of every epoch
    and removed on exit/exception; a `finished` marker is created once fitting ends.
    Instantiating the manager while either marker already exists raises, which stops
    two jobs from writing to the same checkpoint directory.
    """

    def __init__(self, ckpt_dir: Path):
        super().__init__()
        self.ckpt_dir = ckpt_dir
        self.ckpt_dir.mkdir(exist_ok=True, parents=True)
        self.host = socket.gethostname()  # Recorded in the marker name to identify the machine.
        self.ftrain = None  # Current `training_*` marker file (set lazily).
        self.fend = (ckpt_dir / 'finished')
        if self.is_training:
            raise ValueError(f'Training already in progress! ({self.ftrain})')
        if self.has_finished:
            raise ValueError(f'Training already finished! ({self.fend})')
        # Ensure SIGTERM (e.g. cluster preemption) raises, so cleanup paths can run.
        signal.signal(signal.SIGTERM, self._on_sigterm)

    @property
    def is_training(self) -> bool:
        """`True` if exactly one `training_*` marker exists (also caches it in `ftrain`)."""
        fs = sorted(self.ckpt_dir.glob('training*'))
        n = len(fs)
        if n == 0:
            return False
        if n == 1:
            self.ftrain = fs[0]
            return True
        raise ValueError(f'Invalid number of training files! {fs}')

    @property
    def has_finished(self) -> bool:
        """`True` if the `finished` marker exists."""
        return self.fend.is_file()

    def on_train_epoch_start(self, trainer, pl_module) -> None:
        print('-> Creating "training" file...')
        # Replace the previous epoch's marker so at most one exists at a time.
        if self.ftrain:
            self.ftrain.unlink(missing_ok=True)
        self.ftrain = (self.ckpt_dir / f'training_{trainer.current_epoch}_{self.host}')
        self.ftrain.touch()

    def on_fit_end(self, trainer, pl_module) -> None:
        self._cleanup()
        # FIX: message previously contained a stray double quote ('"finished""').
        print('-> Creating "finished" file...')
        self.fend.touch()

    def on_exception(self, trainer, pl_module, exception) -> None:
        self._cleanup()

    def _cleanup(self) -> None:
        """Remove the `training_*` marker file, if one was created."""
        print('-> Deleting "training" file...')
        if self.ftrain:
            self.ftrain.unlink(missing_ok=True)
        print('-> Done! Exiting...')

    def _on_sigterm(self, signum, frame) -> None:
        'Signature required by `signal.signal`.'
        # Raising converts the signal into a normal exception, presumably so the
        # trainer's exception path (and hence `_cleanup`) runs — confirm with Lightning docs.
        raise SystemExit
def default_convert(data):
    """Recursively convert NumPy arrays inside `data` into :class:`torch.Tensor`.

    Containers (mappings, named tuples, tuples, other sequences) are rebuilt with
    their elements converted; tensors and non-NumPy leaves are returned unchanged.
    Plain tuples deliberately become lists (upstream backwards-compatibility).
    Used as the default collation when neither `batch_sampler` nor `batch_size`
    is defined on a :class:`~torch.utils.data.DataLoader`.

    Examples:
        >>> default_convert(0)                # unchanged
        0
        >>> default_convert(np.array([0, 1])) # numpy -> tensor
        tensor([0, 1])

    :param data: A single data point to be converted.
    :return: `data` with all NumPy arrays/scalars replaced by tensors.
    """
    kind = type(data)
    if isinstance(data, torch.Tensor):
        return data
    # NumPy scalars and arrays (but not numpy string types) become tensors...
    if (kind.__module__ == 'numpy') and (kind.__name__ != 'str_') and (kind.__name__ != 'string_'):
        # ...unless the array holds str/object data, which torch cannot represent.
        if (kind.__name__ == 'ndarray') and (np_str_obj_array_pattern.search(data.dtype.str) is not None):
            return data
        return torch.as_tensor(data)
    if isinstance(data, collections.abc.Mapping):
        converted = {key: default_convert(data[key]) for key in data}
        try:
            return kind(converted)  # Preserve custom mapping types when possible.
        except TypeError:
            return converted  # Mapping type not constructible from a dict; fall back to `dict`.
    if isinstance(data, tuple) and hasattr(data, '_fields'):  # namedtuple
        return kind(*(default_convert(item) for item in data))
    if isinstance(data, tuple):
        return [default_convert(item) for item in data]  # Backwards compatibility: tuple -> list.
    if isinstance(data, collections.abc.Sequence) and not isinstance(data, str):
        items = [default_convert(item) for item in data]
        try:
            return kind(items)  # Preserve custom sequence types when possible.
        except TypeError:
            return items  # Sequence type without a single-iterable constructor.
    return data
def collate(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    """General collate function that handles collection type of element within each batch
    and opens function registry to deal with specific element types. `default_collate_fn_map`
    provides default collate functions for tensors, numpy arrays, numbers and strings.

    Args:
        batch: a single batch to be collated
        collate_fn_map: Optional dictionary mapping from element type to the corresponding collate function.
            If the element type isn't present in this dictionary,
            this function will go through each key of the dictionary in the insertion order to
            invoke the corresponding collate function if the element type is a subclass of the key.

    Examples:
        >>> # Extend this function to handle batch of tensors
        >>> def collate_tensor_fn(batch, *, collate_fn_map):
        ...     return torch.stack(batch, 0)
        >>> def custom_collate(batch):
        ...     collate_map = {torch.Tensor: collate_tensor_fn}
        ...     return collate(batch, collate_fn_map=collate_map)
        >>> # Extend `default_collate` by in-place modifying `default_collate_fn_map`
        >>> default_collate_fn_map.update({torch.Tensor: collate_tensor_fn})

    Note:
        Each collate function requires a positional argument for batch and a keyword argument
        for the dictionary of collate functions as `collate_fn_map`.
    """
    elem = batch[0]
    elem_type = type(elem)
    # 1) Registry dispatch: an exact type match wins; otherwise the first registered
    #    type (insertion order) that `elem` is an instance of is used.
    if (collate_fn_map is not None):
        if (elem_type in collate_fn_map):
            return collate_fn_map[elem_type](batch, collate_fn_map=collate_fn_map)
        for collate_type in collate_fn_map:
            if isinstance(elem, collate_type):
                return collate_fn_map[collate_type](batch, collate_fn_map=collate_fn_map)
    # 2) Containers are collated recursively, preserving the container type when possible.
    if isinstance(elem, collections.abc.Mapping):
        try:
            return elem_type({key: collate([d[key] for d in batch], collate_fn_map=collate_fn_map) for key in elem})
        except TypeError:
            # Mapping type can't be constructed from a dict; fall back to plain `dict`.
            return {key: collate([d[key] for d in batch], collate_fn_map=collate_fn_map) for key in elem}
    elif (isinstance(elem, tuple) and hasattr(elem, '_fields')):  # namedtuple
        return elem_type(*(collate(samples, collate_fn_map=collate_fn_map) for samples in zip(*batch)))
    elif isinstance(elem, collections.abc.Sequence):
        # All sequences in the batch must have equal length so they can be transposed.
        it = iter(batch)
        elem_size = len(next(it))
        if (not all(((len(elem) == elem_size) for elem in it))):
            raise RuntimeError('each element in list of batch should be of equal size')
        transposed = list(zip(*batch))
        if isinstance(elem, tuple):
            # Plain tuples collate into a list (upstream backwards compatibility).
            return [collate(samples, collate_fn_map=collate_fn_map) for samples in transposed]
        else:
            try:
                return elem_type([collate(samples, collate_fn_map=collate_fn_map) for samples in transposed])
            except TypeError:
                # Sequence type can't be constructed from an iterable; fall back to `list`.
                return [collate(samples, collate_fn_map=collate_fn_map) for samples in transposed]
    elif isinstance(elem, (Timer, MultiLevelTimer)):
        # Project-specific addition (not in upstream torch): timers pass through uncollated.
        return batch
    raise TypeError(default_collate_err_msg_format.format(elem_type))
def collate_tensor_fn(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    """Stack a batch of tensors along a new leading (batch) dimension.

    When running inside a DataLoader worker process, the output is allocated in
    shared memory so the stacked batch can be handed to the main process without
    an extra copy.
    """
    first = batch[0]
    out = None
    if torch.utils.data.get_worker_info() is not None:
        # Worker process: back the result with a shared-memory storage.
        total = sum(t.numel() for t in batch)
        shared = first._typed_storage()._new_shared(total, device=first.device)
        out = first.new(shared).resize_(len(batch), *list(first.size()))
    return torch.stack(batch, 0, out=out)
def collate_numpy_array_fn(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    """Collate numpy arrays by converting them to tensors and re-dispatching."""
    first = batch[0]
    # str/object arrays cannot be represented as tensors.
    if np_str_obj_array_pattern.search(first.dtype.str) is not None:
        raise TypeError(default_collate_err_msg_format.format(first.dtype))
    return collate([torch.as_tensor(arr) for arr in batch], collate_fn_map=collate_fn_map)
def collate_numpy_scalar_fn(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    """Collate a batch of numpy scalars into a 1D tensor (dtype inferred by torch)."""
    out = torch.as_tensor(batch)
    return out
def collate_float_fn(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    """Collate Python floats into a float64 tensor (upstream torch convention)."""
    return torch.tensor(batch, dtype=torch.float64)
def collate_int_fn(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    """Collate Python ints into a tensor (default integer dtype)."""
    return torch.tensor(batch)
def collate_str_fn(batch, *, collate_fn_map: Optional[Dict[(Union[(Type, Tuple[(Type, ...)])], Callable)]]=None):
    """Strings/bytes are left uncollated; the batch list is returned as-is."""
    return batch
def default_collate(batch):
    """
    Function that takes in a batch of data and puts the elements within the batch
    into a tensor with an additional outer dimension - batch size. The exact output type can be
    a :class:`torch.Tensor`, a `Sequence` of :class:`torch.Tensor`, a
    Collection of :class:`torch.Tensor`, or left unchanged, depending on the input type.
    This is used as the default function for collation when
    `batch_size` or `batch_sampler` is defined in :class:`~torch.utils.data.DataLoader`.

    Here is the general input type (based on the type of the element within the batch) to output type mapping:

    * :class:`torch.Tensor` -> :class:`torch.Tensor` (with an added outer dimension batch size)
    * NumPy Arrays -> :class:`torch.Tensor`
    * `float` -> :class:`torch.Tensor`
    * `int` -> :class:`torch.Tensor`
    * `str` -> `str` (unchanged)
    * `bytes` -> `bytes` (unchanged)
    * `Mapping[K, V_i]` -> `Mapping[K, default_collate([V_1, V_2, ...])]`
    * `NamedTuple[V1_i, V2_i, ...]` -> `NamedTuple[default_collate([V1_1, V1_2, ...]),
      default_collate([V2_1, V2_2, ...]), ...]`
    * `Sequence[V1_i, V2_i, ...]` -> `Sequence[default_collate([V1_1, V1_2, ...]),
      default_collate([V2_1, V2_2, ...]), ...]`

    Args:
        batch: a single batch to be collated

    Examples:
        >>> # xdoctest: +SKIP
        >>> # Example with a batch of `int`s:
        >>> default_collate([0, 1, 2, 3])
        tensor([0, 1, 2, 3])
        >>> # Example with a batch of `str`s:
        >>> default_collate(['a', 'b', 'c'])
        ['a', 'b', 'c']
        >>> # Example with `Map` inside the batch:
        >>> default_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}])
        {'A': tensor([  0, 100]), 'B': tensor([  1, 100])}
        >>> # Example with `NamedTuple` inside the batch:
        >>> Point = namedtuple('Point', ['x', 'y'])
        >>> default_collate([Point(0, 0), Point(1, 1)])
        Point(x=tensor([0, 1]), y=tensor([0, 1]))
        >>> # Example with `Tuple` inside the batch:
        >>> default_collate([(0, 1), (2, 3)])
        [tensor([0, 2]), tensor([1, 3])]
        >>> # Example with `List` inside the batch:
        >>> default_collate([[0, 1], [2, 3]])
        [tensor([0, 2]), tensor([1, 3])]
        >>> # Two options to extend `default_collate` to handle specific type
        >>> # Option 1: Write custom collate function and invoke `default_collate`
        >>> def custom_collate(batch):
        ...     elem = batch[0]
        ...     if isinstance(elem, CustomType):  # Some custom condition
        ...         return ...
        ...     else:  # Fall back to `default_collate`
        ...         return default_collate(batch)
        >>> # Option 2: In-place modify `default_collate_fn_map`
        >>> def collate_customtype_fn(batch, *, collate_fn_map=None):
        ...     return ...
        >>> default_collate_fn_map.update({CustomType: collate_customtype_fn})
        >>> default_collate(batch)  # Handle `CustomType` automatically
    """
    # Delegate to the generic `collate` with the module-level default registry.
    return collate(batch, collate_fn_map=default_collate_fn_map)
def opt_args_deco(deco: ty.Callable) -> ty.Callable:
    """Meta-decorator enabling decorators with optional keyword arguments.

    A decorator written as `def deco(func, **opts)` normally needs an extra level
    of nesting (a factory) to support `@deco(foo=10)`, and then must ALWAYS be
    called (`@deco()`). Wrapping it with `opt_args_deco` hides that nesting: the
    result can be used both bare (`@deco`) and with options (`@deco(foo=10)`).

    Example:
    ```
    @opt_args_deco
    def stringify(func, prefix='', suffix=''):
        @wraps(func)
        def wrapper(*args, **kwargs):
            out = func(*args, **kwargs)
            return f'{prefix}{out}{suffix}'
        return wrapper
    ```

    :param deco: (Callable) Decorator function with optional parameters to wrap.
    :return: (Callable) If `func` is provided: decorated func, otherwise: decorator to apply to `func`.
    """
    @wraps(deco)
    def wrapper(f: ty.N[ty.Callable]=None, **kwargs) -> ty.Callable:
        if f is not None:
            # Bare usage `@deco`: `f` is the function being decorated.
            if isinstance(f, (types.FunctionType, types.MethodType)):
                return deco(f, **kwargs)
            raise TypeError(f'Positional argument must be a function or method, got {f} of type {type(f)}')
        # Factory usage `@deco(foo=10)`: return a decorator pre-loaded with the kwargs.
        return partial(deco, **kwargs)
    return wrapper
def delegates(to: ty.N[ty.Callable]=None, keep: bool=False):
    """From https://www.fast.ai/2019/08/06/delegation/.
    Decorator to replace `**kwargs` in signature with params from `to`.

    This can be used to decorate either a class
    ```
    @delegates()
    class Child(Parent): ...
    ```
    or a function
    ```
    @delegates(parent)
    def func(a, **kwargs): ...
    ```

    :param to: (Callable) Callable containing the params to copy
    :param keep: (bool) If `True`, keep `**kwargs` in the signature.
    :return: (Callable) The decorated class or function with the updated signature.
    """
    def wrapper(f: ty.U[(type, ty.Callable)]) -> ty.Callable:
        # Class mode (`to is None`): copy from the base class' `__init__` into `f.__init__`.
        # Function mode: copy from `to` into `f` itself.
        (to_f, from_f) = ((f.__base__.__init__, f.__init__) if (to is None) else (to, f))
        sig = inspect.signature(from_f)
        sigd = dict(sig.parameters)
        # Pop the catch-all `*args`; if present, pull in the source's REQUIRED
        # (no-default) params that the target doesn't already declare.
        args = sigd.pop('args', None)
        if args:
            sigd2 = {k: v for (k, v) in inspect.signature(to_f).parameters.items() if ((v.default == inspect.Parameter.empty) and (k not in sigd))}
            sigd.update(sigd2)
        # Pop the catch-all `**kwargs`; if present, pull in the source's OPTIONAL
        # (defaulted) params that the target doesn't already declare.
        kwargs = sigd.pop('kwargs', None)
        if kwargs:
            sigd2 = {k: v for (k, v) in inspect.signature(to_f).parameters.items() if ((v.default != inspect.Parameter.empty) and (k not in sigd))}
            sigd.update(sigd2)
        # Optionally restore the catch-alls at the end of the signature.
        if (keep and args):
            sigd['args'] = args
        if (keep and kwargs):
            sigd['kwargs'] = kwargs
        # NOTE(review): only `__signature__` is rewritten (for introspection/docs);
        # the runtime calling convention is untouched. Re-appending params at the
        # end may yield an invalid parameter order for unusual signatures — confirm.
        from_f.__signature__ = sig.replace(parameters=list(sigd.values()))
        return f
    return wrapper
def map_container(f: ty.Callable) -> ty.Callable:
    """Decorator that recursively applies `f` through nested `dict`/`list`/`tuple`/`set`.

    NOTE: `f` can have an arbitrary signature, but the first arg must be the item
    to apply `f` to; any extra args/kwargs are forwarded unchanged to every leaf call.

    Example:
    ```
    @map_container
    def square(n, bias=0):
        return (n ** 2) + bias

    square({'a': [1, 2, 3], 'b': 4})          # -> {'a': [1, 4, 9], 'b': 16}
    square({'a': [1, 2, 3], 'b': 4}, bias=2)  # -> {'a': [3, 6, 11], 'b': 18}
    ```
    """
    @wraps(f)
    def wrapper(x: T, *args, **kwargs) -> T:
        recurse = lambda v: wrapper(v, *args, **kwargs)
        if isinstance(x, dict):
            return {key: recurse(val) for key, val in x.items()}
        if isinstance(x, list):
            return [recurse(v) for v in x]
        if isinstance(x, tuple):
            return tuple(recurse(v) for v in x)
        if isinstance(x, set):
            return {recurse(v) for v in x}
        return f(x, *args, **kwargs)  # Leaf: apply the wrapped function.
    return wrapper
def readlines(file: Path, /, encoding: ty.N[str]=None, split: bool=False, sep: ty.N[str]=None) -> ty.U[(list[str], list[list[str]])]:
    """Read a text file as a list of lines (newline characters stripped).

    FIX: `encoding` was annotated as `str` despite defaulting to `None`; it is
    now correctly optional.

    :param file: (Path) File to read.
    :param encoding: (None|str) Text encoding passed to `open` (`None` = platform default).
    :param split: (bool) If `True`, additionally split each line via `splitlines`.
    :param sep: (None|str) Separator forwarded to `str.split` (`None` = any whitespace run).
    :return: (list) Lines in the file, or lists of tokens per line if `split=True`.
    """
    with open(file, encoding=encoding) as f:
        lines = f.read().splitlines()
    if split:
        lines = splitlines(lines, sep)
    return lines
def splitlines(lines: list[str], sep: ty.N[str]=None) -> list[list[str]]:
    """Tokenize every line with `str.split`.

    :param lines: (list) Lines to tokenize.
    :param sep: (None|str) Separator (`None` = split on runs of whitespace).
    :return: (list) One list of tokens per input line.
    """
    return [line.split(sep) for line in lines]
def mymap(fn: str, iterable: ty.Iterable, type: ty.N[T]=list, **kwargs) -> ty.U[(ty.Generator, T)]:
    """Call the method named `fn` on every item of `iterable`.

    :param fn: (str) Method name looked up on each item via `getattr`.
    :param iterable: (Iterable) Items providing the method.
    :param type: (None|type) Container to collect results into (e.g. `list`, `tuple`, `set`);
        falsy values return the lazy generator instead.
    :param kwargs: (dict) Extra keyword arguments forwarded to each method call.
    :return: (Iterable) Mapped results, as `type` or a generator.
    """
    if not isinstance(fn, str):
        raise TypeError(f'`fn` must be a str to search as an attribute of each item!')
    results = (getattr(item, fn)(**kwargs) for item in iterable)
    return type(results) if type else results
def _map(fn: ty.Callable, *iterables: ty.Iterable, type: T, star: bool=False) -> T: 'Map `fn` to each iterable item and convert to the specified container `type`.' map_fn = (itertools.starmap if star else map) return type(map_fn(fn, *iterables))
def iterdir(path: Path, key: ty.N[Key]=None) -> list[Path]:
    """Return the sorted contents of `path`, optionally filtered by `key`.

    :param path: (Path) Directory to list.
    :param key: (None|Callable) Predicate keeping only entries for which it is truthy.
    :return: (list) Sorted directory entries.
    """
    if key is None:
        return sorted(path.iterdir())
    return sorted(p for p in path.iterdir() if key(p))
def get_dirs(path: Path, key: ty.N[Key]=None) -> list[Path]:
    """Return sorted subdirectories of `path`, optionally filtered by `key`."""
    def _is_wanted(p):
        # Must be a directory AND pass the user filter (if any).
        return p.is_dir() and (key(p) if key else True)
    return iterdir(path, _is_wanted)
def get_files(path: Path, key: ty.N[Key]=None) -> list[Path]:
    """Return sorted files in `path`, optionally filtered by `key`."""
    def _is_wanted(p):
        # Must be a regular file AND pass the user filter (if any).
        return p.is_file() and (key(p) if key else True)
    return iterdir(path, _is_wanted)
def has_contents(path: Path) -> bool:
    """`True` if `path` is a directory containing at least one entry."""
    if not path.is_dir():
        return False
    return len(iterdir(path)) > 0