# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import math

import torch

from logging import getLogger

logger = getLogger()


def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    with torch.no_grad():
        # Values are generated by sampling uniformly in CDF space over
        # [l, u] and then applying the inverse CDF of the normal
        # distribution, which yields a truncated standard normal sample.

        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [2l-1, 2u-1], the image of
        # [l, u] under p -> 2p - 1, so that erfinv_ inverts the normal CDF.
        tensor.uniform_(2 * l - 1, 2 * u - 1)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor


def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """Fill ``tensor`` in-place with samples from a normal distribution
    with the given ``mean`` and ``std``, truncated to the interval ``[a, b]``."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)


def apply_masks(x, masks):
    """
    :param x: tensor of shape [B (batch-size), N (num-patches), D (feature-dim)]
    :param masks: list of tensors of shape [B, K] containing the indices of
        the K patches in [0, N) to keep for each batch element
    :returns: tensor of shape [B * len(masks), K, D]
    """
    all_x = []
    for m in masks:
        # Expand indices from [B, K] to [B, K, D] so gather selects whole
        # feature vectors along the patch dimension.
        mask_keep = m.unsqueeze(-1).repeat(1, 1, x.size(-1))
        all_x += [torch.gather(x, dim=1, index=mask_keep)]
    return torch.cat(all_x, dim=0)


def repeat_interleave_batch(x, B, repeat):
    """Repeat each of the ``len(x) // B`` consecutive chunks of ``B`` rows
    ``repeat`` times along dim 0, preserving chunk order."""
    N = len(x) // B
    x = torch.cat([
        torch.cat([x[i*B:(i+1)*B] for _ in range(repeat)], dim=0)
        for i in range(N)
    ], dim=0)
    return x
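

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): exercises the three
# utilities above with small, illustrative shapes. The batch size, patch
# count, feature dim, and mask indices below are arbitrary assumptions chosen
# only to make the expected input/output shapes concrete.
if __name__ == "__main__":
    B, N, D = 2, 8, 4  # assumed batch size, num patches, feature dim

    # Truncated-normal initialization: every value lands in [a, b] = [-2, 2].
    w = torch.empty(N, D)
    trunc_normal_(w, std=0.02)
    assert w.min() >= -2. and w.max() <= 2.

    # Keep K=3 (arbitrary) patch indices per batch element, with two masks;
    # the masked views are stacked along the batch dimension.
    x = torch.randn(B, N, D)
    masks = [
        torch.tensor([[0, 2, 5], [1, 3, 7]]),  # first mask: [B, K] indices
        torch.tensor([[4, 5, 6], [0, 1, 2]]),  # second mask
    ]
    kept = apply_masks(x, masks)
    assert kept.shape == (B * len(masks), 3, D)

    # Repeat each chunk of B rows twice while preserving chunk order, e.g.
    # rows [a0, a1, b0, b1] -> [a0, a1, a0, a1, b0, b1, b0, b1].
    y = torch.arange(2 * B).float().unsqueeze(-1)  # two chunks of B rows
    rep = repeat_interleave_batch(y, B=B, repeat=2)
    assert rep.shape == (2 * B * 2, 1)
    logger.debug("usage sketch ran: %s", rep.flatten().tolist())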