import math

import torch

from logging import getLogger

logger = getLogger()


def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf

    def norm_cdf(x):
        # Computes the standard normal cumulative distribution function.
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    with torch.no_grad():
        # Values are generated by drawing from a uniform distribution and then
        # mapping through the inverse CDF of the normal distribution.
        # Get upper and lower cdf values.
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l - 1, 2u - 1], the input range of erfinv.
        tensor.uniform_(2 * l - 1, 2 * u - 1)

        # Use the inverse CDF transform for the normal distribution to get a
        # truncated standard normal.
        tensor.erfinv_()

        # Transform to the requested mean and std.
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to make sure values are in the proper range.
        tensor.clamp_(min=a, max=b)
        return tensor


def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # Fills `tensor` in-place with values drawn from a normal distribution
    # truncated to [a, b].
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
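

# Illustrative usage (a sketch, not part of the original module): ViT-style
# weight initialization. The shape and std below are arbitrary example values.
#
#     w = torch.empty(384, 384)
#     trunc_normal_(w, std=0.02)  # in-place; samples clamped to [-2., 2.]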


def apply_masks(x, masks):
    """
    :param x: tensor of shape [B (batch-size), N (num-patches), D (feature-dim)]
    :param masks: list of tensors containing indices of patches in [N] to keep
    """
    all_x = []
    for m in masks:
        # Expand each [B, K] index tensor along the feature dim so it can be
        # used with torch.gather: [B, K] -> [B, K, D].
        mask_keep = m.unsqueeze(-1).repeat(1, 1, x.size(-1))
        # Keep only the K patches selected by this mask.
        all_x += [torch.gather(x, dim=1, index=mask_keep)]
    # Concatenate the masked views along the batch dimension.
    return torch.cat(all_x, dim=0)
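

# Shape sketch (illustrative, not part of the original module): with B=2, N=4,
# D=8 and two masks that each keep K=2 patch indices per image, the output
# stacks the masked views along the batch dimension.
#
#     x = torch.randn(2, 4, 8)
#     masks = [torch.tensor([[0, 1], [2, 3]]), torch.tensor([[1, 2], [0, 3]])]
#     out = apply_masks(x, masks)  # shape [len(masks) * B, K, D] == [4, 2, 8]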


def repeat_interleave_batch(x, B, repeat):
    # x holds N consecutive batches of size B stacked along dim 0; repeat each
    # B-sized block `repeat` times while preserving the block order.
    N = len(x) // B
    x = torch.cat([
        torch.cat([x[i*B:(i+1)*B] for _ in range(repeat)], dim=0)
        for i in range(N)
    ], dim=0)
    return x
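

# Ordering sketch (illustrative, not part of the original module): for B=2 and
# repeat=2, input rows [a1, a2, b1, b2] become [a1, a2, a1, a2, b1, b2, b1, b2];
# each B-sized block is repeated contiguously rather than interleaved
# per-sample.
#
#     x = torch.arange(4.).view(4, 1)  # rows: a1, a2, b1, b2
#     out = repeat_interleave_batch(x, B=2, repeat=2)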