import math
from functools import partial

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from huggingface_hub import PyTorchModelHubMixin
from timm.models.layers import DropPath
from timm.models.vision_transformer import PatchEmbed, Block

def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width
    return:
        pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size, dtype=np.float32)
    grid_w = np.arange(grid_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of the dimensions to encode grid_h and the other half for grid_w
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float32)
    omega /= embed_dim / 2.
    omega = 1. / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb
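

# A minimal sanity-check sketch (not part of the original file): with 224x224
# inputs and 16x16 patches the grid is 14x14, so the embedding with a cls-token
# slot should have shape (1 + 14*14, embed_dim). embed_dim=768 is an assumed
# example value.
if __name__ == "__main__":
    _pe = get_2d_sincos_pos_embed(embed_dim=768, grid_size=14, cls_token=True)
    assert _pe.shape == (1 + 14 * 14, 768)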


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.hidden_features = hidden_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class Attention(nn.Module):
    def __init__(
            self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
            proj_drop=0., attn_head_dim=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # qkv projection is created without a bias; when qkv_bias is set, separate
        # q and v bias parameters are learned while the k bias stays fixed at zero.
        self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
        if qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
        else:
            self.q_bias = None
            self.v_bias = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        qkv_bias = None
        if self.q_bias is not None:
            qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))

        qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
        qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # each: (B, num_heads, N, head_dim)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
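

# A minimal sketch (not part of the original file) checking that Attention
# preserves the (batch, tokens, dim) shape; dim=768 and num_heads=12 are
# assumed example values.
if __name__ == "__main__":
    _attn = Attention(dim=768, num_heads=12, qkv_bias=True)
    _x = torch.randn(2, 197, 768)
    assert _attn(_x).shape == (2, 197, 768)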


class NormalCell(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, class_token=False, group=1,
                 tokens_type='transformer', kernel=3, mlp_hidden_dim=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.class_token = class_token
        if tokens_type == 'transformer':
            self.attn = Attention(
                dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        else:
            raise NotImplementedError()

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = mlp_hidden_dim if mlp_hidden_dim is not None else int(dim * mlp_ratio)
        PCM_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        # Parallel Convolutional Module (PCM): grouped convolutions applied to the
        # tokens reshaped into a 2D map, running in parallel with self-attention.
        self.PCM = nn.Sequential(
            nn.Conv2d(dim, PCM_dim, kernel, 1, kernel//2, 1, group),
            nn.BatchNorm2d(PCM_dim),
            nn.SiLU(inplace=True),
            nn.Conv2d(PCM_dim, dim, kernel, 1, kernel//2, 1, group),
        )

    def forward(self, x):
        b, n, c = x.shape
        if self.class_token:
            n = n - 1
            wh = int(math.sqrt(n))
            # PCM branch operates on the patch tokens only (class token excluded)
            convX = self.drop_path(self.PCM(x[:, 1:, :].view(b, wh, wh, c).permute(0, 3, 1, 2).contiguous()).permute(0, 2, 3, 1).contiguous().view(b, n, c))
            x = x + self.drop_path(self.attn(self.norm1(x)))
            x[:, 1:] = x[:, 1:] + convX
        else:
            wh = int(math.sqrt(n))
            x_2d = x.view(b, wh, wh, c).permute(0, 3, 1, 2).contiguous()
            convX = self.drop_path(self.PCM(x_2d).permute(0, 2, 3, 1).contiguous().view(b, n, c))
            x = x + self.drop_path(self.attn(self.norm1(x)))
            x = x + convX
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
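

# A minimal sketch (not part of the original file): NormalCell expects its patch
# tokens to form a square grid, e.g. 196 = 14*14 tokens plus one class token.
# dim=768 and group=768 // 4 mirror how the encoder below constructs its blocks.
if __name__ == "__main__":
    _cell = NormalCell(dim=768, num_heads=12, qkv_bias=True, class_token=True, group=768 // 4)
    _tokens = torch.randn(2, 1 + 14 * 14, 768)
    assert _cell(_tokens).shape == (2, 1 + 14 * 14, 768)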


class MaskedAutoencoderViTAE(nn.Module, PyTorchModelHubMixin):
    """ Masked Autoencoder with VisionTransformer backbone
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3,
                 embed_dim=768, depth=12, num_heads=12,
                 decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
                 mlp_ratio=4., norm_layer=partial(nn.LayerNorm, eps=1e-6), norm_pix_loss=False, kernel=3, mlp_hidden_dim=None):
        '''
        @Param kernel: int, controls the kernel size in PCM
        @Param mlp_hidden_dim: int, the hidden dimension of the FFN, overrides mlp_ratio, default None
        '''
        super().__init__()

        # --------------------------------------------------------------------------
        # MAE encoder specifics
        self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False)  # fixed sin-cos embedding

        self.blocks = nn.ModuleList([
            NormalCell(embed_dim, num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, norm_layer=norm_layer, kernel=kernel, class_token=True, group=embed_dim // 4, mlp_hidden_dim=mlp_hidden_dim)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # --------------------------------------------------------------------------

        # --------------------------------------------------------------------------
        # MAE decoder specifics
        self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True)

        self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim))

        self.decoder_pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, decoder_embed_dim), requires_grad=False)  # fixed sin-cos embedding

        self.decoder_blocks = nn.ModuleList([
            Block(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)
            for i in range(decoder_depth)])

        self.decoder_norm = norm_layer(decoder_embed_dim)
        self.decoder_pred = nn.Linear(decoder_embed_dim, patch_size**2 * in_chans, bias=True)  # decoder to patch
        # --------------------------------------------------------------------------

        self.norm_pix_loss = norm_pix_loss

        self.initialize_weights()

    def initialize_weights(self):
        # initialize (and freeze) pos_embed by sin-cos embedding
        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))

        decoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
        self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))

        # initialize patch_embed like nn.Linear (instead of nn.Conv2d)
        w = self.patch_embed.proj.weight.data
        torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))

        # initialize cls_token and mask_token with a small normal distribution
        torch.nn.init.normal_(self.cls_token, std=.02)
        torch.nn.init.normal_(self.mask_token, std=.02)

        # initialize nn.Linear and nn.LayerNorm
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            # use xavier_uniform following the official JAX ViT
            torch.nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def patchify(self, imgs):
        """
        imgs: (N, 3, H, W)
        x: (N, L, patch_size**2 *3)
        """
        p = self.patch_embed.patch_size[0]
        assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0

        h = w = imgs.shape[2] // p
        x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p))
        x = torch.einsum('nchpwq->nhwpqc', x)
        x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3))
        return x

    def unpatchify(self, x):
        """
        x: (N, L, patch_size**2 *3)
        imgs: (N, 3, H, W)
        """
        p = self.patch_embed.patch_size[0]
        h = w = int(x.shape[1]**.5)
        assert h * w == x.shape[1]

        x = x.reshape(shape=(x.shape[0], h, w, p, p, 3))
        x = torch.einsum('nhwpqc->nchpwq', x)
        imgs = x.reshape(shape=(x.shape[0], 3, h * p, h * p))
        return imgs
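
    # Example shapes (a sketch, not part of the original file): with the default
    # 224x224 inputs and p=16, patchify maps (N, 3, 224, 224) -> (N, 196, 768)
    # and unpatchify inverts it exactly; see the __main__ sketch at the end of
    # this file for a runnable round-trip check.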

    def random_masking(self, x, mask_ratio):
        """
        Perform per-sample random masking by per-sample shuffling.
        Per-sample shuffling is done by argsort random noise.
        x: [N, L, D], sequence
        """
        N, L, D = x.shape  # batch, length, dim
        len_keep = int(L * (1 - mask_ratio))

        noise = torch.rand(N, L, device=x.device)  # noise in [0, 1]

        # sort noise for each sample
        ids_shuffle = torch.argsort(noise, dim=1)  # ascend: small is keep, large is remove
        ids_restore = torch.argsort(ids_shuffle, dim=1)

        # keep the first subset
        ids_keep = ids_shuffle[:, :len_keep]
        x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).expand(-1, -1, D))

        # generate the binary mask: 0 is keep, 1 is remove
        mask = torch.ones([N, L], device=x.device)
        mask[:, :len_keep] = 0
        # unshuffle to get the binary mask
        mask = torch.gather(mask, dim=1, index=ids_restore)

        return x_masked, mask, ids_restore
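
    # Example shapes (a sketch, not part of the original file): with L=196 patch
    # tokens and mask_ratio=0.75, len_keep=49, so x_masked is (N, 49, D) and mask
    # is (N, 196) containing 147 ones; the __main__ sketch at the end of this
    # file exercises this path through forward().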

    def forward_encoder(self, x, mask_ratio):
        # embed patches
        x = self.patch_embed(x)

        # add pos embed w/o cls token
        x = x + self.pos_embed[:, 1:, :]

        # masking: length -> length * (1 - mask_ratio)
        x, mask, ids_restore = self.random_masking(x, mask_ratio)

        # append cls token
        cls_token = self.cls_token + self.pos_embed[:, :1, :]
        cls_tokens = cls_token.expand(x.shape[0], -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)

        # apply Transformer blocks
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)

        return x, mask, ids_restore

    def forward_decoder(self, x, ids_restore):
        # embed tokens
        x = self.decoder_embed(x)

        # append mask tokens to sequence
        mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1)
        x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1)  # no cls token
        x_ = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).expand(-1, -1, x.shape[2]))  # unshuffle
        x = torch.cat([x[:, :1, :], x_], dim=1)  # append cls token

        # add pos embed
        x = x + self.decoder_pos_embed

        # apply Transformer blocks
        for blk in self.decoder_blocks:
            x = blk(x)
        x = self.decoder_norm(x)

        # predictor projection
        x = self.decoder_pred(x)

        # remove cls token
        x = x[:, 1:, :]

        return x

    def forward_loss(self, imgs, pred, mask):
        """
        imgs: [N, 3, H, W]
        pred: [N, L, p*p*3]
        mask: [N, L], 0 is keep, 1 is remove
        """
        target = self.patchify(imgs)
        if self.norm_pix_loss:
            # normalize each target patch by its own mean and variance
            mean = target.mean(dim=-1, keepdim=True)
            var = target.var(dim=-1, keepdim=True)
            target = (target - mean) / (var + 1.e-6)**.5

        loss = (pred - target) ** 2
        loss = loss.mean(dim=-1)  # [N, L], mean loss per patch

        # mean loss on removed patches only
        loss = (loss * mask).sum() / mask.sum()
        return loss

    def forward(self, imgs, mask_ratio=0.75):
        latent, mask, ids_restore = self.forward_encoder(imgs, mask_ratio)
        pred = self.forward_decoder(latent, ids_restore)  # [N, L, p*p*3]
        loss = self.forward_loss(imgs, pred, mask)
        return loss, pred, mask
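

# A minimal end-to-end sketch (not part of the original file). The default
# constructor arguments (224x224 inputs, 16x16 patches, embed_dim=768) are the
# assumptions here; a real run would load pretrained weights instead of using
# a randomly initialized model.
if __name__ == "__main__":
    model = MaskedAutoencoderViTAE()
    imgs = torch.randn(2, 3, 224, 224)

    # patchify/unpatchify round trip: (2, 3, 224, 224) <-> (2, 196, 768)
    patches = model.patchify(imgs)
    assert patches.shape == (2, 14 * 14, 16 * 16 * 3)
    assert torch.allclose(model.unpatchify(patches), imgs, atol=1e-6)

    # with mask_ratio=0.75 only 49 of the 196 patch tokens are seen by the encoder
    loss, pred, mask = model(imgs, mask_ratio=0.75)
    assert pred.shape == (2, 14 * 14, 16 * 16 * 3)
    assert mask.shape == (2, 14 * 14)
    print(f"reconstruction loss: {loss.item():.4f}")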