import math
from functools import partial, reduce
from operator import mul

import torch
import torch.nn as nn

from timm.layers import PatchEmbed, to_2tuple
from timm.models.vision_transformer import VisionTransformer, _cfg

__all__ = [
    "vits4",
    "vits8",
    "vitb4",
    "vitb8",
]


class VisionTransformerMoCo(VisionTransformer):
    def __init__(self, stop_grad_conv1=False, **kwargs):
        super().__init__(**kwargs)
        # Use a fixed 2D sin-cos position embedding instead of a learned one.
        self.build_2d_sincos_position_embedding()

        # Weight initialization, following MoCo v3.
        for name, m in self.named_modules():
            if isinstance(m, nn.Linear):
                if "qkv" in name:
                    # Treat the concatenated Q, K, V weights as three separate
                    # matrices when computing the Xavier-uniform bound.
                    val = math.sqrt(
                        6.0 / float(m.weight.shape[0] // 3 + m.weight.shape[1])
                    )
                    nn.init.uniform_(m.weight, -val, val)
                else:
                    nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
        nn.init.normal_(self.cls_token, std=1e-6)

        if isinstance(self.patch_embed, PatchEmbed):
            # Xavier-uniform initialization of the patch projection, treated as
            # a linear layer with fan-in 3 * patch area and fan-out embed_dim.
            val = math.sqrt(
                6.0
                / float(
                    3 * reduce(mul, self.patch_embed.patch_size, 1) + self.embed_dim
                )
            )
            nn.init.uniform_(self.patch_embed.proj.weight, -val, val)
            nn.init.zeros_(self.patch_embed.proj.bias)

        # Optionally freeze the patch projection ("conv1"); MoCo v3 found this
        # stabilizes ViT training.
        if stop_grad_conv1:
            self.patch_embed.proj.weight.requires_grad = False
            self.patch_embed.proj.bias.requires_grad = False

    def build_2d_sincos_position_embedding(self, temperature=10000.0):
        h, w = self.patch_embed.grid_size
        grid_w = torch.arange(w, dtype=torch.float32)
        grid_h = torch.arange(h, dtype=torch.float32)
        # indexing="ij" matches the historical torch.meshgrid default and
        # silences the warning newer PyTorch emits when it is omitted.
        grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing="ij")
        assert (
            self.embed_dim % 4 == 0
        ), "Embed dimension must be divisible by 4 for 2D sin-cos position embedding"
        pos_dim = self.embed_dim // 4
        # Frequencies 1 / temperature**(i / pos_dim), as in the standard
        # Transformer sinusoidal embedding, applied to each spatial axis.
        omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
        omega = 1.0 / (temperature**omega)
        out_w = torch.einsum("m,d->md", grid_w.flatten(), omega)
        out_h = torch.einsum("m,d->md", grid_h.flatten(), omega)
        pos_emb = torch.cat(
            [torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)],
            dim=1,
        )[None, :, :]

        # Prefix tokens (cls token, etc.) get an all-zero embedding; the whole
        # position embedding is kept fixed during training.
        pe_token = torch.zeros(
            [1, self.num_prefix_tokens, self.embed_dim], dtype=torch.float32
        )
        self.pos_embed = nn.Parameter(torch.cat([pe_token, pos_emb], dim=1))
        self.pos_embed.requires_grad = False

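# Shape sanity check (a sketch, using the vits8 factory defined below): for a
# 224x224 input with patch_size=8 the grid is 28x28, so pos_embed has shape
# (1, 1 + 28 * 28, 384) -- one zero row for the cls token followed by 784
# fixed sin-cos rows -- and requires_grad is False:
#
#   m = vits8()
#   assert m.pos_embed.shape == (1, 1 + 28 * 28, 384)
#   assert not m.pos_embed.requires_grad
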
class ConvStem(nn.Module):
    """
    ConvStem, from "Early Convolutions Help Transformers See Better",
    Xiao et al., https://arxiv.org/abs/2106.14881
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        norm_layer=None,
        flatten=True,
    ):
        super().__init__()

        assert patch_size == 16, "ConvStem only supports patch size of 16"
        assert embed_dim % 8 == 0, "Embed dimension must be divisible by 8 for ConvStem"

        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten

        # Build the stem: four 3x3 stride-2 convs (each halves the resolution,
        # for an overall 16x downsample matching the patch size), with channels
        # doubling from embed_dim // 8 up to embed_dim, followed by a 1x1
        # projection to embed_dim.
        stem = []
        input_dim, output_dim = in_chans, embed_dim // 8
        for _ in range(4):
            stem.append(
                nn.Conv2d(
                    input_dim,
                    output_dim,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    bias=False,
                )
            )
            stem.append(nn.BatchNorm2d(output_dim))
            stem.append(nn.ReLU(inplace=True))
            input_dim = output_dim
            output_dim *= 2
        stem.append(nn.Conv2d(input_dim, embed_dim, kernel_size=1))
        self.proj = nn.Sequential(*stem)

        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        assert (
            H == self.img_size[0] and W == self.img_size[1]
        ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        x = self.norm(x)
        return x

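# ConvStem is intended as a drop-in replacement for timm's PatchEmbed via the
# embed_layer hook (a sketch, not wired into the factories below):
#
#   model = VisionTransformerMoCo(
#       patch_size=16, embed_dim=768, depth=12, num_heads=12,
#       embed_layer=ConvStem,
#   )
#
# Note that VisionTransformerMoCo.__init__ then skips the PatchEmbed-specific
# initialization, since isinstance(self.patch_embed, PatchEmbed) is False.
# Depending on the timm version, VisionTransformer may forward extra keyword
# arguments (e.g. bias) to embed_layer, in which case ConvStem's signature
# would need to accept them.
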
def vits(patch_size: int, **kwargs):
    """ViT-Small (embed_dim=384) with MoCo v3 initialization."""
    model = VisionTransformerMoCo(
        patch_size=patch_size,
        embed_dim=384,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    model.default_cfg = _cfg()
    return model


vits4 = partial(vits, patch_size=4)
vits8 = partial(vits, patch_size=8)

def vitb(patch_size: int, **kwargs):
    """ViT-Base (embed_dim=768) with MoCo v3 initialization."""
    model = VisionTransformerMoCo(
        patch_size=patch_size,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    model.default_cfg = _cfg()
    return model


vitb4 = partial(vitb, patch_size=4)
vitb8 = partial(vitb, patch_size=8)

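if __name__ == "__main__":
    # Minimal smoke test (a sketch; assumes a 224x224 RGB input). With
    # num_classes=0 timm returns pooled features of size embed_dim instead
    # of classification logits.
    model = vits8(num_classes=0)
    x = torch.randn(1, 3, 224, 224)
    feats = model(x)
    print(feats.shape)  # expected: torch.Size([1, 384])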