from collections import OrderedDict
from itertools import repeat
import collections.abc
import math

import torch
import torch.nn.functional as F
from torch import nn

from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper

from lavis.models.eva_vit import convert_weights_to_fp16
from lavis.common.dist_utils import download_cached_file


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()

        # All conv layers have stride 1. An avgpool is performed after the
        # second convolution when stride > 1 (anti-aliased downsampling).
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = nn.ReLU(inplace=True)

        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()

        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu3 = nn.ReLU(inplace=True)

        self.downsample = None
        self.stride = stride

        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # The downsampling shortcut is prepended with an avgpool; the
            # subsequent 1x1 convolution keeps stride 1.
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))

    def forward(self, x: torch.Tensor):
        identity = x

        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu3(out)
        return out
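
# Shape sketch (illustrative comment, not part of the original file): with
# stride > 1 the residual branch downsamples via AvgPool2d while channels grow
# by `expansion`, e.g.
#   block = Bottleneck(inplanes=64, planes=64, stride=2)
#   block(torch.randn(1, 64, 56, 56)).shape  # -> torch.Size([1, 256, 28, 28])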


class AttentionPool2d(nn.Module):
    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        x, _ = F.multi_head_attention_forward(
            query=x, key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )

        return x[0]
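
# Shape sketch (illustrative comment): the pooled query is the spatial mean
# token, so an NCHW feature map collapses to one vector per image, e.g.
#   pool = AttentionPool2d(spacial_dim=7, embed_dim=2048, num_heads=32, output_dim=1024)
#   pool(torch.randn(2, 2048, 7, 7)).shape  # -> torch.Size([2, 1024])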


class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: torch.Tensor):
        orig_type = x.dtype
        ret = super().forward(x.type(torch.float32))
        return ret.type(orig_type)


class QuickGELU(nn.Module):
    def forward(self, x: torch.Tensor):
        return x * torch.sigmoid(1.702 * x)
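
# Note (editorial comment): QuickGELU is the sigmoid-based GELU approximation
# the original CLIP weights were trained with; substituting nn.GELU would
# subtly change activations under these pretrained parameters.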


class ResidualAttentionBlock(nn.Module):
    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, use_grad_checkpointing=False):
        super().__init__()

        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

        if use_grad_checkpointing:
            # Trade compute for memory: recompute attention and MLP
            # activations during the backward pass.
            self.attn = checkpoint_wrapper(self.attn)
            self.mlp = checkpoint_wrapper(self.mlp)

    def attention(self, x: torch.Tensor):
        # Lazily move the mask to the input's dtype/device on first use.
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: torch.Tensor):
        # Pre-norm residual layout: LN -> attention -> add, LN -> MLP -> add.
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x
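
# Shape sketch (illustrative comment): the block is shape-preserving over
# (seq_len, batch, d_model), the layout nn.MultiheadAttention expects with the
# default batch_first=False, e.g.
#   blk = ResidualAttentionBlock(d_model=512, n_head=8)
#   blk(torch.randn(10, 2, 512)).shape  # -> torch.Size([10, 2, 512])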


class Transformer(nn.Module):
    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_grad_checkpointing=False):
        super().__init__()
        self.width = width
        self.layers = layers
        # Gradient checkpointing is only applied to blocks deeper than
        # index 12; shallower blocks run without recomputation.
        self.resblocks = nn.Sequential(*[
            ResidualAttentionBlock(width, heads, attn_mask, use_grad_checkpointing and i > 12)
            for i in range(layers)
        ])

    def forward(self, x: torch.Tensor):
        return self.resblocks(x)


class VisionTransformer(nn.Module):
    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, use_grad_checkpointing: bool):
        super().__init__()
        self.input_resolution = input_resolution
        self.num_features = width
        self.num_heads = heads
        self.num_patches = (input_resolution // patch_size) ** 2
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn(self.num_patches + 1, width))
        self.ln_pre = LayerNorm(width)

        self.transformer = Transformer(width, layers, heads, use_grad_checkpointing=use_grad_checkpointing)

    def forward(self, x: torch.Tensor):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        x = torch.cat(
            [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x],
            dim=1,
        )  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        return x
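
    # Shape sketch (illustrative comment): with input_resolution=224 and
    # patch_size=16, forward maps (N, 3, 224, 224) to (N, 14 * 14 + 1, width):
    # 196 patch tokens plus one prepended class token.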

    def get_num_layer(self, var_name=""):
        if var_name in ("class_embedding", "positional_embedding", "conv1", "ln_pre"):
            return 0
        elif var_name.startswith("transformer.resblocks"):
            layer_id = int(var_name.split('.')[2])
            return layer_id + 1
        else:
            return len(self.transformer.resblocks)
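
# Example (illustrative comment, e.g. for layer-wise lr decay): the parameter
# name "transformer.resblocks.5.attn.in_proj_weight" maps to layer id 6, stem
# parameters map to 0, and anything else maps to the model depth.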


def _ntuple(n):
    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            return x
        return tuple(repeat(x, n))
    return parse


to_2tuple = _ntuple(2)
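
# Example (illustrative comment): to_2tuple(7) -> (7, 7), while an iterable
# such as to_2tuple((7, 9)) is returned unchanged.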


def interpolate_pos_embed(model, state_dict, interpolation: str = 'bicubic', seq_dim=1):
    # Rescale the grid of position embeddings when loading a state_dict whose
    # spatial resolution differs from the model's.
    old_pos_embed = state_dict.get('positional_embedding', None)
    if old_pos_embed is None:
        return

    grid_size = to_2tuple(round((model.positional_embedding.shape[0] - 1) ** 0.5))
    extra_tokens = 1  # the class token
    new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
    if new_seq_len == old_pos_embed.shape[0]:
        return

    if extra_tokens:
        pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
    else:
        pos_emb_tok, pos_emb_img = None, old_pos_embed

    old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))

    print('Resizing position embedding grid-size from %s to %s' % (old_grid_size, grid_size))
    pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
    pos_emb_img = F.interpolate(
        pos_emb_img,
        size=grid_size,
        mode=interpolation,
        align_corners=True,
    )
    pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
    if pos_emb_tok is not None:
        new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
    else:
        new_pos_embed = pos_emb_img
    state_dict['positional_embedding'] = new_pos_embed
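
# Usage sketch (illustrative comment; sizes hypothetical): resizing a
# checkpoint trained on a 16x16 patch grid for a model built with a 24x24
# grid, in place, before load_state_dict:
#   sd = {'positional_embedding': torch.randn(16 * 16 + 1, 1024)}
#   interpolate_pos_embed(model_with_24x24_grid, sd)  # sd is modified in place
#   sd['positional_embedding'].shape  # -> torch.Size([24 * 24 + 1, 1024])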


def create_clip_vit_L(img_size=224, use_checkpoint=False, precision="fp16"):
    # Note: layers=23 builds one block fewer than the standard 24-layer CLIP
    # ViT-L/14; strict=False below tolerates the resulting key mismatch.
    model = VisionTransformer(
        input_resolution=img_size,
        patch_size=14,
        width=1024,
        layers=23,
        heads=16,
        use_grad_checkpointing=use_checkpoint,
    )
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/clip_vit_L.pth"
    cached_file = download_cached_file(url, check_hash=False, progress=True)
    state_dict = torch.load(cached_file, map_location="cpu")
    interpolate_pos_embed(model, state_dict)

    incompatible_keys = model.load_state_dict(state_dict, strict=False)

    if precision == "fp16":
        convert_weights_to_fp16(model)
    return model
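

# Minimal usage sketch (illustrative, not part of the original file). Assumes
# network access to the LAVIS checkpoint URL above; fp32 keeps the example
# CPU-friendly. A 224px input yields (224 // 14) ** 2 + 1 = 257 tokens.
if __name__ == "__main__":
    vit = create_clip_vit_L(img_size=224, use_checkpoint=False, precision="fp32")
    images = torch.randn(2, 3, 224, 224)
    with torch.no_grad():
        features = vit(images)
    print(features.shape)  # torch.Size([2, 257, 1024])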