| import math |
| import numpy as np |
| import torch |
| import torch.nn.functional as F |
| import torch.utils.checkpoint as checkpoint |
from functools import partial, reduce
from einops import rearrange
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from typing import Optional, Tuple, Union, Dict
| from PIL import Image |
| from torch import nn |
| from transformers.image_processing_utils import BatchFeature, get_size_dict |
| from transformers.image_transforms import ( |
| convert_to_rgb, |
| normalize, |
| rescale, |
| resize, |
| to_channel_dimension_format, |
| ) |
| from transformers.image_utils import ( |
| ChannelDimension, |
| PILImageResampling, |
| to_numpy_array, |
| ) |
|
|
# flash-attn is optional: the model falls back to the naive attention path
# (see Attention._naive_attn) when these imports are unavailable.
try:
    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func
    from flash_attn.bert_padding import unpad_input, pad_input
except ImportError:
    pass
|
|
| class FlashAttention(nn.Module): |
| """Implement the scaled dot product attention with softmax. |
| Arguments |
| --------- |
| softmax_scale: The temperature to use for the softmax attention. |
| (default: 1/sqrt(d_keys) where d_keys is computed at |
| runtime) |
| attention_dropout: The dropout rate to apply to the attention |
| (default: 0.0) |
| """ |
|
|
| def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None): |
| super().__init__() |
| self.softmax_scale = softmax_scale |
| self.dropout_p = attention_dropout |
|
|
| def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None, |
| max_s=None, need_weights=False): |
| """Implements the multihead softmax attention. |
| Arguments |
| --------- |
| qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None |
| if unpadded: (nnz, 3, h, d) |
| key_padding_mask: a bool tensor of shape (B, S) |
| """ |
| assert not need_weights |
| assert qkv.dtype in [torch.float16, torch.bfloat16] |
| assert qkv.is_cuda |
|
|
| if cu_seqlens is None: |
| batch_size = qkv.shape[0] |
| seqlen = qkv.shape[1] |
| if key_padding_mask is None: |
| qkv = rearrange(qkv, 'b s ... -> (b s) ...') |
| max_s = seqlen |
| cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32, |
| device=qkv.device) |
| output = flash_attn_varlen_qkvpacked_func( |
| qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, |
| softmax_scale=self.softmax_scale, causal=causal |
| ) |
| output = rearrange(output, '(b s) ... -> b s ...', b=batch_size) |
| else: |
| nheads = qkv.shape[-2] |
| x = rearrange(qkv, 'b s three h d -> b s (three h d)') |
| x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask) |
| x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads) |
| output_unpad = flash_attn_varlen_qkvpacked_func( |
| x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, |
| softmax_scale=self.softmax_scale, causal=causal |
| ) |
| output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), |
| indices, batch_size, seqlen), |
| 'b s (h d) -> b s h d', h=nheads) |
| else: |
| assert max_s is not None |
| output = flash_attn_varlen_qkvpacked_func( |
| qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, |
| softmax_scale=self.softmax_scale, causal=causal |
| ) |
|
|
| return output, None |
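

# Illustrative usage sketch (not part of the original model): FlashAttention expects
# packed qkv on a CUDA device in fp16/bf16. The head count (16) and head width (88)
# below match the 1408-dim backbone defined later in this file.
def _example_flash_attention():  # documentation only; never called by the model
    attn = FlashAttention(attention_dropout=0.0)
    qkv = torch.randn(2, 257, 3, 16, 88, device="cuda", dtype=torch.float16)
    out, _ = attn(qkv)  # out: (2, 257, 16, 88)
    return out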
|
|
| def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False): |
| """ |
| grid_size: int of the grid height and width |
| return: |
| pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) |
| """ |
| grid_h = np.arange(grid_size, dtype=np.float32) |
| grid_w = np.arange(grid_size, dtype=np.float32) |
| grid = np.meshgrid(grid_w, grid_h) |
| grid = np.stack(grid, axis=0) |
|
|
| grid = grid.reshape([2, 1, grid_size, grid_size]) |
| pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) |
| if cls_token: |
| pos_embed = np.concatenate( |
| [np.zeros([1, embed_dim]), pos_embed], axis=0 |
| ) |
| return pos_embed |
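

# Illustrative shape check (not part of the original code): a 16x16 patch grid with a
# 1408-dim embedding yields one sin/cos row per grid position, plus an all-zero cls row.
def _example_2d_sincos():
    pos = get_2d_sincos_pos_embed(embed_dim=1408, grid_size=16, cls_token=True)
    # pos.shape == (1 + 16 * 16, 1408)
    return pos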
|
|
|
|
| def get_1d_sincos_pos_embed(embed_dim, t_size, cls_token=False): |
| """ |
| t_size: int of the temporal size |
| return: |
| pos_embed: [t_size, embed_dim] or [1+t_size, embed_dim] (w/ or w/o cls_token) |
| """ |
| grid_t = np.arange(t_size, dtype=np.float32) |
| pos_embed = get_1d_sincos_pos_embed_from_grid(embed_dim, grid_t) |
| if cls_token: |
| pos_embed = np.concatenate( |
| [np.zeros([1, embed_dim]), pos_embed], axis=0 |
| ) |
| return pos_embed |
|
|
|
|
| def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): |
| assert embed_dim % 2 == 0 |
|
|
| |
    # use half of the dimensions to encode grid_h, the other half to encode grid_w
    emb_h = get_1d_sincos_pos_embed_from_grid(
        embed_dim // 2, grid[0]
    )  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(
        embed_dim // 2, grid[1]
    )  # (H*W, D/2)
|
|
| emb = np.concatenate([emb_h, emb_w], axis=1) |
| return emb |
|
|
|
|
| def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): |
| """ |
| embed_dim: output dimension for each position |
| pos: a list of positions to be encoded: size (M,) |
| out: (M, D) |
| """ |
| assert embed_dim % 2 == 0 |
| omega = np.arange(embed_dim // 2, dtype=np.float32) |
| omega /= embed_dim / 2.0 |
| omega = 1.0 / 10000**omega |
|
|
| pos = pos.reshape(-1) |
| out = np.einsum("m,d->md", pos, omega) |
|
|
| emb_sin = np.sin(out) |
| emb_cos = np.cos(out) |
|
|
| emb = np.concatenate([emb_sin, emb_cos], axis=1) |
| return emb |
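

# Worked example (illustrative): with embed_dim = 4 the frequencies are
# omega = 1 / 10000**[0, 0.5] = [1.0, 0.01], so position p is encoded as
# [sin(p), sin(0.01 * p), cos(p), cos(0.01 * p)].
def _example_1d_sincos():
    emb = get_1d_sincos_pos_embed_from_grid(4, np.arange(3, dtype=np.float32))
    # emb.shape == (3, 4); emb[0] == [0, 0, 1, 1] because sin(0) = 0 and cos(0) = 1
    return emb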
|
|
| def get_3d_sincos_pos_embed(embed_dim, grid_size, t_size, cls_token=False): |
| """ |
| grid_size: int of the grid height and width |
| t_size: int of the temporal size |
| return: |
| pos_embed: [t_size*grid_size*grid_size, embed_dim] or [1+t_size*grid_size*grid_size, embed_dim] (w/ or w/o cls_token) |
| """ |
| assert embed_dim % 4 == 0 |
| embed_dim_spatial = embed_dim // 4 * 3 |
| embed_dim_temporal = embed_dim // 4 |
|
|
| |
    # spatial position embedding
    grid_h = np.arange(grid_size, dtype=np.float32)
| grid_w = np.arange(grid_size, dtype=np.float32) |
| grid = np.meshgrid(grid_w, grid_h) |
| grid = np.stack(grid, axis=0) |
|
|
| grid = grid.reshape([2, 1, grid_size, grid_size]) |
| pos_embed_spatial = get_2d_sincos_pos_embed_from_grid( |
| embed_dim_spatial, grid |
| ) |
|
|
| |
    # temporal position embedding
    grid_t = np.arange(t_size, dtype=np.float32)
| pos_embed_temporal = get_1d_sincos_pos_embed_from_grid( |
| embed_dim_temporal, grid_t |
| ) |
|
|
| |
    # broadcast the temporal embedding over spatial positions and vice versa
    pos_embed_temporal = pos_embed_temporal[:, np.newaxis, :]
| pos_embed_temporal = np.repeat( |
| pos_embed_temporal, grid_size**2, axis=1 |
| ) |
| pos_embed_spatial = pos_embed_spatial[np.newaxis, :, :] |
| pos_embed_spatial = np.repeat( |
| pos_embed_spatial, t_size, axis=0 |
| ) |
|
|
| pos_embed = np.concatenate([pos_embed_temporal, pos_embed_spatial], axis=-1) |
| pos_embed = pos_embed.reshape([-1, embed_dim]) |
|
|
| if cls_token: |
| pos_embed = np.concatenate( |
| [np.zeros([1, embed_dim]), pos_embed], axis=0 |
| ) |
| return pos_embed |
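

# Illustrative note (not from the original code): the channel dimension is split 1/4
# temporal and 3/4 spatial (e.g. 1408 -> 352 + 1056); both parts are broadcast over the
# T x (H*W) grid and concatenated, giving one row per space-time position.
def _example_3d_sincos():
    pos = get_3d_sincos_pos_embed(embed_dim=1408, grid_size=16, t_size=8, cls_token=True)
    # pos.shape == (1 + 8 * 16 * 16, 1408)
    return pos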
|
|
|
|
|
|
| class RMSNorm(nn.Module): |
| def __init__(self, hidden_size, eps=1e-6): |
| super().__init__() |
| self.weight = nn.Parameter(torch.ones(hidden_size)) |
| self.variance_epsilon = eps |
| |
| def forward(self, hidden_states): |
| input_dtype = hidden_states.dtype |
| hidden_states = hidden_states.to(torch.float32) |
| variance = hidden_states.pow(2).mean(-1, keepdim=True) |
| hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) |
| return self.weight * hidden_states.to(input_dtype) |
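

# Quick sketch of what RMSNorm computes (illustrative): each vector is divided by its
# root-mean-square and rescaled by a learned per-channel weight; unlike LayerNorm there
# is no mean subtraction and no bias.
def _example_rmsnorm():
    norm = RMSNorm(hidden_size=4)
    x = torch.tensor([[3.0, -3.0, 3.0, -3.0]])
    # RMS(x) == 3, so the output is ~[1, -1, 1, -1] (the weight is initialised to ones)
    return norm(x)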
|
|
|
|
| class LayerScale(nn.Module): |
| def __init__(self, dim, init_values=1e-5, inplace=False, force_fp32=False): |
| super().__init__() |
| self.inplace = inplace |
| self.weight = nn.Parameter(init_values * torch.ones(dim)) |
| self.force_fp32 = force_fp32 |
| |
| @torch.cuda.amp.autocast(enabled=False) |
| def forward(self, x): |
| if self.force_fp32: |
| output_type = x.dtype |
| out = x.float().mul_(self.weight.float()) if self.inplace else x.float() * self.weight.float() |
| return out.to(dtype=output_type) |
| else: |
| out = x.mul_(self.weight) if self.inplace else x * self.weight |
| return out |
|
|
|
|
| class Attention(nn.Module): |
| def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., use_flash_attn=False, |
| causal=False, norm_layer=nn.LayerNorm, qk_normalization=False, use_fused_rmsnorm=False): |
| super().__init__() |
| assert dim % num_heads == 0, 'dim should be divisible by num_heads' |
| self.num_heads = num_heads |
| head_dim = dim // num_heads |
| self.scale = head_dim ** -0.5 |
| |
| self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) |
| self.attn_drop = nn.Dropout(attn_drop) |
| self.proj = nn.Linear(dim, dim) |
| self.proj_drop = nn.Dropout(proj_drop) |
| |
| self.use_flash_attn = use_flash_attn |
| if use_flash_attn: |
| self.causal = causal |
| self.inner_attn = FlashAttention(attention_dropout=attn_drop) |
| |
| self.qk_normalization = qk_normalization |
| self.q_norm = norm_layer(dim) if qk_normalization else nn.Identity() |
| self.k_norm = norm_layer(dim) if qk_normalization else nn.Identity() |
| self.use_fused_rmsnorm = use_fused_rmsnorm |
| |
| def _naive_attn(self, x): |
| B, N, C = x.shape |
| |
| qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) |
| q, k, v = qkv.unbind(0) |
| |
| if self.qk_normalization: |
| B_, H_, N_, D_ = q.shape |
| q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2) |
| k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2) |
| |
| attn = ((q * self.scale) @ k.transpose(-2, -1)) |
| |
| attn = attn.softmax(dim=-1) |
| attn = self.attn_drop(attn) |
| |
| x = (attn @ v).transpose(1, 2).reshape(B, N, C) |
| |
| |
| x = self.proj(x) |
| x = self.proj_drop(x) |
| return x |
| |
| def _flash_attn(self, x, key_padding_mask=None, need_weights=False): |
| |
| qkv = self.qkv(x) |
| qkv = rearrange(qkv, "b s (three h d) -> b s three h d", three=3, h=self.num_heads) |
| |
| if self.qk_normalization: |
| q, k, v = qkv.unbind(2) |
| if self.use_fused_rmsnorm: |
| q = self.q_norm(q.flatten(-2, -1))[0].view(q.shape) |
| k = self.k_norm(k.flatten(-2, -1))[0].view(k.shape) |
| else: |
| q = self.q_norm(q.flatten(-2, -1)).view(q.shape) |
| k = self.k_norm(k.flatten(-2, -1)).view(k.shape) |
| qkv = torch.stack([q, k, v], dim=2) |
| |
| context, _ = self.inner_attn( |
| qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=self.causal |
| ) |
| outs = self.proj(rearrange(context, "b s h d -> b s (h d)")) |
| outs = self.proj_drop(outs) |
| return outs |
| |
| def forward(self, x): |
| x = self._naive_attn(x) if not self.use_flash_attn else self._flash_attn(x) |
| return x |
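

# Illustrative usage sketch (not part of the original code): with use_flash_attn=False
# the naive fp32 path is used, which is convenient for CPU shape checks.
def _example_attention():
    attn = Attention(dim=64, num_heads=8, qkv_bias=True, use_flash_attn=False)
    x = torch.randn(2, 10, 64)
    return attn(x)  # same shape as the input: (2, 10, 64)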
|
|
|
|
| class Mlp(nn.Module): |
| """ MLP as used in Vision Transformer, MLP-Mixer and related networks |
| """ |
| |
| def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, |
| bias=True, drop=0.): |
| super().__init__() |
| out_features = out_features or in_features |
| hidden_features = hidden_features or in_features |
| bias = to_2tuple(bias) |
| drop_probs = to_2tuple(drop) |
| |
| self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) |
| self.act = act_layer() |
| self.drop1 = nn.Dropout(drop_probs[0]) |
| self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) |
| self.drop2 = nn.Dropout(drop_probs[1]) |
| |
| def forward(self, x): |
| x = self.fc1(x) |
| x = self.act(x) |
| x = self.drop1(x) |
| x = self.fc2(x) |
| x = self.drop2(x) |
| return x |
|
|
|
|
| class Block(nn.Module): |
| |
| def __init__( |
| self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., init_values=None, |
| drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_flash_attn=False, use_fused_mlp=False, |
| fused_mlp_heuristic=1, with_cp=False, qk_normalization=False, layerscale_no_force_fp32=False, |
| use_fused_rmsnorm=False): |
| super().__init__() |
| |
| self.norm1 = norm_layer(dim) |
| self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, |
| use_flash_attn=use_flash_attn, causal=False, norm_layer=norm_layer, |
| qk_normalization=qk_normalization, |
| use_fused_rmsnorm=use_fused_rmsnorm) |
| self.ls1 = LayerScale(dim, init_values=init_values, |
| force_fp32=(not layerscale_no_force_fp32)) if init_values else nn.Identity() |
| |
| self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() |
| |
| self.norm2 = norm_layer(dim) |
| mlp_hidden_dim = int(dim * mlp_ratio) |
        if use_fused_mlp:
            # FusedMLP (flash-attn's fused MLP kernel) is not bundled with this standalone file:
            # self.mlp = FusedMLP(in_features=dim, hidden_features=mlp_hidden_dim, heuristic=fused_mlp_heuristic)
            raise NotImplementedError
| else: |
| self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) |
| self.ls2 = LayerScale(dim, init_values=init_values, |
| force_fp32=(not layerscale_no_force_fp32)) if init_values else nn.Identity() |
| self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() |
| |
| self.with_cp = with_cp |
| self.use_fused_rmsnorm = use_fused_rmsnorm |
| |
| def forward(self, x, residual=None): |
| |
| def _inner_forward(x, residual=None): |
| if self.use_fused_rmsnorm: |
| x, residual = self.norm1(x, residual) |
| x = self.drop_path1(self.ls1(self.attn(x))) |
| x, residual = self.norm2(x, residual) |
| x = self.drop_path2(self.ls2(self.mlp(x))) |
| return x, residual |
| else: |
| assert residual is None |
| x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x)))) |
| x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) |
| return x |
| |
| if self.with_cp: |
| |
| return checkpoint.checkpoint(_inner_forward, x, residual) |
| else: |
| return _inner_forward(x, residual=residual) |
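

# Illustrative sketch (assumption, not from the original code): a Block is the standard
# pre-norm transformer unit, x + DropPath(LS(Attn(Norm(x)))) followed by
# x + DropPath(LS(MLP(Norm(x)))), so the output keeps the input shape.
def _example_block():
    blk = Block(dim=64, num_heads=8, mlp_ratio=4.0, init_values=1e-5,
                norm_layer=partial(RMSNorm, eps=1e-6))
    x = torch.randn(2, 10, 64)
    return blk(x)  # (2, 10, 64)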
|
|
|
|
| class PatchEmbed(nn.Module): |
| """ 3D Image to Patch Embedding |
| """ |
| |
| def __init__( |
| self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, |
| num_frames=8, tubelet_size=1, norm_layer=None |
| ): |
| super().__init__() |
| img_size = to_2tuple(img_size) |
| patch_size = to_2tuple(patch_size) |
| self.img_size = img_size |
| self.patch_size = patch_size |
| self.grid_size = ( |
| num_frames // tubelet_size, |
| img_size[0] // patch_size[0], |
| img_size[1] // patch_size[1] |
| ) |
| self.num_patches = self.grid_size[0] * self.grid_size[1] * self.grid_size[2] |
| self.num_img_patches = self.grid_size[1] * self.grid_size[2] |
|
|
| self.proj = nn.Conv3d( |
| in_channels=in_chans, out_channels=embed_dim, |
| kernel_size=(tubelet_size, patch_size[0], patch_size[1]), |
| stride=(tubelet_size, patch_size[0], patch_size[1]) |
| ) |
| self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() |
| |
    def forward(self, x):
        x = self.proj(x)  # (B, C, T, H, W) -> (B, embed_dim, T', H', W')
        x = x.flatten(3).permute(0, 2, 3, 1)  # -> (B, T', H'*W', embed_dim)
        x = self.norm(x)
        return x
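

# Illustrative shape walk-through (assumption: the defaults used by the backbone below).
# A 224x224 frame with 14x14 patches yields 16 * 16 = 256 tokens per frame.
def _example_patch_embed():
    embed = PatchEmbed(img_size=224, patch_size=14, in_chans=3, embed_dim=1408, num_frames=8)
    video = torch.randn(2, 3, 8, 224, 224)  # (B, C, T, H, W)
    tokens = embed(video)
    # tokens.shape == (2, 8, 256, 1408): (B, T, patches_per_frame, embed_dim)
    return tokens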
|
|
|
|
| class PretrainVisionTransformer_clean(nn.Module): |
| def __init__( |
| self, |
| in_chans: int = 3, |
| patch_size: int = 14, |
| img_size: int = 224, |
| qkv_bias: bool = False, |
| drop_path_rate: float = 0.25, |
| embed_dim: int = 1408, |
| num_heads: int = 16, |
| mlp_ratio: float = 48/11, |
| init_values: float = 1e-5, |
| qk_normalization: bool = True, |
| depth: int = 40, |
| use_flash_attn: bool = True, |
| use_fused_rmsnorm: bool = True, |
| use_fused_mlp: bool = True, |
| fused_mlp_heuristic: int = 1, |
| attn_pool_num_heads: int = 16, |
| clip_embed_dim: int = 768, |
| layerscale_no_force_fp32: bool = False, |
| num_frames: int = 8, |
| tubelet_size: int = 1, |
| sep_pos_embed: bool = False, |
| sep_image_video_pos_embed: bool = False, |
| use_checkpoint: bool = False, |
| checkpoint_num: int = 0, |
| |
| x_vis_return_idx=-1, |
| x_vis_only=False |
| ): |
| super().__init__() |
| |
| self.num_frames = num_frames |
| self.tubelet_size = tubelet_size |
| |
| |
| self.use_flash_attn = use_flash_attn |
| self.embed_dim = embed_dim |
|
|
| print(f"Origin depth: {depth}") |
| depth = depth + x_vis_return_idx + 1 |
| print(f"New depth: {depth}") |
| self.depth = depth |
|
|
| self.x_vis_only = x_vis_only |
|
|
        if use_fused_rmsnorm:
            # DropoutAddRMSNorm (flash-attn's fused kernel) is not bundled with this standalone file:
            # norm_layer_for_blocks = partial(DropoutAddRMSNorm, eps=1e-6, prenorm=True)
            raise NotImplementedError
| else: |
| norm_layer_for_blocks = partial(RMSNorm, eps=1e-6) |
| self.norm_layer_for_blocks = norm_layer_for_blocks |
| self.patch_embed = PatchEmbed( |
| img_size, patch_size, in_chans, embed_dim, |
| num_frames=num_frames, tubelet_size=tubelet_size, |
| ) |
| num_patches = self.patch_embed.num_patches |
| num_img_patches = self.patch_embed.num_img_patches |
| |
| self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) |
| |
| |
| self.sep_pos_embed = sep_pos_embed |
| self.sep_image_video_pos_embed = sep_image_video_pos_embed |
| if sep_pos_embed: |
| raise NotImplementedError |
| else: |
            if sep_image_video_pos_embed:
                print("Using separate position embeddings for image and video inputs.")
                self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
                self.img_pos_embed = nn.Parameter(torch.zeros(1, num_img_patches + 1, embed_dim))
            else:
                print("Using a joint position embedding shared by image and video inputs.")
                self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
|
|
| dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] |
| |
| with_cp_list = [False] * depth |
| if use_checkpoint: |
| for idx in range(depth): |
| if idx < checkpoint_num: |
| with_cp_list[idx] = True |
| print(f"Droppath rate: {dpr}") |
| print(f"Checkpoint list: {with_cp_list}") |
| |
| self.blocks = nn.ModuleList([ |
| Block(embed_dim, num_heads, mlp_ratio, qkv_bias=qkv_bias, |
| norm_layer=norm_layer_for_blocks, |
| drop_path=dpr[i], init_values=init_values, attn_drop=0., |
| use_flash_attn=use_flash_attn, use_fused_mlp=use_fused_mlp, |
| fused_mlp_heuristic=fused_mlp_heuristic, |
| with_cp=with_cp_list[i], |
| qk_normalization=qk_normalization, |
| layerscale_no_force_fp32=layerscale_no_force_fp32, |
| use_fused_rmsnorm=use_fused_rmsnorm) |
| for i in range(depth)]) |
| |
| if not self.x_vis_only: |
| raise NotImplementedError |
|
|
| self.init_pos_embed() |
| trunc_normal_(self.cls_token, std=.02) |
| self.apply(self._init_weights) |
| self.fix_init_weight() |
|
|
| def init_pos_embed(self): |
| print("Init pos_embed from sincos pos_embed") |
| if self.sep_pos_embed: |
| raise NotImplementedError |
| else: |
| pos_embed = get_3d_sincos_pos_embed( |
| self.pos_embed.shape[-1], |
| self.patch_embed.grid_size[1], |
| self.patch_embed.grid_size[0], |
| cls_token=True |
| ) |
| self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0)) |
|
|
| if self.sep_image_video_pos_embed: |
| img_pos_embed = get_3d_sincos_pos_embed( |
| self.pos_embed.shape[-1], |
| self.patch_embed.grid_size[1], |
| 1, |
| cls_token=True |
| ) |
| self.img_pos_embed.data.copy_(torch.from_numpy(img_pos_embed).float().unsqueeze(0)) |
|
|
|
|
| def _init_weights(self, m): |
| if isinstance(m, nn.Linear): |
| trunc_normal_(m.weight, std=.02) |
| if isinstance(m, nn.Linear) and m.bias is not None: |
| nn.init.constant_(m.bias, 0) |
| elif isinstance(m, nn.LayerNorm): |
| nn.init.constant_(m.bias, 0) |
| nn.init.constant_(m.weight, 1.0) |
|
|
| def fix_init_weight(self): |
| def rescale(param, layer_id): |
| param.div_(math.sqrt(2.0 * layer_id)) |
|
|
| for layer_id, layer in enumerate(self.blocks): |
| rescale(layer.attn.proj.weight.data, layer_id + 1) |
| rescale(layer.mlp.fc2.weight.data, layer_id + 1) |
| |
| @property |
| def dtype(self): |
| return self.patch_embed.proj.weight.dtype |
|
|
| def get_num_layers(self): |
| return len(self.blocks) |
|
|
| @torch.jit.ignore |
| def no_weight_decay(self): |
| return { |
| 'pos_embed', |
| 'pos_embed_spatial', |
| 'pos_embed_temporal', |
| 'pos_embed_cls', |
| 'img_pos_embed', |
| 'cls_token' |
| } |
| |
| |
| def forward(self, x, mask=None, use_image=False): |
| x = self.patch_embed(x.type(self.dtype)) |
| |
| B, T, L, C = x.shape |
| x = x.view([B, T * L, C]) |
|
|
| |
| cls_tokens = self.cls_token.expand(B, -1, -1) |
| x = torch.cat((cls_tokens, x), dim=1) |
|
|
| |
| if self.sep_pos_embed: |
| raise NotImplementedError |
| else: |
            if use_image:
                if self.sep_image_video_pos_embed:
                    pos_embed = self.img_pos_embed
                else:
                    # No dedicated image pos_embed: reuse the video embedding by keeping
                    # the cls position and averaging the spatial positions over time.
                    cls_pos_embed = self.pos_embed[:, 0:1, :]
                    img_pos_embed = self.pos_embed[:, 1:, :].view(
                        1, self.num_frames, self.patch_embed.num_patches // self.num_frames, self.embed_dim
                    ).mean(dim=1)
                    pos_embed = torch.cat([cls_pos_embed, img_pos_embed], dim=1)
            else:
                pos_embed = self.pos_embed

        x = x + pos_embed
|
|
| |
| if mask is not None: |
| x = x[~mask].reshape(B, -1, C) |
| else: |
| x = x.reshape(B, -1, C) |
|
|
| residual = None |
|
|
| for idx, blk in enumerate(self.blocks): |
| if isinstance(x, tuple) and len(x) == 2: |
| x, residual = x |
| x = blk(x, residual=residual) |
|
|
| if isinstance(x, tuple) and len(x) == 2: |
| x, residual = x |
| if residual is not None: |
| x = x + residual |
| |
| x_vis = x |
| if self.x_vis_only: |
| return x_vis |
| else: |
| x_pool_vis = self.clip_projector(x_vis) |
| return x_vis, x_pool_vis, None, None |
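

# Illustrative usage sketch (assumptions: a tiny depth and CPU-friendly settings, i.e.
# flash-attn and the fused kernels disabled; the released checkpoints use depth=40).
def _example_vit_backbone():
    vit = PretrainVisionTransformer_clean(
        depth=2, use_flash_attn=False, use_fused_rmsnorm=False, use_fused_mlp=False,
        num_frames=4, x_vis_return_idx=-1, x_vis_only=True,
    )
    video = torch.randn(1, 3, 4, 224, 224)  # (B, C, T, H, W)
    feats = vit(video)
    # feats.shape == (1, 1 + 4 * 256, 1408): a cls token plus 256 tokens per frame
    return feats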
| |
|
| class InternVideo2ImageProcessor: |
    def __init__(self, image_mean=(0.485, 0.456, 0.406), image_std=(0.229, 0.224, 0.225), size=(224, 224),
                 crop_size: Dict[str, int] = None, resample=PILImageResampling.BICUBIC,
                 rescale_factor=1 / 255, data_format=ChannelDimension.FIRST):
| crop_size = crop_size if crop_size is not None else {"height": size[0], "width": size[1]} |
| crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size") |
|
|
| self.image_mean = image_mean |
| self.image_std = image_std |
| self.size = size |
| self.resample = resample |
| self.rescale_factor = rescale_factor |
| self.data_format = data_format |
| self.crop_size = crop_size |
|
|
    def preprocess(self, images, return_tensors, target_size=None):
        if isinstance(images, Image.Image):
            images = [images]
        else:
            # video frames arrive as a list/array of images; convert each to numpy first
            images = [to_numpy_array(image) for image in images]
        assert isinstance(images, list)
|
|
| if target_size is None: |
| target_size = self.size |
| |
| transforms = [ |
| convert_to_rgb, |
| to_numpy_array, |
| partial(resize, size=target_size, resample=self.resample, data_format=self.data_format), |
| partial(rescale, scale=self.rescale_factor, data_format=self.data_format), |
| partial(normalize, mean=self.image_mean, std=self.image_std, data_format=self.data_format), |
| partial(to_channel_dimension_format, channel_dim=self.data_format, input_channel_dim=self.data_format), |
| ] |
|
|
| images = reduce(lambda x, f: [*map(f, x)], transforms, images) |
| data = {"pixel_values": images} |
|
|
| return BatchFeature(data=data, tensor_type=return_tensors) |
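

# Illustrative usage sketch (assumption: a single RGB PIL image). The processor resizes,
# rescales to [0, 1] and normalises with the ImageNet mean/std configured in __init__.
def _example_image_processor():
    processor = InternVideo2ImageProcessor(size=(224, 224))
    image = Image.new("RGB", (640, 480))
    batch = processor.preprocess([image], return_tensors="pt")
    # batch["pixel_values"] is a (1, 3, 224, 224) float tensor
    return batch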
|
|
|
|
| class InternVideo2VisionConfig: |
| model_type = "internvideo2_vision_model" |
|
|
| def __init__( |
| self, |
| num_frames=4, |
| hidden_size=1408, |
| num_hidden_layers=40, |
| num_attention_heads=16, |
| num_channels=3, |
| image_size=224, |
| patch_size=14, |
| x_vis_return_idx=-2, |
| sep_image_video_pos_embed=True, |
| use_checkpoint=False, |
| checkpoint_num=40, |
| |
| ): |
| |
| self.num_frames = num_frames |
| self.hidden_size = hidden_size |
| self.num_hidden_layers = num_hidden_layers |
| self.num_attention_heads = num_attention_heads |
| self.num_channels = num_channels |
| self.patch_size = patch_size |
| self.image_size = image_size |
| self.x_vis_return_idx = x_vis_return_idx |
| self.sep_image_video_pos_embed = sep_image_video_pos_embed |
| self.use_checkpoint = use_checkpoint |
| self.checkpoint_num = checkpoint_num |
|
|
|
|
| def build_vit(config, pt_type='origin'): |
|
|
| model = PretrainVisionTransformer_clean( |
| in_chans=config.num_channels, img_size=config.image_size, patch_size=config.patch_size, |
| embed_dim=config.hidden_size, depth=config.num_hidden_layers, num_heads=config.num_attention_heads, mlp_ratio=48/11, |
| |
| attn_pool_num_heads=16, qkv_bias=False, |
| drop_path_rate=0.25, |
| init_values=0.00001, |
| qk_normalization=True, |
| use_flash_attn=torch.cuda.is_available(), |
| use_fused_rmsnorm=False, |
| use_fused_mlp=False, |
| fused_mlp_heuristic=1, |
| layerscale_no_force_fp32=False, |
| num_frames=config.num_frames, |
| tubelet_size=1, |
| sep_pos_embed=False, |
| sep_image_video_pos_embed=config.sep_image_video_pos_embed, |
| use_checkpoint=config.use_checkpoint, |
| checkpoint_num=config.checkpoint_num, |
| x_vis_return_idx=config.x_vis_return_idx, |
| x_vis_only=True |
| ) |
|
|
| if config.num_frames != 4: |
| raise NotImplementedError |
|
|
|
|
| return model |
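

# Illustrative sketch (assumptions: a CPU-only environment, so torch.cuda.is_available()
# is False and the naive attention path is used, plus a reduced layer count for brevity).
def _example_build_vit():
    config = InternVideo2VisionConfig(num_frames=4, num_hidden_layers=2, use_checkpoint=False)
    vit = build_vit(config)
    video = torch.randn(1, 3, 4, 224, 224)  # (B, C, T, H, W)
    return vit(video, use_image=False)  # (1, 1 + 4 * 256, 1408)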
|
|
| class InternVideo2VisionTower(nn.Module): |
| def __init__(self, vision_tower, vision_tower_cfg, delay_load=False, pt_type='origin', image_size=224): |
| super().__init__() |
|
|
| self.is_loaded = False |
| self.pt_type = pt_type |
|
|
| self.config = InternVideo2VisionConfig(num_frames=vision_tower_cfg.mm_local_num_frames, x_vis_return_idx=vision_tower_cfg.mm_vision_select_layer, image_size=image_size) |
|
|
| self.vision_tower_name = vision_tower |
|
|
| self.image_processor = InternVideo2ImageProcessor(size=(image_size, image_size)) |
|
|
| if not delay_load: |
| print(f"Loading vision tower: {vision_tower}") |
| self.load_model() |
| elif getattr(vision_tower_cfg, "unfreeze_mm_vision_tower", False): |
| |
| print(f"The checkpoint seems to contain `vision_tower` weights: `unfreeze_mm_vision_tower`: True.") |
| self.load_model() |
| elif hasattr(vision_tower_cfg, "mm_tunable_parts") and "mm_vision_tower" in vision_tower_cfg.mm_tunable_parts: |
| print(f"The checkpoint seems to contain `vision_tower` weights: `mm_tunable_parts` contains `mm_vision_tower`.") |
| self.load_model() |
| else: |
| raise NotImplementedError |
| self.cfg_only = self.config |
|
|
| def load_model(self, device_map=None): |
| if self.is_loaded: |
| print("{} is already loaded, `load_model` called again, skipping.".format(self.vision_tower_name)) |
| return |
|
|
| self.vision_tower = build_vit(self.config, pt_type=self.pt_type) |
| self.vision_tower.requires_grad_(False) |
|
|
| self.is_loaded = True |
|
|
    def forward(self, images):
        if type(images) is list:
            raise NotImplementedError
        else:
            # images: (B, T, C, H, W) -> (B, C, T, H, W) as expected by the 3D patch embed
            T = images.shape[1]
            images = images.permute(0, 2, 1, 3, 4)
            image_embeds = self.vision_tower(images, use_image=(T == 1))

        # drop the cls token and return per-patch features
        return image_embeds[:, 1:, :]
|
|
| @property |
| def dummy_feature(self): |
| return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype) |
|
|
| @property |
| def dtype(self): |
| for p in self.vision_tower.parameters(): |
| return p.dtype |
|
|
| @property |
| def device(self): |
| for p in self.vision_tower.parameters(): |
| return p.device |
|
|
| @property |
| def hidden_size(self): |
| return self.config.hidden_size |
|
|
| @property |
| def num_patches(self): |
| return (self.config.image_size // self.config.patch_size) ** 2 |
|
|
| @property |
| def num_patches_per_side(self): |
| return self.config.image_size // self.config.patch_size |
|
|
| @property |
| def image_size(self): |
| return self.config.image_size |
|
|
|
|
| def build_vision_tower(vision_tower_cfg, **kwargs): |
| vision_tower = getattr(vision_tower_cfg, "mm_vision_tower", getattr(vision_tower_cfg, "vision_tower", None)) |
| return InternVideo2VisionTower(vision_tower, vision_tower_cfg=vision_tower_cfg, **kwargs) |
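

# Illustrative sketch (assumptions: a minimal stand-in for the LLaVA-style model config
# and a CPU-only environment; constructing the tower builds the full 40-layer backbone,
# so this is for illustration rather than a quick test).
def _example_build_vision_tower():
    from types import SimpleNamespace
    cfg = SimpleNamespace(
        mm_vision_tower="internvideo2",  # hypothetical identifier, only used for logging
        mm_local_num_frames=4,
        mm_vision_select_layer=-2,
    )
    tower = build_vision_tower(cfg)
    video = torch.randn(1, 4, 3, 224, 224)  # (B, T, C, H, W)
    return tower(video)  # (1, 4 * 256, 1408): per-patch features, cls token dropped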
|