from functools import partial

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from mmcv.cnn.bricks import DropPath
from mmengine import to_2tuple

from mmaction.registry import MODELS


class Mlp(nn.Module):
    """Multilayer perceptron used inside the transformer blocks."""

    def __init__(self,
                 in_features,
                 hidden_features=None,
                 out_features=None,
                 act_layer=nn.GELU,
                 drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        # Dropout is applied only after the second projection here,
        # matching the reference implementation.
        x = self.fc2(x)
        x = self.drop(x)
        return x


class Attention(nn.Module):
    """Multi-head self-attention with a decoupled q/v bias.

    When ``qkv_bias`` is True, separate bias parameters are learned for
    the query and value projections while the key bias is kept at zero.
    """

    def __init__(self,
                 dim,
                 num_heads=8,
                 qkv_bias=False,
                 qk_scale=None,
                 attn_drop=0.,
                 proj_drop=0.,
                 attn_head_dim=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        self.scale = qk_scale or head_dim**-0.5

        self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
        if qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
        else:
            self.q_bias = None
            self.v_bias = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        qkv_bias = None
        if self.q_bias is not None:
            # Concatenate the learnable q bias, a frozen zero k bias and
            # the learnable v bias into one qkv bias vector.
            qkv_bias = torch.cat(
                (self.q_bias,
                 torch.zeros_like(self.v_bias,
                                  requires_grad=False), self.v_bias))
        qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
        qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
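

# Shape walk-through for Attention.forward (a reading aid; shapes assumed
# from the reshape/permute calls above): for an input x of shape (B, N, C),
#     qkv:  (3, B, num_heads, N, head_dim)   after reshape + permute
#     attn: (B, num_heads, N, N)             scaled dot-product scores
#     out:  (B, N, all_head_dim) -> proj -> (B, N, dim)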


class Block(nn.Module):
    """Transformer encoder block: self-attention followed by an MLP, with
    optional learnable per-channel scaling (gamma) of both residual
    branches."""

    def __init__(self,
                 dim,
                 num_heads,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 qk_scale=None,
                 drop=0.,
                 attn_drop=0.,
                 drop_path=0.,
                 init_values=None,
                 act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm,
                 attn_head_dim=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
            attn_head_dim=attn_head_dim)
        self.drop_path = DropPath(
            drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop)

        # Guard against the default ``init_values=None``: comparing None
        # with 0 raises a TypeError on Python 3.
        if init_values is not None and init_values > 0:
            self.gamma_1 = nn.Parameter(
                init_values * torch.ones((dim)), requires_grad=True)
            self.gamma_2 = nn.Parameter(
                init_values * torch.ones((dim)), requires_grad=True)
        else:
            self.gamma_1, self.gamma_2 = None, None

    def forward(self, x):
        if self.gamma_1 is None:
            x = x + self.drop_path(self.attn(self.norm1(x)))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
        else:
            x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        return x
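

# Note: gamma_1 / gamma_2 implement LayerScale (Touvron et al., "Going
# deeper with Image Transformers"): each residual branch is multiplied by
# a small learnable per-channel vector initialised to ``init_values``,
# which helps stabilise the training of deep ViTs.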


class PatchEmbed(nn.Module):
    """Video to patch embedding via a 3D convolution over tubelets."""

    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 in_chans=3,
                 embed_dim=768,
                 num_frames=16,
                 tubelet_size=2):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.tubelet_size = int(tubelet_size)
        num_patches = (img_size[1] //
                       patch_size[1]) * (img_size[0] // patch_size[0]) * (
                           num_frames // self.tubelet_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.proj = nn.Conv3d(
            in_channels=in_chans,
            out_channels=embed_dim,
            kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]),
            stride=(self.tubelet_size, patch_size[0], patch_size[1]))

    def forward(self, x):
        B, C, T, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model " \
            f'({self.img_size[0]}*{self.img_size[1]}).'
        x = self.proj(x).flatten(2).transpose(1, 2)
        return x
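

# Example shapes (using the class defaults): a (B, 3, 16, 224, 224) clip
# with patch_size=16 and tubelet_size=2 yields
# (16 // 2) * (224 // 16) * (224 // 16) = 8 * 14 * 14 = 1568 tokens,
# so PatchEmbed returns a tensor of shape (B, 1568, 768).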


def get_sinusoid_encoding_table(n_position,
                                d_hid,
                                cur_frame=-1,
                                pre_n_position=1568):
    """Sinusoid position encoding table.

    The table is first built for ``pre_n_position`` positions (the
    pretraining layout) and then interpolated spatially and/or temporally
    to match the current resolution and frame count.
    """

    def get_position_angle_vec(position):
        return [
            position / np.power(10000, 2 * (hid_j // 2) / d_hid)
            for hid_j in range(d_hid)
        ]

    sinusoid_table = np.array(
        [get_position_angle_vec(pos_i) for pos_i in range(pre_n_position)])
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])
    sinusoid_table = torch.tensor(
        sinusoid_table, dtype=torch.float, requires_grad=False).unsqueeze(0)
    print(f'n_position: {n_position}')
    print(f'pre_n_position: {pre_n_position}')
    if n_position // cur_frame * 8 != pre_n_position and cur_frame != -1:
        # Spatial interpolation: the pretraining grid is 8 x 14 x 14.
        T = 8
        P = 14
        C = d_hid
        new_P = int((n_position // cur_frame)**0.5)
        print(
            f'Pretraining uses 14x14, but current version is {new_P}x{new_P}')
        print('Interpolate the position embedding')
        sinusoid_table = sinusoid_table.reshape(-1, T, P, P, C)
        sinusoid_table = sinusoid_table.reshape(-1, P, P,
                                                C).permute(0, 3, 1, 2)
        sinusoid_table = torch.nn.functional.interpolate(
            sinusoid_table,
            size=(new_P, new_P),
            mode='bicubic',
            align_corners=False)
        sinusoid_table = sinusoid_table.permute(0, 2, 3, 1).reshape(
            -1, T, new_P, new_P, C)
        sinusoid_table = sinusoid_table.flatten(1, 3)
    if cur_frame != -1 and cur_frame != 8:
        # Temporal interpolation: pretraining uses 8 frames.
        print(f'Pretraining uses 8 frames, but current frame is {cur_frame}')
        print('Interpolate the position embedding')
        T = 8
        new_T = cur_frame
        P = int((n_position // cur_frame)**0.5)
        C = d_hid
        sinusoid_table = sinusoid_table.reshape(-1, T, P, P, C)
        sinusoid_table = sinusoid_table.permute(0, 2, 3, 4,
                                                1).reshape(-1, C, T)
        sinusoid_table = torch.nn.functional.interpolate(
            sinusoid_table, size=new_T, mode='linear')
        sinusoid_table = sinusoid_table.reshape(1, P, P, C, new_T).permute(
            0, 4, 1, 2, 3)
        sinusoid_table = sinusoid_table.flatten(1, 3)
    if n_position == pre_n_position:
        return sinusoid_table
    else:
        print('Use learnable position embedding')
        return nn.Parameter(sinusoid_table, requires_grad=True)
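

# Worked example (numbers follow the defaults used below): with d_hid=768
# and a 16-frame, 224x224, patch-16, tubelet-1 model,
# n_position = 16 * 14 * 14 = 3136 and cur_frame = 16. The 1568-entry
# pretraining table (8 x 14 x 14) needs no spatial interpolation
# (3136 // 16 * 8 == 1568) but is interpolated along time from 8 to 16
# frames; since 3136 != 1568, the result is returned as a learnable
# nn.Parameter of shape (1, 3136, 768).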


@MODELS.register_module()
class UMTViT(nn.Module):
    """Vision Transformer backbone for UMT video recognition."""

    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 in_chans=3,
                 embed_dim=768,
                 depth=12,
                 num_heads=12,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 norm_layer=partial(nn.LayerNorm, eps=1e-6),
                 init_values=0.,
                 use_learnable_pos_emb=False,
                 all_frames=16,
                 tubelet_size=1,
                 use_checkpoint=False,
                 checkpoint_num=0,
                 use_mean_pooling=True):
        super().__init__()
        self.num_features = self.embed_dim = embed_dim
        self.tubelet_size = tubelet_size
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            num_frames=all_frames,
            tubelet_size=self.tubelet_size)
        num_patches = self.patch_embed.num_patches
        self.use_checkpoint = use_checkpoint
        self.checkpoint_num = checkpoint_num
        print(f'Use checkpoint: {use_checkpoint}')
        print(f'Checkpoint number: {checkpoint_num}')

        if use_learnable_pos_emb:
            self.pos_embed = nn.Parameter(
                torch.zeros(1, num_patches, embed_dim))
        else:
            # Pretraining uses 8 frames: 8 * 14 * 14 = 1568 positions for
            # patch size 16, and 8 * 16 * 16 = 2048 for patch size 14.
            if patch_size == 14:
                pre_n_position = 2048
            else:
                pre_n_position = 1568
            self.pos_embed = get_sinusoid_encoding_table(
                num_patches,
                embed_dim,
                all_frames // tubelet_size,
                pre_n_position=pre_n_position)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # Stochastic depth decay rule: the drop rate grows linearly with
        # block index.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[i],
                norm_layer=norm_layer,
                init_values=init_values) for i in range(depth)
        ])
        self.norm = nn.Identity() if use_mean_pooling else norm_layer(
            embed_dim)
        self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None

    def forward_features(self, x):
        x = self.patch_embed(x)
        B, _, _ = x.size()

        if self.pos_embed is not None:
            x = x + self.pos_embed.expand(B, -1, -1).type_as(x).to(
                x.device).clone().detach()
        x = self.pos_drop(x)

        for idx, blk in enumerate(self.blocks):
            # Optionally checkpoint the first ``checkpoint_num`` blocks to
            # trade recomputation for activation memory.
            if self.use_checkpoint and idx < self.checkpoint_num:
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)

        x = self.norm(x)
        if self.fc_norm is not None:
            # Mean-pool over all tokens, then normalise.
            return self.fc_norm(x.mean(1))
        else:
            return x[:, 0]

    def forward(self, x):
        x = self.forward_features(x)
        return x
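

# A minimal smoke test (a sketch using the class defaults, not part of the
# module API): build the default backbone and push one random 16-frame clip
# through it.
if __name__ == '__main__':
    model = UMTViT()
    model.eval()
    clip = torch.randn(1, 3, 16, 224, 224)  # (B, C, T, H, W)
    with torch.no_grad():
        feat = model(clip)
    print(feat.shape)  # expected: torch.Size([1, 768])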