import math
from functools import partial

import numpy as np
import torch
import torch.nn as nn

from model.tensors import (
    trunc_normal_,
    repeat_interleave_batch,
)
from model.utils import apply_masks


def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size, dtype=float)
    grid_w = np.arange(grid_size, dtype=float)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed
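

# Example (shape check): a 14x14 patch grid with 768-dim embeddings.
#   get_2d_sincos_pos_embed(768, 14)                  # -> (196, 768)
#   get_2d_sincos_pos_embed(768, 14, cls_token=True)  # -> (197, 768); row 0 is zeros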


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of the dimensions to encode grid_h, the other half for grid_w
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb


def get_1d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid length
    return:
    pos_embed: [grid_size, embed_dim] or [1+grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid = np.arange(grid_size, dtype=float)
    pos_embed = get_1d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=float)
    omega /= embed_dim / 2.
    omega = 1. / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb
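

# Example (sketch): with embed_dim=8 the frequencies are
#   omega = 1. / 10000 ** (np.arange(4) / 4.)  # [1.0, 0.1, 0.01, 0.001]
# and each position m is encoded as [sin(m*omega), cos(m*omega)] concatenated.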


def drop_path(x, drop_prob: float = 0., training: bool = False):
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    # one Bernoulli draw per sample, broadcast over all remaining dims
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize
    # rescale by keep_prob so the expected activation is unchanged
    output = x.div(keep_prob) * random_tensor
    return output


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    """
    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)
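

# Example: DropPath(0.1) zeroes the residual branch for ~10% of the samples in a
# batch during training (rescaling survivors by 1/0.9) and is a no-op in eval mode.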


class MLP(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class Attention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        # [B, N, 3*C] -> [3, B, num_heads, N, head_dim]
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]

        attn = (q @ k.transpose(-2, -1)) * self.scale  # [B, num_heads, N, N]
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn
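

# Example (shape check, dim=768 with 8 heads):
#   y, w = Attention(768, num_heads=8)(torch.randn(2, 196, 768))
#   # y: [2, 196, 768], w: [2, 8, 196, 196]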


class Block(nn.Module):
    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.,
        qkv_bias=False,
        qk_scale=None,
        drop=0.,
        attn_drop=0.,
        drop_path=0.,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)

        self.attn = Attention(
            dim, num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim,
                       act_layer=act_layer, drop=drop)

    def forward(self, x, return_attention=False):
        # pre-norm residual block: x + Attn(LN(x)), then x + MLP(LN(x));
        # Attention always returns (tokens, weights), so no flag is needed
        y, attn = self.attn(self.norm1(x))
        if return_attention:
            return attn
        x = x + self.drop_path(y)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


class PatchEmbed(nn.Module):
    """ Image to Patch Embedding
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        num_patches = (img_size // patch_size) * (img_size // patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches

        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        B, C, H, W = x.shape
        x = self.proj(x).flatten(2).transpose(1, 2)  # [B, num_patches, embed_dim]
        return x


class ConvEmbed(nn.Module):
    """
    3x3 Convolution stems for ViT following ViTC models
    """

    def __init__(self, channels, strides, img_size=224, in_chans=3, batch_norm=True):
        super().__init__()
        # build the 3x3 conv stem, ending with a 1x1 projection
        stem = []
        channels = [in_chans] + channels
        for i in range(len(channels) - 2):
            stem += [nn.Conv2d(channels[i], channels[i+1], kernel_size=3,
                               stride=strides[i], padding=1, bias=(not batch_norm))]
            if batch_norm:
                stem += [nn.BatchNorm2d(channels[i+1])]
            stem += [nn.ReLU(inplace=True)]
        stem += [nn.Conv2d(channels[-2], channels[-1], kernel_size=1, stride=strides[-1])]
        self.stem = nn.Sequential(*stem)

        # compute the number of patches (img_size is an int here, so no indexing)
        stride_prod = int(np.prod(strides))
        self.num_patches = (img_size // stride_prod) ** 2

    def forward(self, x):
        p = self.stem(x)
        return p.flatten(2).transpose(1, 2)


class VisionTransformerPredictor(nn.Module):
    """ Vision Transformer Predictor """
    def __init__(
        self,
        num_patches,
        embed_dim=768,
        predictor_embed_dim=384,
        depth=6,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        norm_layer=nn.LayerNorm,
        init_std=0.02,
        **kwargs
    ):
        super().__init__()
        self.predictor_embed = nn.Linear(embed_dim, predictor_embed_dim, bias=True)
        self.mask_token = nn.Parameter(torch.zeros(1, 1, predictor_embed_dim))
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule

        self.predictor_pos_embed = nn.Parameter(torch.zeros(1, num_patches, predictor_embed_dim),
                                                requires_grad=False)
        predictor_pos_embed = get_2d_sincos_pos_embed(self.predictor_pos_embed.shape[-1],
                                                      int(num_patches**.5),
                                                      cls_token=False)
        self.predictor_pos_embed.data.copy_(torch.from_numpy(predictor_pos_embed).float().unsqueeze(0))

        self.predictor_blocks = nn.ModuleList([
            Block(
                dim=predictor_embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth)])
        self.predictor_norm = norm_layer(predictor_embed_dim)
        self.predictor_proj = nn.Linear(predictor_embed_dim, embed_dim, bias=True)

        self.init_std = init_std
        trunc_normal_(self.mask_token, std=self.init_std)
        self.apply(self._init_weights)
        self.fix_init_weight()

    def fix_init_weight(self):
        def rescale(param, layer_id):
            param.div_(math.sqrt(2.0 * layer_id))

        for layer_id, layer in enumerate(self.predictor_blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            rescale(layer.mlp.fc2.weight.data, layer_id + 1)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=self.init_std)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            trunc_normal_(m.weight, std=self.init_std)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def forward(self, x, masks_x, masks):
        assert (masks is not None) and (masks_x is not None), 'Cannot run predictor without mask indices'

        if not isinstance(masks_x, list):
            masks_x = [masks_x]

        if not isinstance(masks, list):
            masks = [masks]

        # -- batch size
        B = len(x) // len(masks_x)

        # -- map from encoder-dim to predictor-dim
        x = self.predictor_embed(x)

        # -- add positional embedding to the context tokens
        x_pos_embed = self.predictor_pos_embed.repeat(B, 1, 1)
        x += apply_masks(x_pos_embed, masks_x)

        _, N_ctxt, D = x.shape

        # -- position-embedded mask tokens, one per target patch per mask
        pos_embs = self.predictor_pos_embed.repeat(B, 1, 1)
        pos_embs = apply_masks(pos_embs, masks)
        pos_embs = repeat_interleave_batch(pos_embs, B, repeat=len(masks_x))
        pred_tokens = self.mask_token.repeat(pos_embs.size(0), pos_embs.size(1), 1)
        pred_tokens += pos_embs

        # -- concat mask tokens to the context tokens
        x = x.repeat(len(masks), 1, 1)
        x = torch.cat([x, pred_tokens], dim=1)

        # -- forward propagation
        for blk in self.predictor_blocks:
            x = blk(x)
        x = self.predictor_norm(x)

        # -- return predictions for mask tokens only
        x = x[:, N_ctxt:]
        x = self.predictor_proj(x)

        return x
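

# Example (sketch, assuming apply_masks gathers tokens by index): with B=4 images,
# one context mask of N_ctx indices and one target mask of N_tgt indices,
#   predictor(z, masks_x, masks)   # z: [4, N_ctx, embed_dim]
# returns predicted target representations of shape [4, N_tgt, embed_dim].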


def gather_tokens_multiK(x_full: torch.Tensor,
                         idx: torch.Tensor) -> torch.Tensor:
    """
    x_full : [B, N_tot, D]
    idx    : [B, V, K, N_q] (int64 indices)

    Returns
    -------
    out : [B, V, K, N_q, D]
    """
    B, N_tot, D = x_full.shape
    B2, V, K, N_q = idx.shape
    assert B == B2, "batch mismatch"

    # broadcast the indices over the feature dim
    idx_exp = idx.unsqueeze(-1).expand(-1, -1, -1, -1, D)  # [B, V, K, N_q, D]

    # broadcast the source tokens over the view / mask-set dims
    x_exp = x_full[:, None, None]
    x_exp = x_exp.expand(B, V, K, N_tot, D)  # [B, V, K, N_tot, D]

    # gather along the token dim
    gathered = torch.gather(x_exp, 3, idx_exp)
    return gathered
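

# Example (shape check):
#   toks = torch.randn(2, 196, 768)
#   idx = torch.randint(0, 196, (2, 4, 3, 36))
#   gather_tokens_multiK(toks, idx).shape  # torch.Size([2, 4, 3, 36, 768])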


class VisionTransformerPredictorMV(nn.Module):
    """
    Multi-view predictor for JEPA.

    * Context sequence = visible tokens from **all views and all K_enc sets**
    * Target sequence = one mask token per **K_pred set** per view
    """

    def __init__(
        self,
        num_patches,
        n_views,
        embed_dim=768,
        predictor_embed_dim=384,
        depth=3,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        norm_layer=nn.LayerNorm,
        init_std=0.02,
        **kwargs,
    ):
        super().__init__()
        P = predictor_embed_dim
        self.n_views = n_views

        self.proj_in = nn.Linear(embed_dim, P, bias=True)
        self.mask_tok = nn.Parameter(torch.zeros(1, 1, P))

        # fixed 2d sin-cos position table, gathered per mask in forward()
        # (mirrors predictor_pos_embed in the single-view predictor above)
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, P), requires_grad=False)
        pos_embed = get_2d_sincos_pos_embed(P, int(num_patches ** 0.5), cls_token=False)
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))

        dpr = [x.item() for x in torch.linspace(0.0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList([
            Block(
                dim=P,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[i],
                norm_layer=norm_layer,
            )
            for i in range(depth)
        ])
        self.norm = norm_layer(P)
        self.proj_out = nn.Linear(P, embed_dim, bias=True)

        trunc_normal_(self.mask_tok, std=init_std)
        self.apply(self._init_weights)

    @staticmethod
    def _init_weights(m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)

    def forward(
        self,
        z_ctx: torch.Tensor,
        masks_enc: torch.Tensor,
        masks_pred: torch.Tensor,
    ):
        """
        z_ctx      : [B, V, N_vis, embed_dim] visible-token features per view
        masks_enc  : [B, V, K_enc, N_vis] patch indices of the visible tokens
        masks_pred : [B, V, K_pred, N_q] patch indices of the target tokens

        Returns
        -------
        pred : [B, V*K_pred*N_q, embed_dim] (flattened); reshape as needed
        """
        # -- project context tokens to predictor width
        z_ctx = self.proj_in(z_ctx)  # [B, V, N_vis, P]
        B = z_ctx.size(0)
        pos_table = self.pos_embed.expand(B, -1, -1)  # [B, N_tot, P]

        # -- add each visible token's position (K_enc axis broadcasts over z_ctx)
        ctx_tokens = z_ctx.unsqueeze(2) + gather_tokens_multiK(pos_table, masks_enc)
        B, V, K_enc, N_vis, P = ctx_tokens.shape
        ctx_tokens = ctx_tokens.reshape(B, V * K_enc * N_vis, P)
        N_ctx = ctx_tokens.size(1)

        # -- one position-embedded mask token per target patch
        _, _, K_pred, N_q = masks_pred.shape
        M = V * K_pred * N_q
        tgt_pos = gather_tokens_multiK(pos_table, masks_pred).reshape(B, M, P)
        tgt_tok = self.mask_tok + tgt_pos  # [B, M, P]

        # -- joint forward pass over context + mask tokens
        seq = torch.cat([ctx_tokens, tgt_tok], dim=1)
        for blk in self.blocks:
            seq = blk(seq)
        seq = self.norm(seq)

        # -- keep only the mask-token outputs, map back to encoder width
        pred = seq[:, N_ctx:]
        pred = self.proj_out(pred)
        return pred


def vit_predictor(**kwargs):
    model = VisionTransformerPredictorMV(
        mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
    return model
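

# Example: remaining arguments are forwarded to the constructor, e.g.
#   vit_predictor(num_patches=196, n_views=4, embed_dim=768,
#                 predictor_embed_dim=384, depth=6, num_heads=12)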


if __name__ == '__main__':
    B, V = 2, 4     # batch size, views
    N_tot = 196     # total patches (14x14)
    N_vis = 31      # visible tokens per view
    K_enc = 1       # context mask sets
    K_pred = 4      # target mask sets
    N_q = 36        # target patches per set
    E = 768         # encoder embedding dim

    torch.manual_seed(0)
    # fall back to CPU (and fp32, which CPU kernels expect) when CUDA is absent
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32

    z_ctx = torch.randn(B, V, N_vis, E).to(device, dtype)
    masks_enc = torch.randint(0, N_tot, (B, V, K_enc, N_vis)).to(device)
    masks_pred = torch.randint(0, N_tot, (B, V, K_pred, N_q)).to(device)

    pred_mv = VisionTransformerPredictorMV(
        num_patches=N_tot,
        n_views=V,
        embed_dim=E,
        predictor_embed_dim=384,
        depth=4,
        num_heads=8,
    ).to(device).to(dtype)

    out = pred_mv(z_ctx, masks_enc, masks_pred)
    print(out.shape)  # torch.Size([2, 576, 768]) == [B, V*K_pred*N_q, E]