import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import collections
# --- Helpers (Replacements for timm functions) ---
def to_2tuple(x):
    """Replacement for timm's to_2tuple: broadcast a scalar to a 2-tuple."""
    if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
        return tuple(x)
    return (x, x)
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Replacement for timm.models.layers.trunc_normal_"""
return torch.nn.init.trunc_normal_(tensor, mean, std, a, b)
# --- Custom Modules (No TIMM) ---
def drop_path(
x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True
):
"""Drop paths (Stochastic Depth) per sample."""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and scale_by_keep:
random_tensor.div_(keep_prob)
return x * random_tensor
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample."""
def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep
def forward(self, x):
return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
def extra_repr(self):
return f"drop_prob={round(self.drop_prob,3):0.3f}"
class Mlp(nn.Module):
"""MLP as used in Vision Transformer, MLP-Mixer and related networks"""
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer() if isinstance(act_layer, type) else act_layer
self.drop1 = nn.Dropout(drop)
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
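# Illustrative sketch (not part of the model definition): with the defaults used by
# AltBlock below, the Mlp expands dim -> mlp_ratio * dim -> dim and keeps the token
# shape unchanged.
#
#   mlp = Mlp(in_features=768, hidden_features=3072)
#   y = mlp(torch.randn(2, 196, 768))   # output shape stays [2, 196, 768]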
class SinCos2DEmbed(nn.Module):
    """Adds fixed 2D sine-cosine positional embeddings to a feature map."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # x has the shape [batch_size, embed_dim, grid_length, grid_height]
        batch_size, embed_dim, grid_length, grid_height = x.shape
        # Create grid positions
        grid_length_a = torch.arange(grid_length, dtype=torch.float32, device=x.device)
        grid_height_a = torch.arange(grid_height, dtype=torch.float32, device=x.device)
        grid = torch.meshgrid(grid_height_a, grid_length_a, indexing="xy")
        # a quarter of the channels goes to each of {sin, cos} x {length, height}
        sub_embed_dim = embed_dim // 4
        omega = torch.arange(sub_embed_dim, dtype=torch.float32, device=x.device)
        omega /= sub_embed_dim
        omega = 1.0 / 10000 ** omega
        # embed_length
        out_length = torch.einsum("mn,d->dmn", grid[0], omega)
        embed_length_sin = torch.sin(out_length)
        embed_length_cos = torch.cos(out_length)
        embed_length = torch.concatenate([embed_length_sin, embed_length_cos], dim=0)
        # embed_height
        out_height = torch.einsum("mn,d->dmn", grid[1], omega)
        embed_height_sin = torch.sin(out_height)
        embed_height_cos = torch.cos(out_height)
        embed_height = torch.concatenate([embed_height_sin, embed_height_cos], dim=0)
        # concat length and height embeddings along the channel dimension
        embed = torch.concatenate([embed_length, embed_height], dim=0).unsqueeze(dim=0)
        x = x + embed
        return x
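# Illustrative sketch (not part of the model definition): the embedding is computed
# on the fly from the feature-map size, so no parameters are stored; embed_dim must
# be divisible by 4 (sin/cos for each of the two grid axes).
#
#   pe = SinCos2DEmbed()
#   feat = torch.zeros(2, 768, 14, 14)    # [B, embed_dim, grid_length, grid_height]
#   out = pe(feat)                         # same shape, fixed sin-cos offsets added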
class PatchEmbed(nn.Module):
"""Flexible Image to Patch Embedding"""
def __init__(
self,
patch_size=16,
in_chans=3,
embed_dim=768,
stride=16,
use_sincos_pos=False,
):
super().__init__()
patch_size = to_2tuple(patch_size)
stride = to_2tuple(stride)
self.patch_size = patch_size
self.use_sincos_pos = use_sincos_pos
        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=patch_size, stride=stride
        )  # patches overlap when stride < patch_size
if self.use_sincos_pos:
self.pos_embed = SinCos2DEmbed()
else:
self.pos_embed = None
def forward(self, x):
x = self.proj(x)
# Apply dynamic positional embedding before flattening
if self.pos_embed is not None:
x = self.pos_embed(x)
x = x.flatten(2).transpose(1, 2)
return x
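# Illustrative sketch (not part of the model definition; the sizes below are
# assumptions for the example only). With patch_size == stride == 16, a 224x224
# image becomes a 14x14 grid, i.e. 196 tokens of dimension embed_dim.
#
#   pe = PatchEmbed(patch_size=16, in_chans=3, embed_dim=768, stride=16,
#                   use_sincos_pos=True)
#   tokens = pe(torch.randn(2, 3, 224, 224))   # shape [2, 196, 768]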
class AltBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
mlp_drop=0.0,
post_mlp_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
layer_norm_first=True,
ffn_targets=False,
cosine_attention=False,
):
super().__init__()
self.layer_norm_first = layer_norm_first
self.ffn_targets = ffn_targets
self.norm1 = norm_layer(dim)
self.attn = AltAttention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
cosine_attention=cosine_attention,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=mlp_drop,
)
self.post_mlp_dropout = nn.Dropout(post_mlp_drop, inplace=False)
    def forward(self, x, padding_mask=None, alibi_bias=None):
        if self.layer_norm_first:
            # pre-norm: attention sub-block with residual connection
            x = x + self.drop_path(self.attn(self.norm1(x), padding_mask, alibi_bias))
            r = x = self.mlp(self.norm2(x))
            t = x  # second output: the FFN activations when ffn_targets is set
            x = r + self.drop_path(self.post_mlp_dropout(x))
            if not self.ffn_targets:
                t = x  # otherwise return the block output itself
        else:
            # post-norm: normalize after each residual connection
            x = x + self.drop_path(self.attn(x, padding_mask, alibi_bias))
            r = x = self.norm1(x)
            x = self.mlp(x)
            t = x
            x = self.norm2(r + self.drop_path(self.post_mlp_dropout(x)))
            if not self.ffn_targets:
                t = x
        return x, t
class AltAttention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
cosine_attention=False,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.cosine_attention = cosine_attention
if cosine_attention:
self.logit_scale = nn.Parameter(
torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True
)
def forward(self, x, padding_mask=None, alibi_bias=None):
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4) # qkv x B x H x L x D
)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
dtype = q.dtype
if self.cosine_attention:
# cosine attention
attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)
logit_scale = torch.clamp(
self.logit_scale, max=torch.log(torch.tensor(1.0 / 0.01))
).exp()
attn = attn * logit_scale
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
if alibi_bias is not None:
attn = attn.type_as(alibi_bias)
attn[:, : alibi_bias.size(1)] += alibi_bias
if padding_mask is not None and padding_mask.any():
attn = attn.masked_fill(
padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn = attn.softmax(dim=-1, dtype=torch.float32).to(dtype=dtype)
attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2)  # B x H x L x D -> B x L x H x D
x = x.reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
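# Minimal smoke test (illustrative only; the dimensions below are assumptions and
# do not correspond to any particular checkpoint).
if __name__ == "__main__":
    embed = PatchEmbed(
        patch_size=16, in_chans=3, embed_dim=192, stride=16, use_sincos_pos=True
    )
    block = AltBlock(dim=192, num_heads=3, mlp_ratio=4.0, qkv_bias=True)
    tokens = embed(torch.randn(2, 3, 224, 224))   # [2, 196, 192]
    out, target = block(tokens)                   # both [2, 196, 192]
    print(out.shape, target.shape)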