from typing import Callable, Optional, Union

import torch
import torch.nn.functional as F
from torch import Tensor, nn


class Attention(nn.Module):
    """Multi-head self-attention with optional QK-norm and rotary position embeddings (RoPE)."""

    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = True,
        proj_bias: bool = True,
        attn_drop: float = 0.0,
        proj_drop: float = 0.0,
        norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
        qk_norm: bool = False,
        rope: Optional[nn.Module] = None,
    ) -> None:
        super().__init__()
        assert dim % num_heads == 0, "dim should be divisible by num_heads"
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        # F.scaled_dot_product_attention applies this 1/sqrt(head_dim) scaling
        # internally; kept here for reference.
        self.scale = self.head_dim**-0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        # Optional per-head normalization of queries and keys (QK-norm).
        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim, bias=proj_bias)
        self.proj_drop = nn.Dropout(proj_drop)
        self.rope = rope

    def forward(self, x: Tensor, pos=None, attn_mask=None) -> Tensor:
        B, N, C = x.shape
        # Project to QKV and split into per-head tensors of shape (B, num_heads, N, head_dim).
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        q, k = self.q_norm(q), self.k_norm(k)
        # Apply rotary position embeddings to queries and keys, if configured.
        if self.rope is not None:
            q = self.rope(q, pos)
            k = self.rope(k, pos)
        x = F.scaled_dot_product_attention(
            q,
            k,
            v,
            dropout_p=self.attn_drop.p if self.training else 0.0,
            attn_mask=attn_mask,
        )
        # Merge heads back to (B, N, C) and apply the output projection.
        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
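

# The original file leaves `rope` undefined; Attention.forward only assumes a
# callable mapping a (B, num_heads, N, head_dim) tensor (plus positions) to the
# same shape. Below is a minimal illustrative sketch of that interface, using a
# GPT-NeoX-style "rotate half" rotary embedding over 1D positions. It is an
# assumption for demonstration, not the module this file was written against.
class SimpleRope1D(nn.Module):
    """Hypothetical 1D rotary embedding matching the rope(q, pos) call above."""

    def __init__(self, head_dim: int, base: float = 10000.0) -> None:
        super().__init__()
        # Per-pair rotation frequencies: base^(-2i/head_dim) for i = 0..head_dim/2.
        inv_freq = base ** (-torch.arange(0, head_dim, 2).float() / head_dim)
        self.register_buffer("inv_freq", inv_freq, persistent=False)

    def forward(self, x: Tensor, pos: Optional[Tensor] = None) -> Tensor:
        # x: (B, num_heads, N, head_dim); pos: (B, N) token positions.
        B, _, N, _ = x.shape
        if pos is None:
            pos = torch.arange(N, device=x.device).expand(B, N)
        angles = pos[:, None, :, None].float() * self.inv_freq  # (B, 1, N, head_dim // 2)
        cos, sin = angles.cos(), angles.sin()
        x1, x2 = x.chunk(2, dim=-1)
        return torch.cat((x1 * cos - x2 * sin, x1 * sin + x2 * cos), dim=-1)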


class LayerScale(nn.Module):
    """Multiplies the input by a learnable per-channel scale, initialized near zero."""

    def __init__(
        self,
        dim: int,
        init_values: Union[float, Tensor] = 1e-5,
        inplace: bool = False,
    ) -> None:
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x: Tensor) -> Tensor:
        return x.mul_(self.gamma) if self.inplace else x * self.gamma
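

# Typical (assumed) placement in a transformer block, for context; this file
# does not itself define a Block:
#     x = x + layer_scale(attn(norm(x)))
# With gamma initialized near zero, each residual branch starts close to identity.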


class Mlp(nn.Module):
    """Two-layer feed-forward block: fc1 -> activation -> dropout -> fc2 -> dropout."""

    def __init__(
        self,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
        act_layer: Callable[..., nn.Module] = nn.GELU,
        drop: float = 0.0,
        bias: bool = True,
    ) -> None:
        super().__init__()
        # Hidden and output widths default to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)
        self.drop = nn.Dropout(drop)

    def forward(self, x: Tensor) -> Tensor:
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
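

if __name__ == "__main__":
    # Hypothetical smoke test, not part of the original file; dimensions are
    # illustrative (ViT-Base-like: dim=768, 12 heads, 197 tokens), and
    # SimpleRope1D is the assumed RoPE sketch defined above.
    x = torch.randn(2, 197, 768)
    attn = Attention(dim=768, num_heads=12, qk_norm=True, rope=SimpleRope1D(64))
    mlp = Mlp(in_features=768, hidden_features=4 * 768)
    ls1, ls2 = LayerScale(768), LayerScale(768)
    y = x + ls1(attn(x))
    y = y + ls2(mlp(y))
    print(y.shape)  # torch.Size([2, 197, 768])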