|
import logging
|
|
from torch import Tensor
from torch import nn
|
|
|
|
logger = logging.getLogger("dinov2")
|
|
|
|
# Use xFormers' memory-efficient attention kernels when the package is installed;
# otherwise fall back to the plain implementation in Attention.forward().
try:
    from xformers.ops import memory_efficient_attention, unbind

    XFORMERS_AVAILABLE = True
except ImportError:
    logger.warning("xFormers not available")
    XFORMERS_AVAILABLE = False
|
|
|
|
class Attention(nn.Module):
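    """Multi-head self-attention over token sequences of shape (B, N, dim).

    Queries are scaled by 1 / sqrt(head_dim) before the dot product with keys.
    """
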
    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = False,
        proj_bias: bool = True,
        attn_drop: float = 0.0,
        proj_drop: float = 0.0,
    ) -> None:
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim**-0.5
|
|
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim, bias=proj_bias)
        self.proj_drop = nn.Dropout(proj_drop)
|
|
    def forward(self, x: Tensor) -> Tensor:
        B, N, C = x.shape
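        # One linear projection produces q, k and v; split heads and reorder to
        # (3, B, num_heads, N, head_dim) so q, k, v can be indexed along dim 0.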
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
|
|
        # Scaled dot-product attention scores of shape (B, num_heads, N, N).
        q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]
        attn = q @ k.transpose(-2, -1)
|
|
        # Normalize the scores into attention weights and apply dropout.
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
|
|
        # Merge the heads back into (B, N, C), then apply the output projection.
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
|
|
|
|
class MemEffAttention(Attention):
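    """Attention variant backed by xFormers' memory_efficient_attention.

    Falls back to the parent implementation when xFormers is not installed.
    """
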
    def forward(self, x: Tensor, attn_bias=None) -> Tensor:
        if not XFORMERS_AVAILABLE:
            assert attn_bias is None, "xFormers is required for nested tensors usage"
            return super().forward(x)
|
|
        B, N, C = x.shape
        # memory_efficient_attention expects q, k, v laid out as (B, N, num_heads, head_dim),
        # so unlike the parent class no permute is needed here.
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
|
|
        q, k, v = unbind(qkv, 2)
|
|
        x = memory_efficient_attention(q, k, v, attn_bias=attn_bias)
        x = x.reshape([B, N, C])
|
|
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
|
|
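if __name__ == "__main__":
    # Minimal smoke test (illustrative only; the dimensions below are arbitrary):
    # a ViT-S-sized token sequence keeps its shape after self-attention.
    # MemEffAttention is a drop-in replacement exercised the same way when
    # xFormers is installed.
    import torch

    tokens = torch.randn(2, 197, 384)  # (batch, tokens, embedding dim)
    attn = Attention(dim=384, num_heads=6)
    print(attn(tokens).shape)  # torch.Size([2, 197, 384])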