import os

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.amp.autocast_mode import autocast

from inspect import isfunction
from einops import rearrange, repeat
from typing import Optional, Any

from .util import checkpoint, zero_module

try:
    import xformers
    import xformers.ops

    XFORMERS_IS_AVAILBLE = True
except ImportError:
    print("[WARN] xformers is unavailable!")
    XFORMERS_IS_AVAILBLE = False

_ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32")
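# Precision used for the attention score matmul; "fp32" (the default) upcasts the scores
# for numerical stability. Usage sketch (assumed invocation, adapt to your entry point):
#   ATTN_PRECISION=fp16 python <your_script>.py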
|
|
|
|
def default(val, d):
    # Return `val` when it is provided; otherwise fall back to `d`
    # (calling it first if it is a function).
    if val is not None:
        return val
    return d() if isfunction(d) else d


class GEGLU(nn.Module):
    """Gated GELU: project to twice the output width, then gate one half with GELU of the other."""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)
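
# GEGLU sketch (illustrative shapes only): GEGLU(dim_in=320, dim_out=1280) projects a
# (batch, tokens, 320) tensor to (batch, tokens, 2560) internally and returns one 1280-wide
# half multiplied by gelu() of the other, i.e. an output of shape (batch, tokens, 1280).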
|
|
|
|
class FeedForward(nn.Module):
    """Transformer MLP block: (optionally gated) projection up by `mult`, dropout, projection to `dim_out`."""

    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = (
            nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
            if not glu
            else GEGLU(dim, inner_dim)
        )

        self.net = nn.Sequential(
            project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)
        )

    def forward(self, x):
        return self.net(x)
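
# FeedForward sketch (illustrative, not part of the module): FeedForward(320, glu=True)
# maps (batch, tokens, 320) -> (batch, tokens, 320) through a 4x-wide GEGLU hidden layer;
# pass dim_out to change the output width.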
|
|
|
|
class CrossAttention(nn.Module):
    """Vanilla (quadratic-memory) multi-head attention; acts as self-attention when `context` is None."""

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head**-0.5
        self.heads = heads

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
        )

    def forward(self, x, context=None, mask=None):
        h = self.heads

        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)

        # Fold the head dimension into the batch dimension: (b, n, h*d) -> (b*h, n, d).
        q, k, v = map(lambda t: rearrange(t, "b n (h d) -> (b h) n d", h=h), (q, k, v))

        if _ATTN_PRECISION == "fp32":
            # Compute the attention scores in fp32 even under autocast, for numerical stability.
            with autocast(enabled=False, device_type="cuda"):
                q, k = q.float(), k.float()
                sim = torch.einsum("b i d, b j d -> b i j", q, k) * self.scale
        else:
            sim = torch.einsum("b i d, b j d -> b i j", q, k) * self.scale

        del q, k

        if mask is not None:
            mask = rearrange(mask, "b ... -> b (...)")
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, "b j -> (b h) () j", h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # Attention weights over the context tokens.
        sim = sim.softmax(dim=-1)

        out = torch.einsum("b i j, b j d -> b i d", sim, v)
        out = rearrange(out, "(b h) n d -> b n (h d)", h=h)
        return self.to_out(out)
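
# CrossAttention sketch (illustrative shapes): with query_dim=320, context_dim=768, heads=8,
# dim_head=40, calling attn(x, context) with x of shape (b, 4096, 320) and context of shape
# (b, 77, 768) returns (b, 4096, 320); omitting `context` gives plain self-attention over x.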
|
|
|
|
class MemoryEfficientCrossAttention(nn.Module):
    """Multi-head attention backed by xformers' memory-efficient kernels."""

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
        )
        self.attention_op: Optional[Any] = None

    def forward(self, x, context=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)

        b, _, _ = q.shape
        # Split heads and fold them into the batch dimension: (b, n, h*d) -> (b*h, n, d).
        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(b, t.shape[1], self.heads, self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b * self.heads, t.shape[1], self.dim_head)
            .contiguous(),
            (q, k, v),
        )

        out = xformers.ops.memory_efficient_attention(
            q, k, v, attn_bias=None, op=self.attention_op
        )

        if mask is not None:
            # Attention masks are not supported by this code path.
            raise NotImplementedError
        # Merge the heads back: (b*h, n, d) -> (b, n, h*d).
        out = (
            out.unsqueeze(0)
            .reshape(b, self.heads, out.shape[1], self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b, out.shape[1], self.heads * self.dim_head)
        )
        return self.to_out(out)
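
# Drop-in sketch (same interface as CrossAttention, masks unsupported):
#   attn = MemoryEfficientCrossAttention(query_dim=320, context_dim=768, heads=8, dim_head=40)
#   out = attn(x, context)   # x: (b, n, 320), context: (b, m, 768) -> out: (b, n, 320)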
|
|
|
|
class BasicTransformerBlock(nn.Module):
    ATTENTION_MODES = {
        "softmax": CrossAttention,
        "softmax-xformers": MemoryEfficientCrossAttention,
    }

    def __init__(
        self,
        dim,
        n_heads,
        d_head,
        dropout=0.0,
        context_dim=None,
        gated_ff=True,
        checkpoint=True,
        disable_self_attn=False,
    ):
        super().__init__()
        attn_mode = "softmax-xformers" if XFORMERS_IS_AVAILBLE else "softmax"
        assert attn_mode in self.ATTENTION_MODES
        attn_cls = self.ATTENTION_MODES[attn_mode]
        self.disable_self_attn = disable_self_attn
        # attn1 is self-attention unless disable_self_attn is set, in which case it also
        # attends to the context.
        self.attn1 = attn_cls(
            query_dim=dim,
            heads=n_heads,
            dim_head=d_head,
            dropout=dropout,
            context_dim=context_dim if self.disable_self_attn else None,
        )
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        # attn2 is cross-attention to the context (self-attention if context is None).
        self.attn2 = attn_cls(
            query_dim=dim,
            context_dim=context_dim,
            heads=n_heads,
            dim_head=d_head,
            dropout=dropout,
        )
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.norm3 = nn.LayerNorm(dim)
        self.checkpoint = checkpoint

    def forward(self, x, context=None):
        return checkpoint(
            self._forward, (x, context), self.parameters(), self.checkpoint
        )

    def _forward(self, x, context=None):
        x = (
            self.attn1(
                self.norm1(x), context=context if self.disable_self_attn else None
            )
            + x
        )
        x = self.attn2(self.norm2(x), context=context) + x
        x = self.ff(self.norm3(x)) + x
        return x
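
# Block structure sketch: each residual step is pre-norm, i.e.
#   x = x + attn1(norm1(x))            # self-attention (or cross, if disable_self_attn)
#   x = x + attn2(norm2(x), context)   # cross-attention to the conditioning
#   x = x + ff(norm3(x))               # gated feed-forward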
|
|
|
|
class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding) and reshape to (b, t, d).
    Then apply standard transformer blocks.
    Finally, reshape back to an image and add the residual.
    NEW: `use_linear` replaces the 1x1 convolutions with linear layers for efficiency.
    """

    def __init__(
        self,
        in_channels,
        n_heads,
        d_head,
        depth=1,
        dropout=0.0,
        context_dim=None,
        disable_self_attn=False,
        use_linear=False,
        use_checkpoint=True,
    ):
        super().__init__()
        assert context_dim is not None
        if not isinstance(context_dim, list):
            context_dim = [context_dim]
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = nn.GroupNorm(
            num_groups=32, num_channels=in_channels, eps=1e-6, affine=True
        )
        if not use_linear:
            self.proj_in = nn.Conv2d(
                in_channels, inner_dim, kernel_size=1, stride=1, padding=0
            )
        else:
            self.proj_in = nn.Linear(in_channels, inner_dim)

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    n_heads,
                    d_head,
                    dropout=dropout,
                    context_dim=context_dim[d],
                    disable_self_attn=disable_self_attn,
                    checkpoint=use_checkpoint,
                )
                for d in range(depth)
            ]
        )
        # proj_out maps inner_dim back to in_channels and is zero-initialized, so the
        # whole block starts out as an identity mapping.
        if not use_linear:
            self.proj_out = zero_module(
                nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
            )
        else:
            self.proj_out = zero_module(nn.Linear(inner_dim, in_channels))
        self.use_linear = use_linear

    def forward(self, x, context=None):
        # `context` may be a single tensor or one tensor per transformer block.
        if not isinstance(context, list):
            context = [context]
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, "b c h w -> b (h w) c").contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            x = block(x, context=context[i])
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in
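
# SpatialTransformer sketch (illustrative sizes): SpatialTransformer(320, n_heads=8, d_head=40,
# context_dim=768, use_linear=True) takes x of shape (b, 320, h, w) plus a (b, 77, 768) context
# and returns a tensor of the same shape as x (residual connection included).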
|
|
|
|
class BasicTransformerBlock3D(BasicTransformerBlock):
    """BasicTransformerBlock whose self-attention runs jointly over all frames of a multi-view/video batch."""

    def forward(self, x, context=None, num_frames=1):
        return checkpoint(
            self._forward, (x, context, num_frames), self.parameters(), self.checkpoint
        )

    def _forward(self, x, context=None, num_frames=1):
        # Merge the frame axis into the token axis so attn1 attends across frames.
        x = rearrange(x, "(b f) l c -> b (f l) c", f=num_frames).contiguous()
        x = (
            self.attn1(
                self.norm1(x), context=context if self.disable_self_attn else None
            )
            + x
        )
        # Split the frames out again for per-frame cross-attention and feed-forward.
        x = rearrange(x, "b (f l) c -> (b f) l c", f=num_frames).contiguous()
        x = self.attn2(self.norm2(x), context=context) + x
        x = self.ff(self.norm3(x)) + x
        return x
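
# 3D-attention sketch: with num_frames=4 and 4096 tokens per frame, attn1 sees sequences of
# 4 * 4096 tokens (cross-frame self-attention), while attn2 and the feed-forward operate on
# each frame separately, exactly as in the 2D block.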
|
|
|
|
class SpatialTransformer3D(nn.Module):
    """SpatialTransformer variant with cross-frame (3D) self-attention via BasicTransformerBlock3D."""

    def __init__(
        self,
        in_channels,
        n_heads,
        d_head,
        depth=1,
        dropout=0.0,
        context_dim=None,
        disable_self_attn=False,
        use_linear=True,
        use_checkpoint=True,
    ):
        super().__init__()
        assert context_dim is not None
        if not isinstance(context_dim, list):
            context_dim = [context_dim]
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = nn.GroupNorm(
            num_groups=32, num_channels=in_channels, eps=1e-6, affine=True
        )
        if not use_linear:
            self.proj_in = nn.Conv2d(
                in_channels, inner_dim, kernel_size=1, stride=1, padding=0
            )
        else:
            self.proj_in = nn.Linear(in_channels, inner_dim)

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock3D(
                    inner_dim,
                    n_heads,
                    d_head,
                    dropout=dropout,
                    context_dim=context_dim[d],
                    disable_self_attn=disable_self_attn,
                    checkpoint=use_checkpoint,
                )
                for d in range(depth)
            ]
        )
        # proj_out maps inner_dim back to in_channels and is zero-initialized, so the
        # whole block starts out as an identity mapping.
        if not use_linear:
            self.proj_out = zero_module(
                nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
            )
        else:
            self.proj_out = zero_module(nn.Linear(inner_dim, in_channels))
        self.use_linear = use_linear

    def forward(self, x, context=None, num_frames=1):
        # `context` may be a single tensor or one tensor per transformer block.
        if not isinstance(context, list):
            context = [context]
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, "b c h w -> b (h w) c").contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            x = block(x, context=context[i], num_frames=num_frames)
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in
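

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the library): run as a module, e.g.
    # `python -m <package>.attention`, so the relative import above resolves. Sizes are
    # illustrative; note that the xformers attention path effectively requires a CUDA device.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    batch, channels, height, width, num_frames = 2, 32, 4, 4, 2
    model = SpatialTransformer3D(
        in_channels=channels, n_heads=4, d_head=8, context_dim=16, use_checkpoint=False
    ).to(device)
    x = torch.randn(batch, channels, height, width, device=device)
    ctx = torch.randn(batch, 5, 16, device=device)  # (batch, context tokens, context dim)
    out = model(x, context=ctx, num_frames=num_frames)
    print(out.shape)  # expected: torch.Size([2, 32, 4, 4])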
|
|