from inspect import isfunction
import global_
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat

from ldm.modules.diffusionmodules.util import checkpoint
from typing import List, Tuple
from confs import *


def exists(val):
    return val is not None


def uniq(arr):
    return {el: True for el in arr}.keys()


def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d


def max_neg_value(t):
    return -torch.finfo(t.dtype).max


def init_(tensor):
    dim = tensor.shape[-1]
    std = 1 / math.sqrt(dim)
    tensor.uniform_(-std, std)
    return tensor


# feedforward
class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., inner_dim=None):
        super().__init__()
        if inner_dim is None:
            inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = nn.Sequential(
            nn.Linear(dim, inner_dim),
            nn.GELU()
        ) if not glu else GEGLU(dim, inner_dim)

        self.dim = dim
        self.inner_dim = inner_dim
        self.dim_out = dim_out
        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim_out)
        )

    def forward(self, x, token_pos=None):
        return self.net(x)


def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


def Normalize(in_channels):
    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)


class LinearAttention(nn.Module):
    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Conv2d(hidden_dim, dim, 1)

    def forward(self, x):
        b, c, h, w = x.shape
        qkv = self.to_qkv(x)
        q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads=self.heads, qkv=3)
        k = k.softmax(dim=-1)
        context = torch.einsum('bhdn,bhen->bhde', k, v)
        out = torch.einsum('bhde,bhdn->bhen', context, q)
        out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
        return self.to_out(out)


class SpatialSelfAttention(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        b, c, h, w = q.shape
        q = rearrange(q, 'b c h w -> b (h w) c')
        k = rearrange(k, 'b c h w -> b c (h w)')
        w_ = torch.einsum('bij,bjk->bik', q, k)

        w_ = w_ * (int(c) ** (-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = rearrange(v, 'b c h w -> b c (h w)')
        w_ = rearrange(w_, 'b i j -> b j i')
        h_ = torch.einsum('bij,bjk->bik', v, w_)
        h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
        h_ = self.proj_out(h_)

        return x + h_
class CrossAttention(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., sep_head_att=False):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads  # 8
        self.dim_head = dim_head  # 40

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        # self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        # self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
        head_splits = [6, 2]
        self.head_splits = head_splits
        # if sep_head_att:
        #     self.to_k = nn.ModuleList([nn.Linear(context_dim, dim_head*head_splits[i], bias=False) for i in range(len(head_splits))])
        #     self.to_v = nn.ModuleList([nn.Linear(context_dim, dim_head*head_splits[i], bias=False) for i in range(len(head_splits))])
        # else:
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, mask=None):
        h = self.heads

        q = self.to_q(x)  # 2, 4096, 320
        context = default(context, x)  # 2, 4096, 320
        if context.shape[-1] == 768 * 2:  # this is for different attention heads
            context1, context2 = torch.chunk(context, 2, dim=-1)  # clip/id context1, landmark context2
            k1 = self.to_k(context1)
            k2 = self.to_k(context2)
            v1 = self.to_v(context1)
            v2 = self.to_v(context2)
            k = torch.cat([k1[:, :, :self.head_splits[0] * self.dim_head],
                           k2[:, :, -self.head_splits[1] * self.dim_head:]], dim=-1)
            v = torch.cat([v1[:, :, :self.head_splits[0] * self.dim_head],
                           v2[:, :, -self.head_splits[1] * self.dim_head:]], dim=-1)
            # head_splits = [6, 2]
            # k1 = self.to_k[0](context1)
            # v1 = self.to_v[0](context1)
            # k2 = self.to_k[1](context2)
            # v2 = self.to_v[1](context2)
            # k = torch.cat([k1, k2], dim=-1)
            # v = torch.cat([v1, v2], dim=-1)
        else:
            k = self.to_k(context)
            v = self.to_v(context)

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

        if exists(mask):
            mask = rearrange(mask, 'b ... -> b (...)')
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        attn = sim.softmax(dim=-1)

        out = einsum('b i j, b j d -> b i d', attn, v)
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out)
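
def _demo_head_split_routing():
    """Illustrative sketch only (not called by the model): shows how the 768*2
    branch in CrossAttention.forward routes the first chunk of a concatenated
    context to the first head_splits[0] heads and the second chunk to the last
    head_splits[1] heads. The shapes used here (8 heads of 40 dims, 77 context
    tokens, batch 2) are assumptions for the demo, not values fixed by this module.
    """
    heads, dim_head = 8, 40
    head_splits = [6, 2]
    to_k = nn.Linear(768, heads * dim_head, bias=False)

    context = torch.randn(2, 77, 768 * 2)             # e.g. clip/id features cat landmark features
    context1, context2 = torch.chunk(context, 2, dim=-1)

    k1, k2 = to_k(context1), to_k(context2)           # shared projection applied to each half
    # keep the first 6 heads' worth of channels from k1, the last 2 heads' worth from k2
    k = torch.cat([k1[..., :head_splits[0] * dim_head],
                   k2[..., -head_splits[1] * dim_head:]], dim=-1)
    assert k.shape == (2, 77, heads * dim_head)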
class BasicTransformerBlock(nn.Module):
    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True,
                 checkpoint=True, sep_head_att=False):
        super().__init__()
        self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head,
                                    dropout=dropout, sep_head_att=False)  # is a self-attention
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
                                    heads=n_heads, dim_head=d_head, dropout=dropout,
                                    sep_head_att=sep_head_att)  # is self-attn if context is none
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.norm3 = nn.LayerNorm(dim)
        self.checkpoint = checkpoint

    def forward(self, x, context=None, token_pos=None):
        inputs = (x, context, token_pos, )
        if hasattr(self, 'name4bank') and REFNET.task2layerNum[global_.task] > 0:
            if self.isReader_4bank:
                inputs = (x, context, token_pos, self.bank.get(self.name4bank))  # x, context, x_refNet
            else:
                self.bank.set(self.name4bank, x)
        return checkpoint(self._forward, inputs, self.parameters(), self.checkpoint)

    def _forward(self, x, context=None, token_pos=None, x_refNet=None):  # x, x_refNet: before LN
        if x_refNet is None:
            x = self.attn1(self.norm1(x)) + x
        else:
            x_norm = self.norm1(x)
            x_norm_cat = torch.cat([x_norm, self.norm1(x_refNet)], dim=1)
            x = self.attn1(x_norm, context=x_norm_cat) + x
            del x_norm, x_norm_cat
        x = self.attn2(self.norm2(x), context=context) + x
        # This ff might be modified into an MoE module, so pass token_pos
        x = self.ff(self.norm3(x), token_pos) + x
        return x


class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding) and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape back to an image.
    """
    def __init__(self, in_channels, n_heads, d_head,
                 depth=1, dropout=0., context_dim=None, sep_head_att=False, head_splits=None):
        super().__init__()
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = Normalize(in_channels)

        self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)

        self.transformer_blocks = nn.ModuleList(
            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout,
                                   context_dim=context_dim, sep_head_att=sep_head_att)
             for d in range(depth)]
        )

        self.proj_out = zero_module(nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0))

    def forward(self, x, context=None):
        # note: if no context is given, cross-attention defaults to self-attention
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        x = self.proj_in(x)
        x = rearrange(x, 'b c h w -> b (h w) c')

        # set token position grid (normalized centers) for gating/router use
        num_tokens = h * w
        y_coords = torch.arange(h, device=x.device, dtype=x.dtype)
        x_coords = torch.arange(w, device=x.device, dtype=x.dtype)
        yy, xx = torch.meshgrid(y_coords, x_coords, indexing='ij')
        pos = torch.stack([(xx + 0.5) / float(w), (yy + 0.5) / float(h)], dim=-1)  # [h, w, 2]
        pos = pos.reshape(1, num_tokens, 2).expand(b, -1, -1).contiguous()  # b, n, 2

        for block in self.transformer_blocks:
            x = block(x, context=context, token_pos=pos)
        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)
        x = self.proj_out(x)
        return x + x_in
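
if __name__ == "__main__":
    # Minimal usage sketch / smoke test. This assumes the repo-local imports at the
    # top of the file (global_, confs, ldm.modules.diffusionmodules.util) resolve;
    # the channel/head sizes and tensor shapes below are illustrative assumptions,
    # not values fixed by this module.
    st = SpatialTransformer(in_channels=320, n_heads=8, d_head=40, depth=1, context_dim=768)
    x = torch.randn(1, 320, 32, 32)        # b, c, h, w latent feature map
    context = torch.randn(1, 77, 768)      # e.g. text/ID conditioning tokens
    out = st(x, context=context)           # residual output, same shape as the input
    assert out.shape == x.shape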