from inspect import isfunction
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from typing import Optional, Any

from .diffusionmodules.util import checkpoint
from .sub_quadratic_attention import efficient_dot_product_attention

from fcbh import model_management

if model_management.xformers_enabled():
    import xformers
    import xformers.ops

from fcbh.cli_args import args
import fcbh.ops
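# Attention precision: by default the q @ k^T similarity is computed in fp32
# for numerical stability; --dont-upcast-attention keeps it in the model dtype.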
if args.dont_upcast_attention:
    print("disabling upcasting of attention")
    _ATTN_PRECISION = "fp16"
else:
    _ATTN_PRECISION = "fp32"
def exists(val):
    return val is not None


def uniq(arr):
    return {el: True for el in arr}.keys()


def default(val, d):
    if exists(val):
        return val
    return d


def max_neg_value(t):
    return -torch.finfo(t.dtype).max


def init_(tensor):
    dim = tensor.shape[-1]
    std = 1 / math.sqrt(dim)
    tensor.uniform_(-std, std)
    return tensor
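# GEGLU is the gated-GELU variant of the feed-forward projection; FeedForward
# uses it when glu=True and a plain Linear + GELU otherwise.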
class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out, dtype=None, device=None, operations=fcbh.ops):
        super().__init__()
        self.proj = operations.Linear(dim_in, dim_out * 2, dtype=dtype, device=device)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None, operations=fcbh.ops):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = nn.Sequential(
            operations.Linear(dim, inner_dim, dtype=dtype, device=device),
            nn.GELU()
        ) if not glu else GEGLU(dim, inner_dim, dtype=dtype, device=device, operations=operations)

        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            operations.Linear(inner_dim, dim_out, dtype=dtype, device=device)
        )

    def forward(self, x):
        return self.net(x)
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


def Normalize(in_channels, dtype=None, device=None):
    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)
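# All attention_* implementations below share the same contract: q, k and v
# arrive as (batch, tokens, heads * dim_head) tensors and the result has the
# same layout as q.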
def attention_basic(q, k, v, heads, mask=None):
    b, _, dim_head = q.shape
    dim_head //= heads
    scale = dim_head ** -0.5

    h = heads
    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    # optionally run the q @ k^T matmul in fp32 to avoid overflow
    if _ATTN_PRECISION == "fp32":
        with torch.autocast(enabled=False, device_type='cuda'):
            q, k = q.float(), k.float()
            sim = einsum('b i d, b j d -> b i j', q, k) * scale
    else:
        sim = einsum('b i d, b j d -> b i j', q, k) * scale

    del q, k

    if exists(mask):
        mask = rearrange(mask, 'b ... -> b (...)')
        max_neg_value = -torch.finfo(sim.dtype).max
        mask = repeat(mask, 'b j -> (b h) () j', h=h)
        sim.masked_fill_(~mask, max_neg_value)

    sim = sim.softmax(dim=-1)

    out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v)
    out = (
        out.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return out
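# Sub-quadratic (chunked) attention: a query chunk size is chosen from the free
# device memory, then the work is delegated to efficient_dot_product_attention.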
def attention_sub_quad(query, key, value, heads, mask=None):
    b, _, dim_head = query.shape
    dim_head //= heads

    scale = dim_head ** -0.5
    query = query.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)
    value = value.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)

    # key is laid out pre-transposed, as (batch * heads, dim_head, k_tokens)
    key = key.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 3, 1).reshape(b * heads, dim_head, -1)

    dtype = query.dtype
    upcast_attention = _ATTN_PRECISION == "fp32" and query.dtype != torch.float32
    if upcast_attention:
        bytes_per_token = torch.finfo(torch.float32).bits // 8
    else:
        bytes_per_token = torch.finfo(query.dtype).bits // 8
    batch_x_heads, q_tokens, _ = query.shape
    _, _, k_tokens = key.shape
    qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens

    mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)

    kv_chunk_size_min = None
    kv_chunk_size = None
    query_chunk_size = None

    for x in [4096, 2048, 1024, 512, 256]:
        count = mem_free_total / (batch_x_heads * bytes_per_token * x * 4.0)
        if count >= k_tokens:
            kv_chunk_size = k_tokens
            query_chunk_size = x
            break

    if query_chunk_size is None:
        query_chunk_size = 512

    hidden_states = efficient_dot_product_attention(
        query,
        key,
        value,
        query_chunk_size=query_chunk_size,
        kv_chunk_size=kv_chunk_size,
        kv_chunk_size_min=kv_chunk_size_min,
        use_checkpoint=False,
        upcast_attention=upcast_attention,
    )

    hidden_states = hidden_states.to(dtype)

    hidden_states = hidden_states.unflatten(0, (-1, heads)).transpose(1, 2).flatten(start_dim=2)
    return hidden_states
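# Split attention: softmax(q @ k^T) @ v is computed in slices along the query
# dimension. On an OOM error the cache is emptied once and then the slice count
# is doubled (up to 64) before retrying; if a slice already succeeded the error
# is re-raised instead.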
def attention_split(q, k, v, heads, mask=None):
    b, _, dim_head = q.shape
    dim_head //= heads
    scale = dim_head ** -0.5

    h = heads
    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)

    mem_free_total = model_management.get_free_memory(q.device)

    if _ATTN_PRECISION == "fp32":
        element_size = 4
    else:
        element_size = q.element_size()

    gb = 1024 ** 3
    tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * element_size
    modifier = 3
    mem_required = tensor_size * modifier
    steps = 1

    if mem_required > mem_free_total:
        steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2)))

    if steps > 64:
        max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
        raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
                           f'Need: {mem_required/64/gb:0.1f}GB free, Have: {mem_free_total/gb:0.1f}GB free')

    first_op_done = False
    cleared_cache = False
    while True:
        try:
            slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
            for i in range(0, q.shape[1], slice_size):
                end = i + slice_size
                if _ATTN_PRECISION == "fp32":
                    with torch.autocast(enabled=False, device_type='cuda'):
                        s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * scale
                else:
                    s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale

                s2 = s1.softmax(dim=-1).to(v.dtype)
                del s1
                first_op_done = True

                r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
                del s2
            break
        except model_management.OOM_EXCEPTION as e:
            if not first_op_done:
                model_management.soft_empty_cache(True)
                if not cleared_cache:
                    cleared_cache = True
                    print("out of memory error, emptying cache and trying again")
                    continue
                steps *= 2
                if steps > 64:
                    raise e
                print("out of memory error, increasing steps and trying again", steps)
            else:
                raise e

    del q, k, v

    r1 = (
        r1.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return r1
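# xformers memory-efficient attention. Masking is not implemented on this path.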
def attention_xformers(q, k, v, heads, mask=None):
    b, _, dim_head = q.shape
    dim_head //= heads

    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)

    if exists(mask):
        raise NotImplementedError
    out = (
        out.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return out
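# PyTorch scaled_dot_product_attention; it accepts an attention mask directly,
# which is why it can also back optimized_attention_masked below.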
def attention_pytorch(q, k, v, heads, mask=None):
    b, _, dim_head = q.shape
    dim_head //= heads
    q, k, v = map(
        lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2),
        (q, k, v),
    )

    out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
    out = (
        out.transpose(1, 2).reshape(b, -1, heads * dim_head)
    )
    return out
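# Pick the attention implementation once at import time, based on what the
# backend reports as available and on the command line flags.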
optimized_attention = attention_basic
optimized_attention_masked = attention_basic

if model_management.xformers_enabled():
    print("Using xformers cross attention")
    optimized_attention = attention_xformers
elif model_management.pytorch_attention_enabled():
    print("Using pytorch cross attention")
    optimized_attention = attention_pytorch
else:
    if args.use_split_cross_attention:
        print("Using split optimization for cross attention")
        optimized_attention = attention_split
    else:
        print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
        optimized_attention = attention_sub_quad

if model_management.pytorch_attention_enabled():
    optimized_attention_masked = attention_pytorch
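# CrossAttention projects x (and an optional context) to q/k/v and runs one of
# the optimized_attention functions selected above; with context=None it is
# plain self-attention.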
class CrossAttention(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=fcbh.ops):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout))

    def forward(self, x, context=None, value=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        if mask is None:
            out = optimized_attention(q, k, v, self.heads)
        else:
            out = optimized_attention_masked(q, k, v, self.heads, mask)
        return self.to_out(out)
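# A pre-norm transformer block: self-attention, cross-attention and a
# feed-forward, each wrapped in a residual. transformer_options carries the
# patch hooks that can replace or post-process the attention calls.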
class BasicTransformerBlock(nn.Module):
    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True,
                 disable_self_attn=False, dtype=None, device=None, operations=fcbh.ops):
        super().__init__()
        self.disable_self_attn = disable_self_attn
        self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout,
                                    context_dim=context_dim if self.disable_self_attn else None, dtype=dtype, device=device, operations=operations)
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations)
        self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
                                    heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device, operations=operations)
        self.norm1 = nn.LayerNorm(dim, dtype=dtype, device=device)
        self.norm2 = nn.LayerNorm(dim, dtype=dtype, device=device)
        self.norm3 = nn.LayerNorm(dim, dtype=dtype, device=device)
        self.checkpoint = checkpoint
        self.n_heads = n_heads
        self.d_head = d_head

    def forward(self, x, context=None, transformer_options={}):
        return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)
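    # _forward does the actual work; forward only wraps it in gradient
    # checkpointing when self.checkpoint is True.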
    def _forward(self, x, context=None, transformer_options={}):
        extra_options = {}
        block = None
        block_index = 0
        if "current_index" in transformer_options:
            extra_options["transformer_index"] = transformer_options["current_index"]
        if "block_index" in transformer_options:
            block_index = transformer_options["block_index"]
            extra_options["block_index"] = block_index
        if "original_shape" in transformer_options:
            extra_options["original_shape"] = transformer_options["original_shape"]
        if "block" in transformer_options:
            block = transformer_options["block"]
            extra_options["block"] = block
        if "cond_or_uncond" in transformer_options:
            extra_options["cond_or_uncond"] = transformer_options["cond_or_uncond"]
        if "patches" in transformer_options:
            transformer_patches = transformer_options["patches"]
        else:
            transformer_patches = {}

        extra_options["n_heads"] = self.n_heads
        extra_options["dim_head"] = self.d_head

        if "patches_replace" in transformer_options:
            transformer_patches_replace = transformer_options["patches_replace"]
        else:
            transformer_patches_replace = {}

        # first attention: self-attention, unless disable_self_attn routes the context in
        n = self.norm1(x)
        if self.disable_self_attn:
            context_attn1 = context
        else:
            context_attn1 = None
        value_attn1 = None

        if "attn1_patch" in transformer_patches:
            patch = transformer_patches["attn1_patch"]
            if context_attn1 is None:
                context_attn1 = n
            value_attn1 = context_attn1
            for p in patch:
                n, context_attn1, value_attn1 = p(n, context_attn1, value_attn1, extra_options)

        if block is not None:
            transformer_block = (block[0], block[1], block_index)
        else:
            transformer_block = None
        attn1_replace_patch = transformer_patches_replace.get("attn1", {})
        block_attn1 = transformer_block
        if block_attn1 not in attn1_replace_patch:
            block_attn1 = block

        if block_attn1 in attn1_replace_patch:
            if context_attn1 is None:
                context_attn1 = n
                value_attn1 = n
            n = self.attn1.to_q(n)
            context_attn1 = self.attn1.to_k(context_attn1)
            value_attn1 = self.attn1.to_v(value_attn1)
            n = attn1_replace_patch[block_attn1](n, context_attn1, value_attn1, extra_options)
            n = self.attn1.to_out(n)
        else:
            n = self.attn1(n, context=context_attn1, value=value_attn1)

        if "attn1_output_patch" in transformer_patches:
            patch = transformer_patches["attn1_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        if "middle_patch" in transformer_patches:
            patch = transformer_patches["middle_patch"]
            for p in patch:
                x = p(x, extra_options)

        # second attention: cross-attention against the context (self-attention when context is None)
        n = self.norm2(x)

        context_attn2 = context
        value_attn2 = None
        if "attn2_patch" in transformer_patches:
            patch = transformer_patches["attn2_patch"]
            value_attn2 = context_attn2
            for p in patch:
                n, context_attn2, value_attn2 = p(n, context_attn2, value_attn2, extra_options)

        attn2_replace_patch = transformer_patches_replace.get("attn2", {})
        block_attn2 = transformer_block
        if block_attn2 not in attn2_replace_patch:
            block_attn2 = block

        if block_attn2 in attn2_replace_patch:
            if value_attn2 is None:
                value_attn2 = context_attn2
            n = self.attn2.to_q(n)
            context_attn2 = self.attn2.to_k(context_attn2)
            value_attn2 = self.attn2.to_v(value_attn2)
            n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options)
            n = self.attn2.to_out(n)
        else:
            n = self.attn2(n, context=context_attn2, value=value_attn2)

        if "attn2_output_patch" in transformer_patches:
            patch = transformer_patches["attn2_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        x = self.ff(self.norm3(x)) + x
        return x
class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding) and reshape to b, t, d.
    Then apply standard transformer action. Finally, reshape back to an image.
    NEW: use_linear for more efficiency instead of the 1x1 convs.
    """
    def __init__(self, in_channels, n_heads, d_head,
                 depth=1, dropout=0., context_dim=None,
                 disable_self_attn=False, use_linear=False,
                 use_checkpoint=True, dtype=None, device=None, operations=fcbh.ops):
        super().__init__()
        if exists(context_dim) and not isinstance(context_dim, list):
            context_dim = [context_dim] * depth
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = Normalize(in_channels, dtype=dtype, device=device)
        if not use_linear:
            self.proj_in = operations.Conv2d(in_channels,
                                             inner_dim,
                                             kernel_size=1,
                                             stride=1,
                                             padding=0, dtype=dtype, device=device)
        else:
            self.proj_in = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device)

        self.transformer_blocks = nn.ModuleList(
            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
                                   disable_self_attn=disable_self_attn, checkpoint=use_checkpoint, dtype=dtype, device=device, operations=operations)
                for d in range(depth)]
        )
        if not use_linear:
            self.proj_out = operations.Conv2d(inner_dim, in_channels,
                                              kernel_size=1,
                                              stride=1,
                                              padding=0, dtype=dtype, device=device)
        else:
            # argument order follows the reference implementation; inner_dim and
            # in_channels are equal in practice, so the shapes still match
            self.proj_out = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device)
        self.use_linear = use_linear
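    # forward: NCHW -> (b, h*w, c) tokens -> transformer blocks -> NCHW, with a
    # residual connection around the whole stack.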
    def forward(self, x, context=None, transformer_options={}):
        if not isinstance(context, list):
            context = [context] * len(self.transformer_blocks)
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            transformer_options["block_index"] = i
            x = block(x, context=context[i], transformer_options=transformer_options)
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in