# stablediffusion/attention.py
import math

import torch
import torch.nn as nn
import torch.nn.functional as F

class SelfAttention(nn.Module):
    def __init__(self, n_heads: int, d_embed: int, in_proj_bias=True, out_proj_bias=True):
        super().__init__()
        # Single linear layer that produces Q, K and V in one projection
        self.in_proj = nn.Linear(d_embed, d_embed * 3, bias=in_proj_bias)
        self.out_proj = nn.Linear(d_embed, d_embed, bias=out_proj_bias)
        self.n_heads = n_heads
        self.d_head = d_embed // n_heads

    def forward(self, x: torch.Tensor, causal_mask=False):
        # x: (Batch_Size, Seq_Length, Dim)
        input_shape = x.shape
        batch_size, seq_length, embed_dim = input_shape
        # Shape used to split the embedding dimension across the heads
        interim_shape = (batch_size, seq_length, self.n_heads, self.d_head)
        # (Batch_Size, Seq_Length, Dim) -> (Batch_Size, Seq_Length, 3 * Dim) -> three tensors of (Batch_Size, Seq_Length, Dim)
        q, k, v = self.in_proj(x).chunk(3, dim=-1)
        # (Batch_Size, Seq_Length, Dim) -> (Batch_Size, Seq_Length, H, Dim / H) -> (Batch_Size, H, Seq_Length, Dim / H)
        q = q.view(interim_shape).transpose(1, 2)
        k = k.view(interim_shape).transpose(1, 2)
        v = v.view(interim_shape).transpose(1, 2)
        # (Batch_Size, H, Seq_Length, Seq_Length)
        weight = q @ k.transpose(-1, -2)
        if causal_mask:
            # Mask where the upper triangle (above the principal diagonal) is made of ones
            mask = torch.ones_like(weight, dtype=torch.bool).triu(1)
            weight.masked_fill_(mask, -torch.inf)
        # Scale by sqrt(d_head) before the softmax
        weight /= math.sqrt(self.d_head)
        weight = F.softmax(weight, dim=-1)
        # (Batch_Size, H, Seq_Length, Seq_Length) @ (Batch_Size, H, Seq_Length, Dim / H) -> (Batch_Size, H, Seq_Length, Dim / H)
        output = weight @ v
        # (Batch_Size, H, Seq_Length, Dim / H) -> (Batch_Size, Seq_Length, H, Dim / H)
        output = output.transpose(1, 2)
        # (Batch_Size, Seq_Length, H, Dim / H) -> (Batch_Size, Seq_Length, Dim)
        output = output.reshape(input_shape)
        output = self.out_proj(output)
        # (Batch_Size, Seq_Length, Dim)
        return output
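# Minimal usage sketch for SelfAttention. The sizes below are illustrative
# assumptions, not values taken from this repository: a batch of 4 sequences
# of length 64 with a 320-dim embedding split across 8 heads.
#
#   attn = SelfAttention(n_heads=8, d_embed=320)
#   x = torch.randn(4, 64, 320)        # (Batch_Size, Seq_Length, Dim)
#   out = attn(x, causal_mask=True)    # (4, 64, 320), same shape as x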
class CrossAttention(nn.Module):
    def __init__(self, n_heads: int, d_embed: int, d_cross: int, in_proj_bias=True, out_proj_bias=True):
        super().__init__()
        self.q_proj = nn.Linear(d_embed, d_embed, bias=in_proj_bias)
        self.k_proj = nn.Linear(d_cross, d_embed, bias=in_proj_bias)
        self.v_proj = nn.Linear(d_cross, d_embed, bias=in_proj_bias)
        self.out_proj = nn.Linear(d_embed, d_embed, bias=out_proj_bias)
        self.n_heads = n_heads
        self.d_head = d_embed // n_heads

    def forward(self, x, y):
        # x (latent):  (Batch_Size, Seq_Length_Q, Dim_Q)
        # y (context): (Batch_Size, Seq_Length_KV, Dim_KV) = (Batch_Size, 77, 768)
        input_shape = x.shape
        batch_size, seq_length_q, embed_dim = input_shape
        # -1 lets view() infer the sequence length, which differs between Q and K/V
        interim_shape = (batch_size, -1, self.n_heads, self.d_head)
        # Multiply queries by Wq
        q = self.q_proj(x)
        # Multiply keys by Wk
        k = self.k_proj(y)
        # Multiply values by Wv
        v = self.v_proj(y)
        # (Batch_Size, Seq_Length_Q, Dim) -> (Batch_Size, Seq_Length_Q, H, Dim / H) -> (Batch_Size, H, Seq_Length_Q, Dim / H)
        q = q.view(interim_shape).transpose(1, 2)
        # (Batch_Size, Seq_Length_KV, Dim) -> (Batch_Size, Seq_Length_KV, H, Dim / H) -> (Batch_Size, H, Seq_Length_KV, Dim / H)
        k = k.view(interim_shape).transpose(1, 2)
        v = v.view(interim_shape).transpose(1, 2)
        # (Batch_Size, H, Seq_Length_Q, Seq_Length_KV)
        weight = q @ k.transpose(-1, -2)
        weight /= math.sqrt(self.d_head)
        weight = F.softmax(weight, dim=-1)
        # (Batch_Size, H, Seq_Length_Q, Dim / H)
        output = weight @ v
        # (Batch_Size, H, Seq_Length_Q, Dim / H) -> (Batch_Size, Seq_Length_Q, Dim)
        output = output.transpose(1, 2).contiguous().view(input_shape)
        output = self.out_proj(output)
        # (Batch_Size, Seq_Length_Q, Dim)
        return output
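# Minimal smoke test, a sketch under assumed sizes: a 320-dim latent sequence
# attending over a CLIP-like context of 77 tokens with 768 features. These
# numbers are illustrative assumptions for the example, not fixed by this file.
if __name__ == "__main__":
    self_attn = SelfAttention(n_heads=8, d_embed=320)
    cross_attn = CrossAttention(n_heads=8, d_embed=320, d_cross=768)

    latent = torch.randn(2, 64, 320)   # (Batch_Size, Seq_Length_Q, Dim_Q)
    context = torch.randn(2, 77, 768)  # (Batch_Size, Seq_Length_KV, Dim_KV)

    # Self-attention preserves the input shape: (2, 64, 320)
    print(self_attn(latent, causal_mask=True).shape)
    # Cross-attention returns the query shape as well: (2, 64, 320)
    print(cross_attn(latent, context).shape)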