import math

import torch
import torch.nn as nn
import torch.nn.functional as F

class SelfAttention(nn.Module):

    def __init__(self, n_heads: int, d_embed: int, in_proj_bias=True, out_proj_bias=True):
        super().__init__()

        # Combines the Wq, Wk and Wv projection matrices into one linear layer
        self.in_proj = nn.Linear(d_embed, d_embed * 3, bias=in_proj_bias)
        # The Wo output projection matrix
        self.out_proj = nn.Linear(d_embed, d_embed, bias=out_proj_bias)
        self.n_heads = n_heads
        self.d_head = d_embed // n_heads
    
    def forward(self, x: torch.Tensor, causal_mask=False):
        # x: (Batch_Size, seq_length, Dim)

        input_shape = x.shape
        batch_size, seq_length, embed_dim = input_shape

        interim_shape = (batch_size, seq_length, self.n_heads, self.d_head)

        # (Batch_Size, seq_length, Dim) -> (Batch_Size, seq_length, 3 * Dim) -> 3 tensors of shape (Batch_Size, seq_length, Dim)
        q, k, v = self.in_proj(x).chunk(3, dim=-1)

        # (Batch_Size, seq_length, Dim) -> (Batch_Size, seq_length, H, Dim / H) -> (Batch_Size, H, seq_length, Dim / H)
        q = q.view(interim_shape).transpose(1, 2)
        k = k.view(interim_shape).transpose(1, 2)
        v = v.view(interim_shape).transpose(1, 2)

        # (Batch_Size, H, seq_length, Dim / H) @ (Batch_Size, H, Dim / H, seq_length) -> (Batch_Size, H, seq_length, seq_length)
        weight = q @ k.transpose(-1, -2)

        if causal_mask:
            # Mask where the upper triangle (above the principal diagonal) is True
            mask = torch.ones_like(weight, dtype=torch.bool).triu(1)
            # Fill the masked positions with -inf so they become 0 after the softmax
            weight.masked_fill_(mask, -torch.inf)

        # Scale by sqrt(d_head); the -inf entries stay -inf, so the mask is unaffected
        weight /= math.sqrt(self.d_head)

        weight = F.softmax(weight, dim=-1)

        # (Batch_Size, H, seq_length, seq_length) @ (Batch_Size, H, seq_length, Dim / H) -> (Batch_Size, H, seq_length, Dim / H)
        output = weight @ v

        # (Batch_Size, H, seq_length, Dim / H) -> (Batch_Size, seq_length, H, Dim / H)
        output = output.transpose(1, 2)

        # (Batch_Size, seq_length, H, Dim / H) -> (Batch_Size, seq_length, Dim)
        output = output.reshape(input_shape)

        output = self.out_proj(output)

        # (Batch_Size, seq_length, Dim)
        return output

class CrossAttention(nn.Module):

    def __init__(self, n_heads: int, d_embed: int, d_cross: int, in_proj_bias=True, out_proj_bias=True):
        super().__init__()

        # Wq projects the query (from the latent); Wk and Wv project the keys and values (from the context)
        self.q_proj = nn.Linear(d_embed, d_embed, bias=in_proj_bias)
        self.k_proj = nn.Linear(d_cross, d_embed, bias=in_proj_bias)
        self.v_proj = nn.Linear(d_cross, d_embed, bias=in_proj_bias)

        self.out_proj = nn.Linear(d_embed, d_embed, bias=out_proj_bias)

        self.n_heads = n_heads
        self.d_head = d_embed // n_heads

    def forward(self, x: torch.Tensor, y: torch.Tensor):
        # x (latent): (Batch_Size, seq_length_q, Dim_q)
        # y (context): (Batch_Size, seq_length_kv, Dim_kv) = (Batch_Size, 77, 768)

        input_shape = x.shape
        batch_size, seq_length_q, embed_dim = input_shape

        # -1 lets the same shape be used for both the query (seq_length_q) and the key/value (seq_length_kv) sequences
        interim_shape = (batch_size, -1, self.n_heads, self.d_head)

        # Multiply the query by Wq
        q = self.q_proj(x)
        # Multiply the keys by Wk
        k = self.k_proj(y)
        # Multiply the values by Wv
        v = self.v_proj(y)

        # (Batch_Size, seq_length_q, Dim) -> (Batch_Size, seq_length_q, H, Dim / H) -> (Batch_Size, H, seq_length_q, Dim / H)
        q = q.view(interim_shape).transpose(1, 2)

        # (Batch_Size, seq_length_kv, Dim) -> (Batch_Size, seq_length_kv, H, Dim / H) -> (Batch_Size, H, seq_length_kv, Dim / H)
        k = k.view(interim_shape).transpose(1, 2)

        # (Batch_Size, seq_length_kv, Dim) -> (Batch_Size, seq_length_kv, H, Dim / H) -> (Batch_Size, H, seq_length_kv, Dim / H)
        v = v.view(interim_shape).transpose(1, 2)

        # (Batch_Size, H, seq_length_q, Dim / H) @ (Batch_Size, H, Dim / H, seq_length_kv) -> (Batch_Size, H, seq_length_q, seq_length_kv)
        weight = q @ k.transpose(-1, -2)

        weight /= math.sqrt(self.d_head)

        weight = F.softmax(weight, dim=-1)

        # (Batch_Size, H, seq_length_q, seq_length_kv) @ (Batch_Size, H, seq_length_kv, Dim / H) -> (Batch_Size, H, seq_length_q, Dim / H)
        output = weight @ v

        # (Batch_Size, H, seq_length_q, Dim / H) -> (Batch_Size, seq_length_q, H, Dim / H) -> (Batch_Size, seq_length_q, Dim)
        output = output.transpose(1, 2).contiguous().view(input_shape)

        output = self.out_proj(output)

        # (Batch_Size, seq_length_q, Dim)
        return output
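

# Minimal shape-check sketch, not part of the module itself. The dimensions are
# illustrative assumptions: d_embed=320 and n_heads=8 for the latent stream, while
# the context uses 77 tokens of size 768 as noted in the CrossAttention comment.
if __name__ == "__main__":
    self_attn = SelfAttention(n_heads=8, d_embed=320)
    cross_attn = CrossAttention(n_heads=8, d_embed=320, d_cross=768)

    latent = torch.randn(2, 64, 320)   # (Batch_Size, seq_length_q, Dim_q)
    context = torch.randn(2, 77, 768)  # (Batch_Size, seq_length_kv, Dim_kv)

    print(self_attn(latent, causal_mask=True).shape)  # torch.Size([2, 64, 320])
    print(cross_attn(latent, context).shape)          # torch.Size([2, 64, 320])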