import math

import torch
import torch.nn as nn

class SelfAttention(nn.Module):
    """Multi-head causal self-attention."""

    def __init__(self, embed_dim, num_heads):
        super().__init__()
        assert embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"
        self.head_dim = embed_dim // num_heads
        self.num_heads = num_heads

        self.query = nn.Linear(embed_dim, embed_dim)
        self.key = nn.Linear(embed_dim, embed_dim)
        self.value = nn.Linear(embed_dim, embed_dim)
        self.out_proj = nn.Linear(embed_dim, embed_dim)

    def forward(self, x):
        B, T, C = x.size()
        # Project and split into heads: (B, T, C) -> (B, num_heads, T, head_dim).
        q = self.query(x).view(B, T, self.num_heads, self.head_dim).transpose(1, 2)
        k = self.key(x).view(B, T, self.num_heads, self.head_dim).transpose(1, 2)
        v = self.value(x).view(B, T, self.num_heads, self.head_dim).transpose(1, 2)

        # Scaled dot-product attention with a causal (lower-triangular) mask,
        # so each position attends only to itself and earlier positions.
        scores = (q @ k.transpose(-2, -1)) / math.sqrt(self.head_dim)
        mask = torch.tril(torch.ones(T, T, device=x.device))
        scores = scores.masked_fill(mask == 0, float('-inf'))
        attn = torch.softmax(scores, dim=-1)

        # Weighted sum of values, then merge heads back to (B, T, C).
        out = attn @ v
        out = out.transpose(1, 2).contiguous().view(B, T, C)
        return self.out_proj(out)
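
# A minimal alternative sketch, assuming PyTorch >= 2.0: the explicit
# mask-and-softmax in SelfAttention.forward can be replaced by the fused
# torch.nn.functional.scaled_dot_product_attention kernel, which computes
# the same causal attention. `causal_sdpa` is a hypothetical helper, not
# part of the original model.
def causal_sdpa(q, k, v):
    # q, k, v: (B, num_heads, T, head_dim). Scaling by 1/sqrt(head_dim)
    # and the lower-triangular mask are handled internally by the kernel.
    return nn.functional.scaled_dot_product_attention(q, k, v, is_causal=True)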

class TransformerBlock(nn.Module):
    """Pre-norm transformer block: attention and MLP, each with a residual connection."""

    def __init__(self, embed_dim, num_heads):
        super().__init__()
        self.attn = SelfAttention(embed_dim, num_heads)
        self.ln1 = nn.LayerNorm(embed_dim)
        # Position-wise feed-forward network with the standard 4x expansion.
        self.ff = nn.Sequential(
            nn.Linear(embed_dim, embed_dim * 4),
            nn.GELU(),
            nn.Linear(embed_dim * 4, embed_dim),
        )
        self.ln2 = nn.LayerNorm(embed_dim)

    def forward(self, x):
        # Pre-norm residual layout: LayerNorm feeds each sublayer,
        # and the sublayer output is added back to the stream.
        x = x + self.attn(self.ln1(x))
        x = x + self.ff(self.ln2(x))
        return x
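
# For contrast, a post-norm variant (not used here) would normalize after the
# residual add instead of before the sublayer:
#
#     x = self.ln1(x + self.attn(x))
#     x = self.ln2(x + self.ff(x))
#
# Pre-norm, as above, is generally easier to train at depth.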

class TinyTransformer(nn.Module):
    """Decoder-only transformer: token + learned positional embeddings,
    a stack of blocks, a final LayerNorm, and a linear head over the vocabulary."""

    def __init__(self, vocab_size, max_len, embed_dim=128, num_heads=2, num_layers=1):
        super().__init__()
        self.token_embed = nn.Embedding(vocab_size, embed_dim)
        # Learned positional embeddings, one vector per position up to max_len.
        self.pos_embed = nn.Parameter(torch.zeros(1, max_len, embed_dim))
        self.blocks = nn.ModuleList([
            TransformerBlock(embed_dim, num_heads) for _ in range(num_layers)
        ])
        self.ln_final = nn.LayerNorm(embed_dim)
        self.head = nn.Linear(embed_dim, vocab_size)

    def forward(self, x):
        # x: (B, T) integer token ids, with T <= max_len.
        B, T = x.size()
        tok_emb = self.token_embed(x)        # (B, T, embed_dim)
        pos_emb = self.pos_embed[:, :T, :]   # (1, T, embed_dim), broadcast over batch
        x = tok_emb + pos_emb

        for block in self.blocks:
            x = block(x)

        x = self.ln_final(x)
        logits = self.head(x)                # (B, T, vocab_size)
        return logits
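
# A quick smoke test using hypothetical sizes; it only checks that a forward
# pass produces logits of the expected shape.
if __name__ == "__main__":
    vocab_size, max_len = 100, 32
    model = TinyTransformer(vocab_size, max_len)
    tokens = torch.randint(0, vocab_size, (2, 16))  # batch of 2, sequence length 16
    logits = model(tokens)
    print(logits.shape)  # expected: torch.Size([2, 16, 100])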