import torch
import torch.nn as nn


class DecoderEmbeddings(nn.Module):
    """Token embeddings plus learned positional embeddings, followed by dropout."""

    def __init__(self, vocab_size, embed_dim, max_len):
        super().__init__()
        self.token_embed = nn.Embedding(vocab_size, embed_dim)
        self.pos_embed = nn.Embedding(max_len, embed_dim)
        self.dropout = nn.Dropout(0.1)

    def forward(self, input_ids):
        # input_ids: [B, T]
        seq_len = input_ids.size(1)
        positions = torch.arange(0, seq_len, device=input_ids.device).unsqueeze(0)  # [1, T]
        token_embeddings = self.token_embed(input_ids)  # [B, T, E]
        pos_embeddings = self.pos_embed(positions)      # [1, T, E], broadcast over the batch
        return self.dropout(token_embeddings + pos_embeddings)


def generate_causal_mask(seq_len, device):
    """Return a [seq_len, seq_len] boolean mask where True marks positions
    that must not be attended to (i.e. future tokens)."""
    mask = torch.tril(torch.ones(seq_len, seq_len, device=device))
    return mask == 0
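
# For example, generate_causal_mask(3, device) yields
#   tensor([[False,  True,  True],
#           [False, False,  True],
#           [False, False, False]])
# where True marks the future positions that are filled with -inf before the softmax.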


class MultiHeadSelfAttention(nn.Module):
    def __init__(self, embed_dim, num_heads):
        super().__init__()
        assert embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"

        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads

        # Single projection producing queries, keys and values in one matmul.
        self.qkv_proj = nn.Linear(embed_dim, embed_dim * 3)
        self.out_proj = nn.Linear(embed_dim, embed_dim)

    def forward(self, x, attn_mask=None):
        batch_size, seq_len, embed_dim = x.size()

        # Project to Q, K, V and split into heads: [3, B, H, T, head_dim].
        qkv = self.qkv_proj(x)
        qkv = qkv.view(batch_size, seq_len, 3, self.num_heads, self.head_dim)
        qkv = qkv.permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]

        # Scaled dot-product attention scores: [B, H, T, T].
        scores = (q @ k.transpose(-2, -1)) / (self.head_dim ** 0.5)

        if attn_mask is not None:
            # attn_mask is [T, T]; broadcast it over batch and heads.
            scores = scores.masked_fill(attn_mask.unsqueeze(0).unsqueeze(0), float('-inf'))

        attn_weights = torch.softmax(scores, dim=-1)
        attn_output = attn_weights @ v  # [B, H, T, head_dim]

        # Merge the heads back into the embedding dimension: [B, T, E].
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(batch_size, seq_len, embed_dim)

        return self.out_proj(attn_output)
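
# Note: on PyTorch 2.x the masked attention above can equivalently be computed with
# torch.nn.functional.scaled_dot_product_attention(q, k, v, is_causal=True), which can
# dispatch to fused kernels; the manual version is kept here for clarity.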


class FeedForward(nn.Module):
    """Position-wise two-layer MLP with a GELU non-linearity."""

    def __init__(self, embed_dim, ff_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(embed_dim, ff_dim),
            nn.GELU(),
            nn.Linear(ff_dim, embed_dim)
        )

    def forward(self, x):
        return self.net(x)


class DecoderBlock(nn.Module):
    """Pre-LayerNorm decoder block: masked self-attention and a feed-forward
    sub-layer, each wrapped in a residual connection."""

    def __init__(self, embed_dim, num_heads, ff_dim):
        super().__init__()
        self.ln1 = nn.LayerNorm(embed_dim)
        self.attn = MultiHeadSelfAttention(embed_dim, num_heads)
        self.ln2 = nn.LayerNorm(embed_dim)
        self.ff = FeedForward(embed_dim, ff_dim)

    def forward(self, x, attn_mask):
        # Masked self-attention sub-layer with residual connection.
        attn_out = self.attn(self.ln1(x), attn_mask)
        x = x + attn_out

        # Feed-forward sub-layer with residual connection.
        ff_out = self.ff(self.ln2(x))
        x = x + ff_out

        return x


class DecoderOnlyTransformer(nn.Module):
    def __init__(self, vocab_size, max_len, embed_dim, num_heads, depth, ff_dim):
        super().__init__()
        self.embedding = DecoderEmbeddings(vocab_size, embed_dim, max_len)

        self.blocks = nn.ModuleList([
            DecoderBlock(embed_dim, num_heads, ff_dim)
            for _ in range(depth)
        ])

        self.ln_final = nn.LayerNorm(embed_dim)
        self.head = nn.Linear(embed_dim, vocab_size)

    def forward(self, input_ids):
        """
        input_ids: [B, T]
        returns:   [B, T, vocab_size] next-token logits
        """
        B, T = input_ids.size()
        x = self.embedding(input_ids)

        # Causal mask so each position attends only to itself and earlier tokens.
        mask = generate_causal_mask(T, input_ids.device)

        for block in self.blocks:
            x = block(x, attn_mask=mask)

        x = self.ln_final(x)
        logits = self.head(x)

        return logits
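

if __name__ == "__main__":
    # Minimal smoke test with illustrative hyperparameters (chosen for the example,
    # not taken from any particular configuration): build a small model, run a
    # forward pass on random token ids, and check the output shape.
    model = DecoderOnlyTransformer(
        vocab_size=1000,
        max_len=128,
        embed_dim=64,
        num_heads=4,
        depth=2,
        ff_dim=256,
    )
    dummy_ids = torch.randint(0, 1000, (2, 16))  # [B=2, T=16] random token ids
    logits = model(dummy_ids)
    print(logits.shape)  # expected: torch.Size([2, 16, 1000])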