# mini-gpt1: model_code/decoder_only_transformer.py
import torch
import torch.nn as nn


class DecoderEmbeddings(nn.Module):
    def __init__(self, vocab_size, embed_dim, max_len):
        super().__init__()
        self.token_embed = nn.Embedding(vocab_size, embed_dim)
        self.pos_embed = nn.Embedding(max_len, embed_dim)
        self.dropout = nn.Dropout(0.1)

    def forward(self, input_ids):
        seq_len = input_ids.size(1)
        positions = torch.arange(0, seq_len, device=input_ids.device).unsqueeze(0)  # [1, seq_len]
        token_embeddings = self.token_embed(input_ids)  # [batch, seq_len, dim]
        pos_embeddings = self.pos_embed(positions)      # [1, seq_len, dim]
        return self.dropout(token_embeddings + pos_embeddings)


def generate_causal_mask(seq_len, device):
    mask = torch.tril(torch.ones(seq_len, seq_len, device=device))  # lower triangular
    return mask == 0  # False = allow attend, True = mask
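# For illustration, with seq_len = 3 the mask returned above is
#   [[False,  True,  True],
#    [False, False,  True],
#    [False, False, False]]
# so each position may attend to itself and to earlier positions only.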


class MultiHeadSelfAttention(nn.Module):
    def __init__(self, embed_dim, num_heads):
        super().__init__()
        assert embed_dim % num_heads == 0
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        self.qkv_proj = nn.Linear(embed_dim, embed_dim * 3)
        self.out_proj = nn.Linear(embed_dim, embed_dim)

    def forward(self, x, attn_mask=None):
        batch_size, seq_len, embed_dim = x.size()
        # Get Q, K, V
        qkv = self.qkv_proj(x)  # [B, T, 3 * D]
        qkv = qkv.view(batch_size, seq_len, 3, self.num_heads, self.head_dim)
        qkv = qkv.permute(2, 0, 3, 1, 4)  # [3, B, H, T, D]
        q, k, v = qkv[0], qkv[1], qkv[2]  # Each: [B, H, T, D]
        # Attention scores
        scores = (q @ k.transpose(-2, -1)) / (self.head_dim ** 0.5)  # [B, H, T, T]
        if attn_mask is not None:
            scores = scores.masked_fill(attn_mask.unsqueeze(0).unsqueeze(0), float('-inf'))
        attn_weights = torch.softmax(scores, dim=-1)  # [B, H, T, T]
        attn_output = attn_weights @ v  # [B, H, T, D]
        # Merge heads
        attn_output = attn_output.transpose(1, 2).contiguous()  # [B, T, H, D]
        attn_output = attn_output.view(batch_size, seq_len, embed_dim)
        return self.out_proj(attn_output)
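# Note: on PyTorch 2.x the computation above can likely be replaced by
# torch.nn.functional.scaled_dot_product_attention(q, k, v, is_causal=True),
# which fuses the same scaled dot-product attention into a single kernel;
# the manual version is kept here for readability.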


class FeedForward(nn.Module):
    def __init__(self, embed_dim, ff_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(embed_dim, ff_dim),
            nn.GELU(),
            nn.Linear(ff_dim, embed_dim)
        )

    def forward(self, x):
        return self.net(x)


class DecoderBlock(nn.Module):
    def __init__(self, embed_dim, num_heads, ff_dim):
        super().__init__()
        self.ln1 = nn.LayerNorm(embed_dim)
        self.attn = MultiHeadSelfAttention(embed_dim, num_heads)
        self.ln2 = nn.LayerNorm(embed_dim)
        self.ff = FeedForward(embed_dim, ff_dim)

    def forward(self, x, attn_mask):
        # Self-attention with residual
        attn_out = self.attn(self.ln1(x), attn_mask)
        x = x + attn_out
        # Feedforward with residual
        ff_out = self.ff(self.ln2(x))
        x = x + ff_out
        return x
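# Note: LayerNorm is applied before each sub-layer (pre-LN, as in GPT-2),
# which tends to train more stably than the post-LN arrangement of the
# original Transformer.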


class DecoderOnlyTransformer(nn.Module):
    def __init__(self, vocab_size, max_len, embed_dim, num_heads, depth, ff_dim):
        super().__init__()
        self.embedding = DecoderEmbeddings(vocab_size, embed_dim, max_len)
        self.blocks = nn.ModuleList([
            DecoderBlock(embed_dim, num_heads, ff_dim)
            for _ in range(depth)
        ])
        self.ln_final = nn.LayerNorm(embed_dim)
        self.head = nn.Linear(embed_dim, vocab_size)  # Language modeling head

    def forward(self, input_ids):
        """
        input_ids: [B, T]
        """
        B, T = input_ids.size()
        x = self.embedding(input_ids)  # [B, T, D]
        # Generate causal mask: True where mask is applied
        mask = generate_causal_mask(T, input_ids.device)
        for block in self.blocks:
            x = block(x, attn_mask=mask)
        x = self.ln_final(x)  # [B, T, D]
        logits = self.head(x)  # [B, T, vocab_size]
        return logits
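

if __name__ == "__main__":
    # Minimal smoke test: the hyperparameters below are small illustrative
    # values, not the configuration used to train mini-gpt1.
    model = DecoderOnlyTransformer(
        vocab_size=1000,
        max_len=128,
        embed_dim=64,
        num_heads=4,
        depth=2,
        ff_dim=256,
    )
    dummy_ids = torch.randint(0, 1000, (2, 16))  # [B=2, T=16]
    logits = model(dummy_ids)
    print(logits.shape)  # expected: torch.Size([2, 16, 1000])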