# GPT-2-450M / model_architecture.py
from dataclasses import dataclass
import torch
import torch.nn as nn
@dataclass
class GPT2Config:
vocab_size: int = 50304
hidden_size: int = 1024 # GPT-2 Medium
num_layers: int = 24 # GPT-2 Medium (for ~450M total)
num_heads: int = 16
intermediate_size: int = 4096
max_position_embeddings: int = 1024
rms_norm_eps: float = 1e-6
dropout: float = 0.1
use_bias: bool = False
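
# A rough parameter estimate for these defaults (assuming the lm_head is tied to
# the token embeddings, as done in GPT2Model below):
#   token embeddings:  50304 * 1024                 ~  51.5M
#   per block:         attention 4 * 1024^2         ~   4.2M
#                      SwiGLU FFN 3 * 1024 * 4096   ~  12.6M
#   24 blocks:                                      ~ 402.7M
#   total:                                          ~ 454M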
class RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.eps = eps
def forward(self, x):
        # Normalize by the root mean square of the features (no mean subtraction, no bias).
        variance = x.pow(2).mean(-1, keepdim=True)
x = x * torch.rsqrt(variance + self.eps)
return self.weight * x
class RotaryEmbedding(nn.Module):
def __init__(self, dim, max_position_embeddings=2048, base=10000):
super().__init__()
self.dim = dim
self.max_position_embeddings = max_position_embeddings
self.base = base
inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float() / self.dim))
self.register_buffer("inv_freq", inv_freq)
    def forward(self, x, seq_len):
        # x is unused; only seq_len and the device/dtype of inv_freq are needed here.
        t = torch.arange(seq_len, device=self.inv_freq.device).type_as(self.inv_freq)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)
        # Shape (1, 1, seq_len, dim) so cos/sin broadcast over the batch and head dimensions.
        cos = emb.cos()[None, None, :, :]
sin = emb.sin()[None, None, :, :]
return cos, sin
def rotate_half(x):
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin):
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
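
# Note on conventions: with cos/sin built from torch.cat((freqs, freqs), dim=-1)
# and rotate_half splitting at dim // 2, each pair (x[i], x[i + dim // 2]) is
# rotated by position * inv_freq[i] -- the "half-split" (GPT-NeoX-style) RoPE
# layout rather than the interleaved layout of the original RoPE paper.
# A minimal sanity-check sketch (illustrative only, the names below are hypothetical):
#   rope = RotaryEmbedding(dim=64)
#   cos, sin = rope(None, seq_len=8)
#   q = torch.randn(1, 1, 8, 64)
#   q_rot, _ = apply_rotary_pos_emb(q, q, cos, sin)
#   assert torch.allclose(q.norm(dim=-1), q_rot.norm(dim=-1), atol=1e-4)  # rotations preserve norms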
class GPT2Attention(nn.Module):
def __init__(self, config: GPT2Config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.num_heads = config.num_heads
self.head_dim = self.hidden_size // self.num_heads
self.scale = self.head_dim ** -0.5
assert self.hidden_size % self.num_heads == 0, "hidden_size must be divisible by num_heads"
self.q_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.use_bias)
self.k_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.use_bias)
self.v_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.use_bias)
self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.use_bias)
self.rotary_emb = RotaryEmbedding(self.head_dim, max_position_embeddings=config.max_position_embeddings)
self.dropout = nn.Dropout(config.dropout)
def forward(self, hidden_states, attention_mask=None):
batch_size, seq_len, _ = hidden_states.shape
q = self.q_proj(hidden_states)
k = self.k_proj(hidden_states)
v = self.v_proj(hidden_states)
q = q.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
k = k.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
v = v.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        # Rotary position embeddings depend only on seq_len (the first argument is unused).
        cos, sin = self.rotary_emb(None, seq_len)
q, k = apply_rotary_pos_emb(q, k, cos, sin)
        # Scaled dot-product attention scores: (Q K^T) / sqrt(head_dim).
        attn_weights = torch.matmul(q, k.transpose(-2, -1)) * self.scale
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = self.dropout(attn_weights)
attn_output = torch.matmul(attn_weights, v)
attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, seq_len, self.hidden_size)
attn_output = self.o_proj(attn_output)
attn_output = self.dropout(attn_output)
return attn_output
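
# Note: the attention above materializes the full (seq_len x seq_len) weight matrix.
# On PyTorch >= 2.0 an equivalent fused path is available; a possible drop-in sketch,
# assuming the mask is purely causal:
#   attn_output = nn.functional.scaled_dot_product_attention(
#       q, k, v, dropout_p=self.config.dropout if self.training else 0.0, is_causal=True)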
class GPT2FFN(nn.Module):
def __init__(self, config: GPT2Config):
super().__init__()
self.gate_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=config.use_bias)
self.up_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=config.use_bias)
self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=config.use_bias)
self.dropout = nn.Dropout(config.dropout)
self.act = nn.SiLU()
def forward(self, x):
gate = self.act(self.gate_proj(x))
up = self.up_proj(x)
feed_forward_hidden = gate * up
feed_forward_hidden = self.dropout(feed_forward_hidden)
output = self.down_proj(feed_forward_hidden)
return output
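
# The feed-forward block above is a SwiGLU-style gated MLP, down(SiLU(gate(x)) * up(x)),
# as used in LLaMA-family models, rather than the original GPT-2 GELU MLP.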
class GPT2Block(nn.Module):
def __init__(self, config: GPT2Config):
super().__init__()
self.attn_norm = RMSNorm(config.hidden_size, config.rms_norm_eps)
self.ffn_norm = RMSNorm(config.hidden_size, config.rms_norm_eps)
self.attention = GPT2Attention(config)
self.ffn = GPT2FFN(config)
self.dropout = nn.Dropout(config.dropout)
def forward(self, hidden_states, attention_mask=None):
        # Pre-norm residual layout: x + Dropout(Attn(Norm(x))), then x + Dropout(FFN(Norm(x))).
        residual = hidden_states
hidden_states = self.attn_norm(hidden_states)
attn_output = self.attention(hidden_states, attention_mask)
attn_output = self.dropout(attn_output)
hidden_states = residual + attn_output
residual = hidden_states
hidden_states = self.ffn_norm(hidden_states)
ffn_output = self.ffn(hidden_states)
ffn_output = self.dropout(ffn_output)
hidden_states = residual + ffn_output
return hidden_states
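
# Note: dropout is applied to the attention output both inside GPT2Attention.forward
# and again in GPT2Block.forward, so that path passes through dropout twice as written.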
class GPT2Model(nn.Module):
def __init__(self, config: GPT2Config):
super().__init__()
self.config = config
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
self.layers = nn.ModuleList([GPT2Block(config) for _ in range(config.num_layers)])
self.norm = RMSNorm(config.hidden_size, config.rms_norm_eps)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.apply(self._init_weights)
        # Tie the output projection to the token embedding matrix (weight tying).
        self.lm_head.weight = self.embed_tokens.weight
def _init_weights(self, module):
if isinstance(module, nn.Linear):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
if module.bias is not None:
torch.nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
def forward(self, input_ids, attention_mask=None):
batch_size, seq_len = input_ids.shape
hidden_states = self.embed_tokens(input_ids)
if attention_mask is None:
            # Additive causal mask: 0 on and below the diagonal, -inf above it.
            causal_mask = torch.full((seq_len, seq_len), float('-inf'), device=input_ids.device)
causal_mask = torch.triu(causal_mask, diagonal=1)
attention_mask = causal_mask[None, None, :, :]
for layer in self.layers:
hidden_states = layer(hidden_states, attention_mask)
hidden_states = self.norm(hidden_states)
logits = self.lm_head(hidden_states)
return logits
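

if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the original training
    # or inference code): build a tiny config, run one forward pass, and check shapes.
    tiny = GPT2Config(vocab_size=1000, hidden_size=128, num_layers=2, num_heads=4,
                      intermediate_size=256, max_position_embeddings=64)
    model = GPT2Model(tiny)
    input_ids = torch.randint(0, tiny.vocab_size, (2, 16))
    logits = model(input_ids)
    assert logits.shape == (2, 16, tiny.vocab_size)
    # Tied lm_head/embedding weights are counted once by model.parameters().
    n_params = sum(p.numel() for p in model.parameters())
    print(f"logits shape: {tuple(logits.shape)}, tiny-config parameters: {n_params / 1e6:.2f}M")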