import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass


@dataclass
class Config:
    """Hyperparameters for a small GPT-style decoder-only transformer."""
    vocab_size: int = 50257
    block_size: int = 512
    n_layer: int = 6
    n_head: int = 8
    n_embd: int = 384


class RMSNorm(nn.Module):
    """RMS normalization: rescale by the root-mean-square of the features.

    Unlike LayerNorm there is no mean subtraction and no bias, only a
    learned per-feature gain.
    """

    def __init__(self, dim):
        super().__init__()
        self.scale = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        # The eps inside the sqrt guards against division by zero.
        return x * self.scale / (x.pow(2).mean(-1, keepdim=True) + 1e-6).sqrt()


def apply_rotary_emb(q, k, cos, sin):
    """Apply rotary position embeddings (RoPE) to query and key tensors.

    Uses the half-split (GPT-NeoX style) convention: the head dimension is
    split into two halves, and each pair (q[i], q[i + head_dim/2]) is treated
    as a 2-D point rotated by a position-dependent angle.
    """
    head_dim = q.shape[-1]
    q_real, q_imag = q[..., :head_dim // 2], q[..., head_dim // 2:]
    k_real, k_imag = k[..., :head_dim // 2], k[..., head_dim // 2:]
    # Standard 2-D rotation: (x, y) -> (x cos - y sin, x sin + y cos).
    q_rot = torch.cat((q_real * cos - q_imag * sin, q_real * sin + q_imag * cos), dim=-1)
    k_rot = torch.cat((k_real * cos - k_imag * sin, k_real * sin + k_imag * cos), dim=-1)
    return q_rot, k_rot
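
# Illustrative sanity check (an addition, not part of the model itself): the
# rotation above is norm-preserving, which is why RoPE changes attention
# scores only through relative position, never through vector magnitude.
# A minimal sketch; the default sizes below are arbitrary.
def _rope_norms_are_preserved(n_head=2, T=4, head_dim=8):
    q = torch.randn(1, n_head, T, head_dim)
    k = torch.randn(1, n_head, T, head_dim)
    freqs = 1.0 / (10000.0 ** (torch.arange(0, head_dim, 2).float() / head_dim))
    angles = torch.outer(torch.arange(T).float(), freqs)  # (T, head_dim/2)
    cos = angles.cos()[None, None]                        # (1, 1, T, head_dim/2)
    sin = angles.sin()[None, None]
    q_rot, k_rot = apply_rotary_emb(q, k, cos, sin)
    # Per-position, per-head norms must survive the rotation.
    assert torch.allclose(q.norm(dim=-1), q_rot.norm(dim=-1), atol=1e-5)
    assert torch.allclose(k.norm(dim=-1), k_rot.norm(dim=-1), atol=1e-5)
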
class MiniGPTBlock(nn.Module):
    """Pre-norm transformer block: causal self-attention + SwiGLU MLP."""

    def __init__(self, config):
        super().__init__()
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.ln_1 = RMSNorm(config.n_embd)
        self.ln_2 = RMSNorm(config.n_embd)
        # Fused projection computes q, k, v in a single matmul.
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=False)
        # SwiGLU uses 2/3 of the usual 4x expansion so that its three
        # matrices carry roughly the parameter count of a 4x GELU MLP.
        hidden_dim = 8 * config.n_embd // 3
        self.mlp = nn.ModuleDict({
            'c_fc1': nn.Linear(config.n_embd, hidden_dim, bias=False),  # gate branch
            'c_fc2': nn.Linear(config.n_embd, hidden_dim, bias=False),  # value branch
            'c_proj': nn.Linear(hidden_dim, config.n_embd, bias=False),
        })

    def forward(self, x, cos, sin):
        x = x + self._attn_block(self.ln_1(x), cos, sin)
        x = x + self._mlp_block(self.ln_2(x))
        return x

    def _attn_block(self, x, cos, sin):
        B, T, C = x.size()
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        # (B, T, C) -> (B, n_head, T, head_dim)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        q, k = apply_rotary_emb(q, k, cos, sin)
        # Fused, memory-efficient attention with the causal mask applied.
        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        return self.c_proj(y)

    def _mlp_block(self, x):
        # SwiGLU: silu(W1 x) elementwise-gates (W2 x).
        gate = F.silu(self.mlp.c_fc1(x))
        val = self.mlp.c_fc2(x)
        return self.mlp.c_proj(gate * val)


class MiniGPT(nn.Module):
    """Decoder-only language model with tied embeddings and RoPE."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.transformer = nn.ModuleDict({
            'wte': nn.Embedding(config.vocab_size, config.n_embd),
            'h': nn.ModuleList([MiniGPTBlock(config) for _ in range(config.n_layer)]),
            'ln_f': RMSNorm(config.n_embd),
        })
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # Weight tying: the output projection shares the embedding matrix.
        self.lm_head.weight = self.transformer.wte.weight
        # Precompute RoPE cos/sin tables up to 2x block_size, shaped
        # (1, 1, max_len, head_dim/2) to broadcast over batch and heads.
        dim = config.n_embd // config.n_head
        max_len = config.block_size * 2
        freqs = 1.0 / (10000.0 ** (torch.arange(0, dim, 2).float() / dim))
        t = torch.arange(max_len, dtype=torch.float32)
        freqs = torch.outer(t, freqs)
        # persistent=False: the tables are rebuilt from config at init,
        # so they need not be stored in checkpoints.
        self.register_buffer("freqs_cos", freqs.cos().unsqueeze(0).unsqueeze(0), persistent=False)
        self.register_buffer("freqs_sin", freqs.sin().unsqueeze(0).unsqueeze(0), persistent=False)
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None):
        b, t = idx.size()
        assert t <= self.config.block_size, (
            f"sequence length {t} exceeds block_size {self.config.block_size}"
        )
        tok_emb = self.transformer.wte(idx)
        # Slice the precomputed RoPE tables to the current sequence length.
        cos = self.freqs_cos[:, :, :t, :]
        sin = self.freqs_sin[:, :, :t, :]
        x = tok_emb
        for block in self.transformer.h:
            x = block(x, cos, sin)
        x = self.transformer.ln_f(x)
        logits = self.lm_head(x)
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss
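

# Minimal smoke test, a sketch assuming an untrained model: checks output
# shapes, verifies the loss starts near ln(vocab_size), and runs a few steps
# of greedy decoding. There is no KV cache here, so the full prefix is
# re-encoded every step; fine for a sanity check, quadratic in practice.
if __name__ == "__main__":
    torch.manual_seed(0)
    _rope_norms_are_preserved()

    config = Config(vocab_size=1000, block_size=64, n_layer=2, n_head=4, n_embd=128)
    model = MiniGPT(config)
    model.eval()

    idx = torch.randint(0, config.vocab_size, (2, 16))  # (B, T) random token ids
    with torch.no_grad():
        logits, loss = model(idx, targets=idx)
    print(logits.shape)  # torch.Size([2, 16, 1000])
    print(loss.item())   # roughly ln(1000) ~= 6.9 at initialization

    # Greedy decoding: append the argmax token and re-run the growing prefix,
    # cropped to block_size. Outputs are meaningless without training.
    tokens = idx[:1, :4]
    with torch.no_grad():
        for _ in range(8):
            logits, _ = model(tokens[:, -config.block_size:])
            next_tok = logits[:, -1, :].argmax(dim=-1, keepdim=True)
            tokens = torch.cat([tokens, next_tok], dim=1)
    print(tokens.tolist())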