| """Small but modern decoder-only transformer (~50M params). | |
| Uses RoPE, RMSNorm, SwiGLU FFN, tied embeddings, and PyTorch SDPA | |
| for causal attention (which lights up MPS fast-paths where available). | |
| """ | |
| import math | |
| import torch | |
| import torch.nn as nn | |
| import torch.nn.functional as F | |
| from config import ModelConfig | |
def precompute_rope(head_dim: int, seq_len: int, theta: float = 10000.0, device=None):
    inv_freq = 1.0 / (theta ** (torch.arange(0, head_dim, 2, device=device).float() / head_dim))
    t = torch.arange(seq_len, device=device).float()
    freqs = torch.outer(t, inv_freq)  # (T, head_dim/2)
    return freqs.cos(), freqs.sin()
def apply_rope(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
    # x: (B, H, T, D); cos/sin: (T, D/2)
    x1, x2 = x.chunk(2, dim=-1)
    cos = cos[None, None, :, :]
    sin = sin[None, None, :, :]
    return torch.cat([x1 * cos - x2 * sin, x1 * sin + x2 * cos], dim=-1)
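
# A minimal sanity sketch (illustrative only, not used by the model): the rotation
# pairs x[..., :D/2] with x[..., D/2:] (rotate-half convention), so applying it
# again with negated angles recovers the input:
#
#   cos, sin = precompute_rope(head_dim=64, seq_len=128)
#   q = torch.randn(2, 4, 128, 64)  # (B, H, T, D)
#   q_back = apply_rope(apply_rope(q, cos, sin), cos, -sin)
#   torch.testing.assert_close(q, q_back)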
class RMSNorm(nn.Module):
    def __init__(self, d: int, eps: float = 1e-5):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(d))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Always compute the norm in fp32 for stability, then cast back.
        dtype = x.dtype
        x32 = x.float()
        norm = torch.rsqrt(x32.pow(2).mean(-1, keepdim=True) + self.eps)
        return (x32 * norm).to(dtype) * self.weight
class Attention(nn.Module):
    def __init__(self, cfg: ModelConfig):
        super().__init__()
        assert cfg.d_model % cfg.n_heads == 0
        self.n_heads = cfg.n_heads
        self.head_dim = cfg.d_model // cfg.n_heads
        self.qkv = nn.Linear(cfg.d_model, 3 * cfg.d_model, bias=False)
        self.o = nn.Linear(cfg.d_model, cfg.d_model, bias=False)
        self.dropout = cfg.dropout

    def forward(self, x, cos, sin):
        B, T, C = x.shape
        q, k, v = self.qkv(x).chunk(3, dim=-1)
        q = q.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        k = k.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        v = v.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        q = apply_rope(q, cos[:T], sin[:T])
        k = apply_rope(k, cos[:T], sin[:T])
        y = F.scaled_dot_product_attention(
            q, k, v,
            is_causal=True,
            dropout_p=self.dropout if self.training else 0.0,
        )
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        return self.o(y)
class SwiGLU(nn.Module):
    def __init__(self, cfg: ModelConfig):
        super().__init__()
        self.w1 = nn.Linear(cfg.d_model, cfg.d_ff, bias=False)  # gate
        self.w2 = nn.Linear(cfg.d_ff, cfg.d_model, bias=False)  # down
        self.w3 = nn.Linear(cfg.d_model, cfg.d_ff, bias=False)  # up

    def forward(self, x):
        return self.w2(F.silu(self.w1(x)) * self.w3(x))
class Block(nn.Module):
    def __init__(self, cfg: ModelConfig):
        super().__init__()
        self.attn_norm = RMSNorm(cfg.d_model, cfg.norm_eps)
        self.attn = Attention(cfg)
        self.ffn_norm = RMSNorm(cfg.d_model, cfg.norm_eps)
        self.ffn = SwiGLU(cfg)

    def forward(self, x, cos, sin):
        x = x + self.attn(self.attn_norm(x), cos, sin)
        x = x + self.ffn(self.ffn_norm(x))
        return x
class IntelliteGPT(nn.Module):
    def __init__(self, cfg: ModelConfig):
        super().__init__()
        self.cfg = cfg
        self.tok_emb = nn.Embedding(cfg.vocab_size, cfg.d_model)
        self.blocks = nn.ModuleList([Block(cfg) for _ in range(cfg.n_layers)])
        self.norm = RMSNorm(cfg.d_model, cfg.norm_eps)
        self.lm_head = nn.Linear(cfg.d_model, cfg.vocab_size, bias=False)
        if cfg.tie_embeddings:
            self.lm_head.weight = self.tok_emb.weight

        cos, sin = precompute_rope(cfg.d_model // cfg.n_heads, cfg.seq_len, cfg.rope_theta)
        self.register_buffer("cos", cos, persistent=False)
        self.register_buffer("sin", sin, persistent=False)

        self.apply(self._init_weights)
        # GPT-2 style: scale residual projections by 1/sqrt(2*n_layers)
        scale = 0.02 / math.sqrt(2 * cfg.n_layers)
        for n, p in self.named_parameters():
            if n.endswith("attn.o.weight") or n.endswith("ffn.w2.weight"):
                nn.init.normal_(p, mean=0.0, std=scale)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            nn.init.normal_(m.weight, mean=0.0, std=0.02)
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.Embedding):
            nn.init.normal_(m.weight, mean=0.0, std=0.02)
    def num_params(self, exclude_embedding: bool = False) -> int:
        n = sum(p.numel() for p in self.parameters())
        if exclude_embedding:
            n -= self.tok_emb.weight.numel()
        return n
    def forward(self, idx: torch.Tensor, targets: torch.Tensor | None = None):
        B, T = idx.shape
        x = self.tok_emb(idx)
        cos, sin = self.cos, self.sin
        for block in self.blocks:
            x = block(x, cos, sin)
        x = self.norm(x)
        logits = self.lm_head(x)
        loss = None
        if targets is not None:
            loss = F.cross_entropy(
                logits.view(-1, logits.size(-1)).float(),
                targets.view(-1),
                ignore_index=-1,
            )
        return logits, loss
    @torch.no_grad()
    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        for _ in range(max_new_tokens):
            # Crop the context to the model's maximum sequence length.
            idx_cond = idx[:, -self.cfg.seq_len:]
            logits, _ = self(idx_cond)
            logits = logits[:, -1, :] / max(temperature, 1e-5)
            if top_k is not None:
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float("inf")
            probs = F.softmax(logits, dim=-1)
            next_tok = torch.multinomial(probs, num_samples=1)
            idx = torch.cat([idx, next_tok], dim=1)
        return idx
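
# Minimal smoke test (a sketch, assuming ModelConfig accepts the keyword arguments
# below with these meanings; adjust to the actual fields defined in config.py).
if __name__ == "__main__":
    cfg = ModelConfig(
        vocab_size=512,
        d_model=128,
        n_heads=4,
        n_layers=2,
        d_ff=384,
        seq_len=64,
        dropout=0.0,
        norm_eps=1e-5,
        rope_theta=10000.0,
        tie_embeddings=True,
    )
    model = IntelliteGPT(cfg)
    print(f"params: {model.num_params():,}")

    idx = torch.randint(0, cfg.vocab_size, (2, 16))
    logits, loss = model(idx, targets=idx)
    print(logits.shape, loss.item())  # expect (2, 16, 512) and a scalar loss

    out = model.generate(idx[:, :4], max_new_tokens=8, top_k=50)
    print(out.shape)  # expect (2, 12)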