Initial release: Gradio Space, weights pulled from ProCreations/intellite-500m-sft at startup (commit 2d55544, verified).

"""Small but modern decoder-only transformer.
Uses RoPE/NoPE hybrid attention, optional GQA, RMSNorm, SwiGLU FFN,
tied embeddings, and PyTorch SDPA for causal attention.
"""
import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from config import ModelConfig


def precompute_rope(head_dim: int, seq_len: int, theta: float = 10000.0, device=None):
    inv_freq = 1.0 / (theta ** (torch.arange(0, head_dim, 2, device=device).float() / head_dim))
    t = torch.arange(seq_len, device=device).float()
    freqs = torch.outer(t, inv_freq)  # (T, head_dim/2)
    return freqs.cos(), freqs.sin()


def apply_rope(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
    # x: (B, H, T, D); cos/sin: (T, D/2)
    x1, x2 = x.chunk(2, dim=-1)
    cos = cos[None, None, :, :]
    sin = sin[None, None, :, :]
    return torch.cat([x1 * cos - x2 * sin, x1 * sin + x2 * cos], dim=-1)
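
# Shape sketch for the two helpers above (illustrative only):
#   cos, sin = precompute_rope(head_dim=64, seq_len=2048)  # each (2048, 32)
#   q = torch.randn(1, 8, 2048, 64)                        # (B, H, T, D)
#   q_rot = apply_rope(q, cos, sin)                        # same shape as q
# This is the rotate-half RoPE variant: the head dim is split into two halves
# rotated pairwise, so attention logits depend only on relative position.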


class RMSNorm(nn.Module):
    def __init__(self, d: int, eps: float = 1e-5):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(d))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Always compute the norm in fp32 for stability, then cast back.
        dtype = x.dtype
        x32 = x.float()
        norm = torch.rsqrt(x32.pow(2).mean(-1, keepdim=True) + self.eps)
        return (x32 * norm).to(dtype) * self.weight


class Attention(nn.Module):
    def __init__(self, cfg: ModelConfig, layer_idx: int = 0):
        super().__init__()
        assert cfg.d_model % cfg.n_heads == 0
        self.n_heads = cfg.n_heads
        self.head_dim = cfg.d_model // cfg.n_heads
        self.n_kv_heads = cfg.n_kv_heads or cfg.n_heads
        assert cfg.n_heads % self.n_kv_heads == 0, "n_heads must be divisible by n_kv_heads"
        self.kv_dim = self.n_kv_heads * self.head_dim
        self.use_gqa = self.n_kv_heads != self.n_heads
        if self.use_gqa:
            self.q = nn.Linear(cfg.d_model, cfg.d_model, bias=False)
            self.k = nn.Linear(cfg.d_model, self.kv_dim, bias=False)
            self.v = nn.Linear(cfg.d_model, self.kv_dim, bias=False)
        else:
            # Keep the legacy key name so old full-MHA checkpoints still load.
            self.qkv = nn.Linear(cfg.d_model, 3 * cfg.d_model, bias=False)
        self.o = nn.Linear(cfg.d_model, cfg.d_model, bias=False)
        self.dropout = cfg.dropout
        self.is_global = (
            cfg.sliding_window is None
            or cfg.global_attn_every <= 1
            or ((layer_idx + 1) % cfg.global_attn_every == 0)
        )
        self.use_rope = not (cfg.nope_every and (layer_idx + 1) % cfg.nope_every == 0)
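        # Schedule sketch (1-indexed layers): with global_attn_every=4 and
        # nope_every=4, layers 4, 8, 12, ... attend globally while the rest
        # use the sliding window, and those same layers also skip RoPE (NoPE).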
        # QK-Norm (OLMo-2 / Gemma-3 / SmolLM3). Per-head RMSNorm on Q and K
        # BEFORE RoPE; stops attn-logit drift that Muon's spectral updates
        # don't constrain. Adds only 2 × head_dim parameters per layer.
        if getattr(cfg, "qk_norm", False):
            self.q_norm = RMSNorm(self.head_dim, cfg.norm_eps)
            self.k_norm = RMSNorm(self.head_dim, cfg.norm_eps)
        else:
            self.q_norm = None
            self.k_norm = None

    def forward(self, x, cos, sin, local_mask=None):
        B, T, C = x.shape
        if self.use_gqa:
            q = self.q(x)
            k = self.k(x)
            v = self.v(x)
            q = q.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
            k = k.view(B, T, self.n_kv_heads, self.head_dim).transpose(1, 2)
            v = v.view(B, T, self.n_kv_heads, self.head_dim).transpose(1, 2)
        else:
            q, k, v = self.qkv(x).chunk(3, dim=-1)
            q = q.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
            k = k.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
            v = v.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        if self.q_norm is not None:
            q = self.q_norm(q)
            k = self.k_norm(k)
        if self.use_rope:
            q = apply_rope(q, cos[:T], sin[:T])
            k = apply_rope(k, cos[:T], sin[:T])
        if self.use_gqa:
            # Expand KV heads so each group of query heads shares one KV head.
            repeat = self.n_heads // self.n_kv_heads
            k = k.repeat_interleave(repeat, dim=1)
            v = v.repeat_interleave(repeat, dim=1)
        attn_mask = None if self.is_global or local_mask is None else local_mask[:T, :T]
        y = F.scaled_dot_product_attention(
            q, k, v,
            attn_mask=attn_mask,
            is_causal=attn_mask is None,
            dropout_p=self.dropout if self.training else 0.0,
        )
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        return self.o(y)


class SwiGLU(nn.Module):
    def __init__(self, cfg: ModelConfig):
        super().__init__()
        self.w1 = nn.Linear(cfg.d_model, cfg.d_ff, bias=False)  # gate
        self.w2 = nn.Linear(cfg.d_ff, cfg.d_model, bias=False)  # down
        self.w3 = nn.Linear(cfg.d_model, cfg.d_ff, bias=False)  # up

    def forward(self, x):
        return self.w2(F.silu(self.w1(x)) * self.w3(x))


class Block(nn.Module):
    def __init__(self, cfg: ModelConfig, layer_idx: int = 0):
        super().__init__()
        self.norm_order = cfg.norm_order
        self.attn_norm = RMSNorm(cfg.d_model, cfg.norm_eps)
        self.attn = Attention(cfg, layer_idx)
        self.ffn_norm = RMSNorm(cfg.d_model, cfg.norm_eps)
        self.ffn = SwiGLU(cfg)

    def forward(self, x, cos, sin, local_mask=None):
        if self.norm_order == "post":
            x = x + self.attn_norm(self.attn(x, cos, sin, local_mask))
            x = x + self.ffn_norm(self.ffn(x))
        else:
            x = x + self.attn(self.attn_norm(x), cos, sin, local_mask)
            x = x + self.ffn(self.ffn_norm(x))
        return x


class IntelliteGPT(nn.Module):
    def __init__(self, cfg: ModelConfig):
        super().__init__()
        self.cfg = cfg
        self.tok_emb = nn.Embedding(cfg.vocab_size, cfg.d_model)
        self.blocks = nn.ModuleList([Block(cfg, i) for i in range(cfg.n_layers)])
        self.norm = RMSNorm(cfg.d_model, cfg.norm_eps)
        self.lm_head = nn.Linear(cfg.d_model, cfg.vocab_size, bias=False)
        if cfg.tie_embeddings:
            self.lm_head.weight = self.tok_emb.weight
        cos, sin = precompute_rope(cfg.d_model // cfg.n_heads, cfg.seq_len, cfg.rope_theta)
        self.register_buffer("cos", cos, persistent=False)
        self.register_buffer("sin", sin, persistent=False)
        self._set_local_attention_mask(cfg.seq_len)
        self.apply(self._init_weights)
        # GPT-2 style: scale residual projections by 1/sqrt(2*n_layers).
        scale = 0.02 / math.sqrt(2 * cfg.n_layers)
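        # e.g. n_layers=24 -> std = 0.02 / sqrt(48) ≈ 0.0029, so residual
        # writes start small and the stream's variance stays bounded.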
        for n, p in self.named_parameters():
            if n.endswith("attn.o.weight") or n.endswith("ffn.w2.weight"):
                nn.init.normal_(p, mean=0.0, std=scale)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            nn.init.normal_(m.weight, mean=0.0, std=0.02)
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.Embedding):
            nn.init.normal_(m.weight, mean=0.0, std=0.02)

    def num_params(self, exclude_embedding: bool = False) -> int:
        n = sum(p.numel() for p in self.parameters())
        if exclude_embedding:
            n -= self.tok_emb.weight.numel()
        return n

    def _set_local_attention_mask(self, seq_len: int):
        window = getattr(self.cfg, "sliding_window", None)
        if window is None:
            self.register_buffer("local_attn_mask", None, persistent=False)
            return
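        # Banded causal mask: True where key j is visible to query i, i.e.
        # i - window < j <= i. For example, seq_len=5, window=3:
        #   1 0 0 0 0
        #   1 1 0 0 0
        #   1 1 1 0 0
        #   0 1 1 1 0
        #   0 0 1 1 1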
        mask = torch.ones(seq_len, seq_len, dtype=torch.bool).tril()
        mask = torch.triu(mask, diagonal=-(window - 1))
        self.register_buffer("local_attn_mask", mask, persistent=False)

    def retune_rope(self, new_seq_len: int, rope_theta: float | None = None):
        """Recompute RoPE cos/sin buffers for a longer inference context.

        The model was trained with rope_theta wide enough (e.g. 500k) that
        positions up to ~3× the training length stay in-distribution without
        any scaling; just call this once after loading the checkpoint."""
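        # Usage sketch (hypothetical numbers; assumes training seq_len 4096):
        #   model.load_state_dict(state_dict)
        #   model.retune_rope(3 * 4096)  # extend context to ~12k tokens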
        head_dim = self.cfg.d_model // self.cfg.n_heads
        theta = rope_theta if rope_theta is not None else self.cfg.rope_theta
        device = self.cos.device
        cos, sin = precompute_rope(head_dim, new_seq_len, theta, device=device)
        self.register_buffer("cos", cos, persistent=False)
        self.register_buffer("sin", sin, persistent=False)
        self._set_local_attention_mask(new_seq_len)
        if self.local_attn_mask is not None:
            self.local_attn_mask = self.local_attn_mask.to(device=device)
        self.cfg.seq_len = new_seq_len
        return self

    def _loss_logits(self, logits: torch.Tensor) -> torch.Tensor:
        flat = logits.view(-1, logits.size(-1))
        loss_dtype = getattr(self.cfg, "loss_dtype", "float32")
        if loss_dtype in (None, "native"):
            return flat
        if loss_dtype in ("bf16", "bfloat16"):
            return flat.bfloat16()
        if loss_dtype in ("fp32", "float32"):
            return flat.float()
        raise ValueError(f"unknown loss_dtype: {loss_dtype!r}")

    def forward(self, idx: torch.Tensor, targets: torch.Tensor | None = None):
        B, T = idx.shape
        x = self.tok_emb(idx)
        cos, sin = self.cos, self.sin
        local_mask = self.local_attn_mask
        for block in self.blocks:
            x = block(x, cos, sin, local_mask)
        x = self.norm(x)
        logits = self.lm_head(x)
        # Tanh logit soft-cap (Gemma-2/3, modded-nanogpt). Zero-parameter,
        # caps outputs in [-cap, +cap]; composes with z-loss below.
        cap = getattr(self.cfg, "logit_soft_cap", None)
        if cap:
            logits = cap * torch.tanh(logits / cap)
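        # e.g. cap=30: a raw logit of 100 becomes 30 * tanh(100 / 30) ≈ 29.9,
        # while small logits pass through nearly unchanged (tanh(x) ≈ x).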
        loss = None
        if targets is not None:
            flat = self._loss_logits(logits)
            # Disable autocast here so H200 bf16 loss_dtype does not get
            # silently promoted back to a full fp32 logits tensor.
            with torch.autocast(device_type=flat.device.type, enabled=False):
                ce = F.cross_entropy(flat, targets.view(-1), ignore_index=-1)
            loss = ce
            # PaLM-style z-loss: penalizes drift of the log-partition function.
            # Prevents BF16 overflow at the LM head on long runs.
            z_coef = getattr(self.cfg, "z_loss_coef", 0.0)
            if z_coef:
                # Only average over supervised positions (targets != -1).
                supervised = (targets.view(-1) != -1)
                if supervised.any():
                    z = torch.logsumexp(flat[supervised], dim=-1).float()
                    loss = loss + z_coef * (z ** 2).mean()
        return logits, loss

    @torch.no_grad()  # sampling never needs gradients
    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        for _ in range(max_new_tokens):
            idx_cond = idx[:, -self.cfg.seq_len:]
            logits, _ = self(idx_cond)
            logits = logits[:, -1, :] / max(temperature, 1e-5)
            if top_k is not None:
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float("inf")
            probs = F.softmax(logits, dim=-1)
            next_tok = torch.multinomial(probs, num_samples=1)
            idx = torch.cat([idx, next_tok], dim=1)
        return idx
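

if __name__ == "__main__":
    # Minimal smoke test: a sketch only. It assumes ModelConfig (config.py)
    # is a dataclass-style config accepting the keyword fields referenced
    # above; the tiny sizes here are illustrative, not the released 500M
    # hyperparameters.
    cfg = ModelConfig(
        vocab_size=256,
        d_model=64,
        n_layers=4,
        n_heads=4,
        n_kv_heads=2,         # GQA: two query heads share each KV head
        d_ff=128,
        seq_len=32,
        dropout=0.0,
        norm_eps=1e-5,
        norm_order="pre",
        rope_theta=10000.0,
        tie_embeddings=True,
        sliding_window=16,
        global_attn_every=2,  # every 2nd layer attends globally
        nope_every=4,         # every 4th layer skips RoPE
    )
    model = IntelliteGPT(cfg)
    print(f"params: {model.num_params():,}")
    idx = torch.randint(0, cfg.vocab_size, (2, 16))
    logits, loss = model(idx, targets=idx)
    print(logits.shape, None if loss is None else loss.item())  # (2, 16, 256)
    out = model.generate(idx, max_new_tokens=8, temperature=0.8, top_k=20)
    print(out.shape)  # (2, 24)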