# beeper.py
# --------------------------------------------------------------------------------------------------
# Beeper Full Penta Controller — Rose-based tiny GPT (inference module with runtime pentachora influence)
# - Decoder-only GPT with SDPA (FlashAttention path on Ampere/Hopper)
# - Runtime "vertex pull" uses config["runtime_pentachora"] to bias hidden states toward
#   pentachora vertices (coarse/topic/mood) exactly like training-time behavior, but non-destructive
#   and fully toggleable.
# --------------------------------------------------------------------------------------------------
from __future__ import annotations

import math
import re
import inspect
from contextlib import nullcontext
from typing import Optional, Tuple, Dict, Any

import torch
import torch.nn as nn
import torch.nn.functional as F

# --- Prefer high-throughput matmul where possible (Ampere/Hopper) ---
torch.set_float32_matmul_precision("high")
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True

# ---- Version-safe SDPA (FlashAttention) selection -------------------------------------------------
try:
    # PyTorch 2.3+ modern API
    from torch.nn.attention import sdpa_kernel as _sdpa_kernel_modern
    from torch.nn.attention import SDPBackend as _SDPBackend
    _SDPA_SIG = inspect.signature(_sdpa_kernel_modern)
    _sdpa_kernel = _sdpa_kernel_modern
except Exception:
    try:
        # Legacy API
        from torch.backends.cuda import sdp_kernel as _sdpa_kernel_legacy
        _SDPA_SIG = inspect.signature(_sdpa_kernel_legacy)
        _SDPBackend = None
        _sdpa_kernel = _sdpa_kernel_legacy
    except Exception:
        _SDPA_SIG = None
        _SDPBackend = None
        _sdpa_kernel = None

def sdpa_ctx_prefer_flash():
    """Bias SDPA toward FlashAttention where possible; otherwise no-op."""
    if _sdpa_kernel is None or _SDPA_SIG is None:
        return nullcontext()
    params = {p.name for p in _SDPA_SIG.parameters.values()}
    try:
        if "backends" in params and _SDPBackend is not None:
            return _sdpa_kernel(backends=[
                _SDPBackend.FLASH_ATTENTION,
                _SDPBackend.EFFICIENT_ATTENTION,
                _SDPBackend.MATH,
            ])
        if "backend" in params and _SDPBackend is not None:
            return _sdpa_kernel(backend=_SDPBackend.FLASH_ATTENTION)
        if {"enable_flash", "enable_math", "enable_mem_efficient"} <= params:
            return _sdpa_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=True)
        if {"use_flash", "use_math", "use_mem_efficient"} <= params:
            return _sdpa_kernel(use_flash=True, use_math=False, use_mem_efficient=True)
    except Exception:
        pass
    return nullcontext()

# --------------------------------- Core blocks ------------------------------------------------------
class CausalSelfAttention(nn.Module):
    """Multi-head causal self-attention using PyTorch SDPA."""

    def __init__(self, dim: int, n_heads: int, attn_dropout: float = 0.0):
        super().__init__()
        assert dim % n_heads == 0, "dim must be divisible by n_heads"
        self.nh = int(n_heads)
        self.hd = dim // self.nh
        self.qkv = nn.Linear(dim, 3 * dim, bias=False)
        self.proj = nn.Linear(dim, dim, bias=False)
        self.attn_dropout = float(attn_dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        B, T, C = x.shape
        qkv = self.qkv(x)
        q, k, v = qkv.chunk(3, dim=-1)
        q = q.view(B, T, self.nh, self.hd).transpose(1, 2)  # [B,H,T,D]
        k = k.view(B, T, self.nh, self.hd).transpose(1, 2)
        v = v.view(B, T, self.nh, self.hd).transpose(1, 2)
        if x.is_cuda:
            with sdpa_ctx_prefer_flash():
                y = F.scaled_dot_product_attention(
                    q, k, v,
                    is_causal=True,
                    dropout_p=self.attn_dropout if self.training else 0.0,
                )
        else:
            scale = 1.0 / math.sqrt(self.hd)
            att = (q @ k.transpose(-2, -1)) * scale
            mask = torch.full((1, 1, T, T), float("-inf"), device=x.device)
            mask = torch.triu(mask, diagonal=1)
            att = (att + mask).softmax(dim=-1)
            y = att @ v
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        return self.proj(y)

class MLP(nn.Module):
    """GELU MLP with dropout, sized by mlp_ratio."""

    def __init__(self, dim: int, mlp_ratio: float = 4.0, dropout: float = 0.1):
        super().__init__()
        hidden = int(dim * mlp_ratio)
        self.fc1 = nn.Linear(dim, hidden)
        self.fc2 = nn.Linear(hidden, dim)
        self.drop = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.fc1(x)
        x = F.gelu(x, approximate="tanh")
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x

# --------------------------------- Beeper Model -----------------------------------------------------
class BeeperRoseGPT(nn.Module):
    """
    Decoder-only GPT used by Beeper during training and inference.

    Config keys used:
      - vocab_size, dim, context, n_heads, n_layers, mlp_ratio
      - resid_dropout, dropout, grad_checkpoint
      - runtime_pentachora: {
            "enable": bool,
            "pool": "mean" | "last",
            "temp": float,          # similarity temperature (default: 0.10)
            "coarse_alpha": float,  # hidden blend strength for coarse bank
            "topic_alpha": float,   # hidden blend strength for topic bank
            "mood_alpha": float     # hidden blend strength for mood bank
        }

    Notes:
      - Shares token embedding with LM head (tied weights).
      - Includes Rose anchors and pentachora banks; at runtime we can apply a *non-destructive*
        vertex pull to hidden states before the LM head using the above config.
    """

    def __init__(self, cfg: dict):
        super().__init__()
        V, D, Ctx = cfg["vocab_size"], cfg["dim"], cfg["context"]
        H, L, MR = cfg["n_heads"], cfg["n_layers"], cfg["mlp_ratio"]
        RD, AD = cfg.get("resid_dropout", 0.1), cfg.get("dropout", 0.0)
        self.grad_checkpoint = bool(cfg.get("grad_checkpoint", False))
        self.runtime_cfg: Dict[str, Any] = dict(cfg.get("runtime_pentachora", {}) or {})
        self.vocab_size, self.context = int(V), int(Ctx)

        self.token_emb = nn.Embedding(V, D)
        self.pos_emb = nn.Parameter(torch.zeros(1, Ctx, D))
        self.drop = nn.Dropout(RD)
        self.blocks = nn.ModuleList([
            nn.ModuleDict({
                "norm1": nn.LayerNorm(D),
                "attn": CausalSelfAttention(D, H, attn_dropout=AD),
                "norm2": nn.LayerNorm(D),
                "mlp": MLP(D, mlp_ratio=MR, dropout=RD),
            })
            for _ in range(L)
        ])
        self.norm = nn.LayerNorm(D)
        self.lm_head = nn.Linear(D, V, bias=False)
        self.lm_head.weight = self.token_emb.weight  # weight tying

        # Rose projection + anchors (present in checkpoints)
        self.rose_proj = nn.Linear(D, D, bias=False)
        self.rose_anchors = nn.Parameter(torch.randn(3, D) / (D ** 0.5))

        # Pentachora banks (created lazily to match state dict)
        self.register_buffer("pent_inited", torch.tensor(0, dtype=torch.uint8), persistent=False)
        self.penta_coarse: Optional[nn.Parameter] = None   # [C,5,D]
        self.penta_medium: Optional[nn.Parameter] = None   # [T,5,D]
        self.penta_fine: Optional[nn.Parameter] = None     # [M,5,D]

        self.apply(self._init_weights)

    @staticmethod
    def _init_weights(m: nn.Module) -> None:
        # Static so that self.apply(self._init_weights) passes each submodule as `m`.
        if isinstance(m, nn.Linear):
            nn.init.normal_(m.weight, mean=0.0, std=0.02)
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.Embedding):
            nn.init.normal_(m.weight, mean=0.0, std=0.02)

    # ---- Pentachora creation (must match sizes in checkpoint before strict load) -------------------
    def ensure_pentachora(self, coarse_C: int, medium_C: int, fine_C: int, dim: int, device: torch.device):
        """Initialize pentachora banks if not already present."""
        if self.pent_inited.item() == 1:
            return

        def bank(C: int) -> nn.Parameter:
            if C <= 0:
                return nn.Parameter(torch.zeros((0, 5, dim), device=device))
            pts = torch.randn(C, 5, dim, device=device)
            pts = F.normalize(pts - pts.mean(dim=1, keepdim=True), dim=-1)
            return nn.Parameter(pts)

        self.penta_coarse = bank(int(coarse_C))
        self.penta_medium = bank(int(medium_C))
        self.penta_fine = bank(int(fine_C))
        self.pent_inited.fill_(1)

    # ---- Runtime configuration helpers -------------------------------------------------------------
    def set_runtime_pentachora(self, cfg: Dict[str, Any]) -> None:
        """Update runtime pentachora behavior (enable/alphas/temp/pool)."""
        self.runtime_cfg.update(cfg or {})

    def _pool_hidden(self, h: torch.Tensor, mode: str) -> torch.Tensor:
        return h.mean(dim=1) if mode == "mean" else h[:, -1, :]

    @staticmethod
    def _weighted_nearest_vertex_target(
        pooled: torch.Tensor,   # [B,D]
        bank: torch.Tensor,     # [C,5,D]
        temp: float,
    ) -> torch.Tensor:
        """
        For each class (simplex) pick its nearest vertex to the pooled latent,
        then compute a softmax over classes of -min_dists/temp and take the
        weighted average of those nearest vertices => [B,D] target.
        """
        B, D = pooled.shape
        C = bank.size(0)
        if C == 0:
            return pooled
        # Distances to each vertex
        diffs = pooled[:, None, None, :] - bank[None, :, :, :]          # [B,C,5,D]
        dists = torch.norm(diffs, dim=-1)                               # [B,C,5]
        min_dists, min_idx = dists.min(dim=2)                           # [B,C], [B,C]
        sims = -min_dists / max(1e-8, float(temp))                      # [B,C]
        weights = F.softmax(sims, dim=-1)                               # [B,C]
        # Gather nearest vertex vectors: [B,C,D]
        bank_exp = bank.unsqueeze(0).expand(B, -1, -1, -1)              # [B,C,5,D]
        gather_idx = min_idx.unsqueeze(-1).unsqueeze(-1).expand(B, C, 1, D)
        nearest = torch.gather(bank_exp, 2, gather_idx).squeeze(2)      # [B,C,D]
        target = (weights.unsqueeze(-1) * nearest).sum(dim=1)           # [B,D]
        return target

    def _apply_runtime_vertex_pull(
        self,
        h: torch.Tensor,                    # [B,T,D]
        runtime_cfg: Dict[str, Any],
    ) -> torch.Tensor:
        """
        Apply non-destructive vertex pull to hidden states using banks selected by runtime_cfg.
        We compute a pooled latent, a per-bank target vector, form a delta, and blend it back into h.
        """
        if not runtime_cfg or not runtime_cfg.get("enable", False):
            return h
        pool_mode = str(runtime_cfg.get("pool", "mean"))
        temp = float(runtime_cfg.get("temp", 0.10))
        # Strengths per bank
        alpha_coarse = float(runtime_cfg.get("coarse_alpha", 0.0))
        alpha_topic = float(runtime_cfg.get("topic_alpha", 0.0))
        alpha_mood = float(runtime_cfg.get("mood_alpha", 0.0))
        if alpha_coarse <= 0 and alpha_topic <= 0 and alpha_mood <= 0:
            return h

        pooled = self._pool_hidden(h, pool_mode)  # [B,D]
        total_delta = None
        if alpha_coarse > 0 and getattr(self, "penta_coarse", None) is not None:
            tgt = self._weighted_nearest_vertex_target(pooled, self.penta_coarse, temp)
            delta = tgt - pooled
            total_delta = alpha_coarse * delta if total_delta is None else total_delta + alpha_coarse * delta
        if alpha_topic > 0 and getattr(self, "penta_medium", None) is not None:
            tgt = self._weighted_nearest_vertex_target(pooled, self.penta_medium, temp)
            delta = tgt - pooled
            total_delta = alpha_topic * delta if total_delta is None else total_delta + alpha_topic * delta
        if alpha_mood > 0 and getattr(self, "penta_fine", None) is not None:
            tgt = self._weighted_nearest_vertex_target(pooled, self.penta_fine, temp)
            delta = tgt - pooled
            total_delta = alpha_mood * delta if total_delta is None else total_delta + alpha_mood * delta
        if total_delta is None:
            return h
        # Broadcast the same delta to all time steps (global conditioning shift)
        h = h + total_delta.unsqueeze(1)  # [B,T,D]
        return h

    # ---- Backbone / forward -----------------------------------------------------------------------
    def _block_forward(self, blk: nn.ModuleDict, x: torch.Tensor) -> torch.Tensor:
        x = x + blk["attn"](blk["norm1"](x))
        x = x + blk["mlp"](blk["norm2"](x))
        return x

    def backbone(self, idx: torch.Tensor) -> torch.Tensor:
        B, T = idx.shape
        x = self.token_emb(idx) + self.pos_emb[:, :T, :]
        x = self.drop(x)
        if self.grad_checkpoint and self.training:
            from torch.utils.checkpoint import checkpoint
            for blk in self.blocks:
                # Pass blk explicitly: a lambda closing over the loop variable would see only the
                # last block when the checkpointed function is re-run during backward.
                x = checkpoint(self._block_forward, blk, x, use_reentrant=False)
        else:
            for blk in self.blocks:
                x = self._block_forward(blk, x)
        return self.norm(x)

    def forward(self, idx: torch.Tensor, runtime_cfg: Optional[Dict[str, Any]] = None) -> torch.Tensor:
        """
        Forward pass with optional runtime pentachora influence.
        If runtime_cfg is None, falls back to self.runtime_cfg set at init or via set_runtime_pentachora().
        """
        h = self.backbone(idx)
        cfg = self.runtime_cfg if runtime_cfg is None else {**self.runtime_cfg, **(runtime_cfg or {})}
        h = self._apply_runtime_vertex_pull(h, cfg)
        return self.lm_head(h)

    # ---- Utilities ---------------------------------------------------------------------------------
    def hidden_states(self, idx: torch.Tensor) -> torch.Tensor:
        """Return final hidden states (pre-LM head)."""
        return self.backbone(idx)

    def rose_hidden_pool(self, h: torch.Tensor, mode: str = "mean") -> torch.Tensor:
        """Pool hidden states for Rose-related terms."""
        return h.mean(dim=1) if mode == "mean" else h[:, -1, :]
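
# Example (a minimal sketch; the numeric values below are illustrative, not shipped defaults):
#
#   cfg = {
#       "vocab_size": 8192, "dim": 256, "context": 512,
#       "n_heads": 8, "n_layers": 6, "mlp_ratio": 4.0,
#       "resid_dropout": 0.1, "dropout": 0.0,
#       "runtime_pentachora": {"enable": True, "pool": "mean", "temp": 0.10,
#                              "coarse_alpha": 0.05, "topic_alpha": 0.05, "mood_alpha": 0.02},
#   }
#   model = BeeperRoseGPT(cfg)
#   model.set_runtime_pentachora({"coarse_alpha": 0.10})  # retune the vertex pull without rebuilding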

# --------------------------------- Loader helpers ---------------------------------------------------
def prepare_model_for_state_dict(
    model: BeeperRoseGPT,
    state_dict: "dict[str, torch.Tensor]",
    device: Optional[torch.device] = None,
) -> None:
    """
    Ensure model has pentachora parameters sized to match the incoming state_dict,
    so we can load with strict=True. No-op if checkpoint lacks penta_* keys.
    """
    device = device or next(model.parameters()).device
    need = all(k in state_dict for k in ("penta_coarse", "penta_medium", "penta_fine"))
    if not need:
        return
    p_coarse = state_dict["penta_coarse"]
    p_medium = state_dict["penta_medium"]
    p_fine = state_dict["penta_fine"]

    def dims_ok(t: torch.Tensor, D: int) -> bool:
        return t.ndim == 3 and t.size(1) == 5 and t.size(2) == D

    D = model.token_emb.embedding_dim
    if not (dims_ok(p_coarse, D) and dims_ok(p_medium, D) and dims_ok(p_fine, D)):
        return
    model.ensure_pentachora(p_coarse.size(0), p_medium.size(0), p_fine.size(0), dim=D, device=device)
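
# Example (a sketch; the checkpoint filename and flat state-dict layout are assumptions):
#
#   state = torch.load("beeper_ckpt.pt", map_location="cpu")   # hypothetical path
#   model = BeeperRoseGPT(cfg)
#   prepare_model_for_state_dict(model, state)                  # sizes penta_* banks if present
#   model.load_state_dict(state, strict=True)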

# --------------------------------- Generation -------------------------------------------------------
def _detok(text: str) -> str:
    text = re.sub(r"\s+([,.;:!?%])", r"\1", text)
    text = re.sub(r"\s+([\)\]\}])", r"\1", text)
    text = re.sub(r"([\(\[\{])\s+", r"\1", text)
    return text

@torch.no_grad()
def generate(
    model: BeeperRoseGPT,
    tok,  # Hugging Face Tokenizers `Tokenizer`
    cfg: dict,
    prompt: str,
    max_new_tokens: int = 120,
    temperature: Optional[float] = None,
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    repetition_penalty: Optional[float] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    device: Optional[torch.device] = None,
    detokenize: bool = True,
    runtime_cfg: Optional[Dict[str, Any]] = None,  # pass-through to forward()
) -> str:
    """
    Penalized nucleus sampling with optional runtime pentachora influence.
    """
    temperature = cfg.get("temperature", 0.9) if temperature is None else float(temperature)
    top_k = cfg.get("top_k", 40) if top_k is None else int(top_k)
    top_p = cfg.get("top_p", 0.9) if top_p is None else float(top_p)
    repetition_penalty = cfg.get("repetition_penalty", 1.10) if repetition_penalty is None else float(repetition_penalty)
    presence_penalty = cfg.get("presence_penalty", 0.6) if presence_penalty is None else float(presence_penalty)
    frequency_penalty = cfg.get("frequency_penalty", 0.0) if frequency_penalty is None else float(frequency_penalty)

    device = device or next(model.parameters()).device
    model.eval()
    ids = tok.encode(prompt).ids
    x = torch.tensor([ids], dtype=torch.long, device=device)

    V = int(cfg["vocab_size"])
    counts = torch.zeros(V, dtype=torch.int32, device=device)
    for t in ids:
        if 0 <= t < V:
            counts[t] += 1

    for _ in range(int(max_new_tokens)):
        logits = model(x[:, -cfg["context"]:], runtime_cfg=runtime_cfg)
        logits = logits[:, -1, :]

        # Repetition penalty (boolean advanced indexing returns a copy, so write the
        # penalized values back explicitly instead of editing the slice in place)
        if repetition_penalty and repetition_penalty != 1.0:
            mask = counts > 0
            if mask.any():
                sel = logits[:, mask]
                sel = torch.where(sel > 0, sel / repetition_penalty, sel * repetition_penalty)
                logits[:, mask] = sel

        # Presence/frequency penalties
        if presence_penalty or frequency_penalty:
            pen = counts.float() * (frequency_penalty or 0.0) + (counts > 0).float() * (presence_penalty or 0.0)
            logits = logits - pen.unsqueeze(0)

        logits = logits / max(1e-8, temperature)

        if top_k and top_k > 0:
            k = min(top_k, logits.size(-1))
            v, ix = torch.topk(logits, k, dim=-1)
            filt = torch.full_like(logits, float("-inf"))
            logits = filt.scatter_(-1, ix, v)

        if top_p and top_p < 1.0:
            sl, si = torch.sort(logits, descending=True)
            ps = F.softmax(sl, dim=-1)
            cdf = torch.cumsum(ps, dim=-1)
            cutoff = (cdf > top_p).float().argmax(dim=-1)
            mask = torch.arange(logits.size(-1), device=device).unsqueeze(0) > cutoff.unsqueeze(-1)
            sl = sl.masked_fill(mask, float("-inf"))
            logits = torch.full_like(logits, float("-inf")).scatter(-1, si, sl)

        probs = F.softmax(logits, dim=-1)
        next_id = torch.multinomial(probs, num_samples=1)
        x = torch.cat([x, next_id], dim=1)

        nid = next_id.item()
        if 0 <= nid < V:
            counts[nid] += 1

    out = tok.decode(x[0].tolist())
    return _detok(out) if detokenize else out
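
# -----------------------------------------------------------------------------------------------------
# Minimal smoke test (a sketch, not part of the module's public surface): builds a tiny model from a
# made-up config, initializes small pentachora banks, and runs one forward pass with the runtime
# vertex pull enabled. Full text generation additionally needs a Hugging Face `tokenizers.Tokenizer`
# (e.g. `Tokenizer.from_file("tokenizer.json")`, path hypothetical) passed to generate().
if __name__ == "__main__":
    toy_cfg = {
        "vocab_size": 256, "dim": 64, "context": 32,
        "n_heads": 4, "n_layers": 2, "mlp_ratio": 4.0,
        "resid_dropout": 0.0, "dropout": 0.0,
        "runtime_pentachora": {"enable": True, "pool": "mean", "temp": 0.10, "coarse_alpha": 0.05},
    }
    dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    m = BeeperRoseGPT(toy_cfg).to(dev).eval()
    m.ensure_pentachora(coarse_C=4, medium_C=8, fine_C=8, dim=toy_cfg["dim"], device=dev)
    ids = torch.randint(0, toy_cfg["vocab_size"], (1, 16), device=dev)
    with torch.no_grad():
        out = m(ids, runtime_cfg={"enable": True, "coarse_alpha": 0.05})
    print("logits shape:", tuple(out.shape))  # expected: (1, 16, 256)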