Update beeper_model.py
beeper_model.py  CHANGED  (+60 -36)
@@ -1,5 +1,5 @@
 """
-Rose Beeper Model
+Rose Beeper Model - Inference Components
 Extracted classes and utilities for model inference
 """
 
@@ -10,15 +10,17 @@ import torch.nn as nn
 import torch.nn.functional as F
 from typing import Optional, Tuple, Dict, Any
 from contextlib import nullcontext
-import re
 import inspect
+import re
+from tokenizers import Tokenizer
+from safetensors.torch import load_file as load_safetensors
+
 
-#
-
-
-torch.backends.cudnn.allow_tf32 = True
+# ============================================================================
+# SDPA (Scaled Dot Product Attention) Configuration
+# ============================================================================
 
-#
+# Version-safe SDPA context helper
 try:
     from torch.nn.attention import sdpa_kernel as _sdpa_kernel_modern
     from torch.nn.attention import SDPBackend as _SDPBackend
@@ -35,6 +37,7 @@ except Exception:
     _SDPBackend = None
     _sdpa_kernel = None
 
+
 def sdpa_ctx_prefer_flash():
     """Bias SDPA toward FlashAttention when available; no-op if unknown."""
     if _sdpa_kernel is None or _SDPA_SIG is None:
@@ -42,14 +45,17 @@ def sdpa_ctx_prefer_flash():
 
     params = {p.name for p in _SDPA_SIG.parameters.values()}
     try:
+        # Modern API (PyTorch 2.3+): backends=[...]
         if "backends" in params and _SDPBackend is not None:
             return _sdpa_kernel(backends=[
                 _SDPBackend.FLASH_ATTENTION,
                 _SDPBackend.EFFICIENT_ATTENTION,
                 _SDPBackend.MATH
             ])
+        # Modern API (alt): backend=...
         if "backend" in params and _SDPBackend is not None:
             return _sdpa_kernel(backend=_SDPBackend.FLASH_ATTENTION)
+        # Legacy boolean flags (old CUDA backend)
         if {"enable_flash", "enable_math", "enable_mem_efficient"} <= params:
             return _sdpa_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=True)
         if {"use_flash", "use_math", "use_mem_efficient"} <= params:
@@ -58,7 +64,11 @@ def sdpa_ctx_prefer_flash():
         pass
     return nullcontext()
 
-
+
+# ============================================================================
+# Model Components
+# ============================================================================
+
 class CausalSelfAttention(nn.Module):
     """Multi-head causal self-attention with optional FlashAttention."""
 
@@ -97,8 +107,9 @@ class CausalSelfAttention(nn.Module):
         y = y.transpose(1, 2).contiguous().view(B, T, C)
         return self.proj(y)
 
+
 class MLP(nn.Module):
-    """Feed-forward
+    """Feed-forward network with GELU activation."""
 
     def __init__(self, dim, mlp_ratio=4.0, dropout=0.1):
         super().__init__()
@@ -115,8 +126,9 @@ class MLP(nn.Module):
         x = self.drop(x)
         return x
 
+
 class BeeperRoseGPT(nn.Module):
-    """
+    """Rose Beeper GPT model with pentachora banks for multi-level control."""
 
     def __init__(self, cfg: dict):
         super().__init__()
@@ -141,7 +153,7 @@ class BeeperRoseGPT(nn.Module):
         self.lm_head = nn.Linear(D, V, bias=False)
         self.lm_head.weight = self.token_emb.weight
 
-        # Rose projection + anchors
+        # Optional Rose projection + anchors
         self.rose_proj = nn.Linear(D, D, bias=False)
         self.rose_anchors = nn.Parameter(torch.randn(3, D) / (D**0.5))
 
@@ -209,12 +221,17 @@ class BeeperRoseGPT(nn.Module):
     def rose_hidden_pool(self, h: torch.Tensor, mode="mean"):
         return h.mean(dim=1) if mode == "mean" else h[:, -1, :]
 
-
+
+# ============================================================================
+# Model I/O Utilities
+# ============================================================================
+
 class BeeperIO:
-    """Utilities for
+    """Utilities for saving and loading model weights."""
 
     @staticmethod
     def clean_state(sd: dict):
+        """Clean state dict keys from various wrappings."""
         out = {}
         for k, v in sd.items():
             if k.startswith("_orig_mod."):
@@ -226,31 +243,33 @@ class BeeperIO:
 
     @staticmethod
     def load_into_model(model: nn.Module, path: str, map_location="cpu", strict: bool = False):
-        """Load weights from
+        """Load weights from file into model."""
        ext = os.path.splitext(path)[1].lower()
-
         if ext == ".safetensors":
-            from safetensors.torch import load_file as load_safetensors
             sd = load_safetensors(path, device="cpu")
         else:
             raw = torch.load(path, map_location="cpu")
             sd = raw["model"] if isinstance(raw, dict) and "model" in raw else raw
-
         sd = BeeperIO.clean_state(sd)
         result = model.load_state_dict(sd, strict=strict)
         return result.missing_keys, result.unexpected_keys
 
-
+
+# ============================================================================
+# Text Generation
+# ============================================================================
+
 def _detok(text: str) -> str:
-    """Clean up
+    """Clean up tokenized text spacing."""
     text = re.sub(r"\s+([,.;:!?%])", r"\1", text)
     text = re.sub(r"\s+([\)\]\}])", r"\1", text)
     text = re.sub(r"([\(\[\{])\s+", r"\1", text)
     return text
 
+
 @torch.no_grad()
 def generate(model: BeeperRoseGPT,
-             tok:
+             tok: Tokenizer,
              cfg: dict,
             prompt: str,
             max_new_tokens: int = 120,
@@ -263,26 +282,27 @@ def generate(model: BeeperRoseGPT,
             device: Optional[torch.device] = None,
             detokenize: bool = True) -> str:
     """
-    Generate text from
+    Generate text from a prompt using the model.
 
     Args:
         model: The BeeperRoseGPT model
         tok: Tokenizer instance
         cfg: Configuration dictionary
-        prompt: Input prompt
-        max_new_tokens: Maximum tokens to generate
-        temperature: Sampling temperature
+        prompt: Input text prompt
+        max_new_tokens: Maximum number of tokens to generate
+        temperature: Sampling temperature (higher = more random)
         top_k: Top-k sampling parameter
         top_p: Top-p (nucleus) sampling parameter
         repetition_penalty: Penalty for repeated tokens
-        presence_penalty: Penalty for
+        presence_penalty: Penalty for tokens that have appeared
         frequency_penalty: Penalty based on token frequency
         device: Device to run on
-        detokenize: Whether to clean up tokenization
+        detokenize: Whether to clean up tokenization artifacts
 
     Returns:
         Generated text string
     """
+
     # Use defaults from config if not specified
     temperature = cfg["temperature"] if temperature is None else temperature
     top_k = cfg["top_k"] if top_k is None else top_k
@@ -294,12 +314,12 @@ def generate(model: BeeperRoseGPT,
     device = device or next(model.parameters()).device
     model.eval()
 
-    #
+    # Tokenize prompt
     ids = tok.encode(prompt).ids
     x = torch.tensor([ids], dtype=torch.long, device=device)
-    counts = torch.zeros(cfg["vocab_size"], dtype=torch.int32, device=device)
 
-    # Track token
+    # Track token counts for penalties
+    counts = torch.zeros(cfg["vocab_size"], dtype=torch.int32, device=device)
     for t in ids:
         if 0 <= t < cfg["vocab_size"]:
             counts[t] += 1
@@ -323,17 +343,17 @@
         pen = counts.float() * (frequency_penalty or 0.0) + (counts > 0).float() * (presence_penalty or 0.0)
         logits = logits - pen.unsqueeze(0)
 
-        #
+        # Apply temperature
         logits = logits / max(1e-8, temperature)
 
-        #
+        # Apply top-k sampling
         if top_k and top_k > 0:
             k = min(top_k, logits.size(-1))
             v, ix = torch.topk(logits, k, dim=-1)
             filt = torch.full_like(logits, float("-inf"))
             logits = filt.scatter_(-1, ix, v)
 
-        #
+        # Apply top-p (nucleus) sampling
         if top_p and top_p < 1.0:
             sl, si = torch.sort(logits, descending=True)
             ps = F.softmax(sl, dim=-1)
@@ -353,9 +373,13 @@
     out = tok.decode(x[0].tolist())
     return _detok(out) if detokenize else out
 
-
+
+# ============================================================================
+# Default Configuration
+# ============================================================================
+
 def get_default_config():
-    """
+    """Get the default configuration for the model."""
     return {
         "name": "Rose-Beeper",
         "context": 512,
@@ -368,7 +392,7 @@ def get_default_config():
         "resid_dropout": 0.1,
         "grad_checkpoint": False,
 
-        # Generation
+        # Generation defaults
         "temperature": 0.9,
         "top_k": 40,
         "top_p": 0.9,
@@ -376,7 +400,7 @@ def get_default_config():
         "presence_penalty": 0.6,
         "frequency_penalty": 0.0,
 
-        # Capoera
+        # Capoera configuration
        "capoera": {
            "enable": True,
            "topic_bins": 512,