# Source: rosa-plus / rosaplus.py (upstream commit b65604f, "init")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ROSA Plus v2 (character-level)
--------------------------------
A boundary-aware Suffix Automaton (SAM) with a probabilistic fallback LM
(Witten–Bell interpolation along suffix links). This module exposes a single
high-level class `ROSAPlus` and supporting classes that let you:
- Create a model object
- Train on individual samples (examples) *without* crossing boundaries
- Build the fallback LM (from accumulated examples) and keep the SAM frozen at inference
- Save/load a JSON model
- Provide a pre-existing context and:
* generate text (string) with sampling controls, or
* query the probability of the next character (or the full next-char distribution)
This is a modular split of the original script into a library you can import.
The companion CLI wrapper `rosa_plus_train.py` maintains the original flags.
"""
from __future__ import annotations
from collections import deque
from typing import List, Optional, Dict, Tuple
from collections import defaultdict, Counter
import json as _stdlib_json
import math
import random
try:
import orjson as _json
_dumps = _json.dumps # returns bytes
_loads = _json.loads
except Exception: # pragma: no cover
# Fallback to stdlib json (slower, but keeps things working)
_json = _stdlib_json
_dumps = lambda obj: _stdlib_json.dumps(obj).encode("utf-8")
_loads = lambda b: _stdlib_json.loads(b.decode("utf-8"))
from tqdm import tqdm
__all__ = [
"load_examples_from_file",
"ROSACharPredictor",
"ROSAFallbackLM",
"ROSAPlus",
"ROSAGRUAdapter",
]
# =========================
# 1) Load & split examples
# =========================
def load_examples_from_file(
    path: str,
    delimiter: str = "<|ENDOFTEXT|>",
    strip_each: bool = True,
    use_eot: bool = True,
    eot_char: str = "\u0004",  # single-char EOT (U+0004 by default)
    max_examples: Optional[int] = 100000,
) -> List[str]:
    """Read a UTF-8 file, split by `delimiter`, optionally strip, drop empties.

    Args:
        path: Path to the UTF-8 text file to read.
        delimiter: Substring separating examples within the file.
        strip_each: Strip surrounding whitespace from each example.
        use_eot: Append `eot_char` to every returned example.
        eot_char: Single-character end-of-text marker.
        max_examples: Cap on the number of returned examples, or None for no
            cap. The default (100000) preserves the previously hard-coded limit.

    Returns:
        List of non-empty example strings (each EOT-terminated if `use_eot`).

    Raises:
        ValueError: If `eot_char` is not exactly one character.
    """
    if not isinstance(eot_char, str) or len(eot_char) != 1:
        raise ValueError("eot_char must be a single character.")
    with open(path, "r", encoding="utf-8") as f:
        raw = f.read()
    parts = raw.split(delimiter)
    if strip_each:
        parts = [p.strip() for p in parts]
    parts = [p for p in parts if p]
    if use_eot:
        parts = [p + eot_char for p in parts]
    # None means "no cap"; otherwise keep only the first `max_examples`.
    return parts if max_examples is None else parts[:max_examples]
# =========================
# 2) Boundary-aware SAM
# =========================
class ROSACharPredictor:
    """
    Online/streaming SAM builder (ROSA core), boundary-aware.

    Standard suffix-automaton construction extended one character at a time,
    plus bookkeeping so inference can (a) recover the training text and
    (b) refuse to predict across example boundaries.

    Arrays (parallel, indexed by SAM state id):
    - b: transitions (list[dict(char->next_state)])
    - c: suffix links (list[int]); -1 only conceptually, root's link is -1
    - d: max length of the substring class represented by the state (list[int])
    - e: rightmost previous end position per state (list[int])
    - g: last (active) state (int)
    Additionally:
    - text: list[str] training characters in sequence
    - boundary_after[i] = True if an example boundary occurs immediately AFTER position i
    """
    def __init__(self):
        # State 0 is the root: empty transitions, link -1, length 0.
        self.b: List[Dict[str, int]] = [{}] # transitions
        self.c: List[int] = [-1] # suffix links
        self.d: List[int] = [0] # max length
        self.e: List[int] = [-1] # rightmost previous indices
        self.g: int = 0 # last state
        self.text: List[str] = [] # stores training text characters
        self.boundary_after: List[bool] = [] # boundary flags per char index
    def feed(self, ch: str) -> None:
        """Extend SAM with `ch` (training step). Deterministic prediction is computed at inference."""
        i = len(self.text)
        self.text.append(ch)
        # Keep boundary flags aligned with text length (default: no boundary).
        if len(self.boundary_after) < len(self.text):
            self.boundary_after.append(False)
        b, c, d, e, g = self.b, self.c, self.d, self.e, self.g
        # --- SAM extend (classic online construction) ---
        # New state r represents the longest suffix ending at position i.
        r = len(b)
        b.append({})
        c.append(0)
        d.append(d[g] + 1)
        e.append(-1)
        # Walk suffix links from the active state, adding missing `ch` edges.
        p = g
        while p != -1 and ch not in b[p]:
            b[p][ch] = r
            p = c[p]
        if p == -1:
            # No state had a `ch` edge: r links directly to the root.
            c[r] = 0
        else:
            q = b[p][ch]
            if d[p] + 1 == d[q]:
                # q is exactly one longer than p: it can serve as r's link.
                c[r] = q
            else:
                # clone q -> u: split q's class so lengths stay consistent.
                u = len(b)
                b.append(b[q].copy())
                c.append(c[q])
                d.append(d[p] + 1)
                e.append(e[q])
                # Redirect `ch` edges that pointed at q to the clone u.
                while p != -1 and b[p].get(ch) == q:
                    b[p][ch] = u
                    p = c[p]
                c[q] = c[r] = u
        self.g = r
        # --- update rightmost indices ---
        # Every suffix of the current position ends at i; record the newest
        # occurrence along the suffix chain (stop once already up to date).
        v = self.g
        while v != -1 and self.e[v] < i:
            self.e[v] = i
            v = self.c[v]
    def mark_boundary(self) -> None:
        """Call after finishing an example.

        Flags the last fed character as a boundary and resets the active
        state to the root so the next example starts matching fresh.
        """
        if self.text:
            self.boundary_after[len(self.text) - 1] = True
        self.g = 0 # reset 'last' to root for a fresh example
    # ----- Serialization -----
    def to_state_dict(self) -> Dict:
        """Return a JSON-serializable snapshot of all SAM arrays and text."""
        return {
            "b": self.b,
            "c": self.c,
            "d": self.d,
            "e": self.e,
            "g": self.g,
            "text_str": "".join(self.text),
            "boundary_after": self.boundary_after,
        }
    @classmethod
    def from_state_dict(cls, obj: Dict) -> "ROSACharPredictor":
        """Rebuild a predictor from `to_state_dict` output.

        Missing/short `boundary_after` is padded with False so older or
        partial snapshots still load.
        """
        inst = cls()
        inst.b = obj["b"]
        inst.c = obj["c"]
        inst.d = obj["d"]
        inst.e = obj["e"]
        inst.g = obj.get("g", 0)
        inst.text = list(obj.get("text_str", ""))
        inst.boundary_after = obj.get("boundary_after", [False] * len(inst.text))
        if len(inst.boundary_after) < len(inst.text):
            inst.boundary_after += [False] * (len(inst.text) - len(inst.boundary_after))
        return inst
# ===============================
# 3) Walk a frozen SAM (inference)
# ===============================
def _advance_state(b, c, d, v: int, ch: str) -> int:
"""Walk the trained SAM with character `ch` (no learning). Returns new state v'."""
while v != -1 and ch not in b[v]:
v = c[v]
if v == -1:
return b[0].get(ch, 0)
return b[v][ch]
def _predict_from_state(
b, c, d, e, train_text: str, v: int, boundary_after: Optional[List[bool]] = None
) -> Optional[str]:
"""Deterministic ROSA next-char from current state v using rightmost indices.
Refuses to cross an example boundary.
"""
u = v
n = len(train_text)
while u != -1:
i = e[u]
j = i + 1
if d[u] > 0 and 0 <= j < n:
if boundary_after is not None and 0 <= i < len(boundary_after) and boundary_after[i]:
u = c[u]
continue
return train_text[j]
u = c[u]
return None
# ============================================
# 4) Fallback LM that never crosses boundaries
# ============================================
class ROSAFallbackLM:
    """Character LM using Witten–Bell interpolation down the SAM suffix chain.

    IMPORTANT: counts are constructed per-example; no cross-boundary pairs.

    Each SAM state is a context: `freq[state]` maps next-char -> count.
    `_probs_for_state` interpolates those counts along the suffix-link chain
    (longest context first) with Witten–Bell weights, backing off to a
    unigram floor, and memoizes the resulting distribution per state.
    """
    def __init__(self, sam: ROSACharPredictor, examples: List[str], max_order: Optional[int] = None, show_progress: bool = True):
        """Build per-state counts over `examples` against a trained SAM.

        Args:
            sam: Trained boundary-aware SAM; its arrays are shared, not copied.
            max_order: Optional cap on context length (chars) used for counts.
            show_progress: Show a tqdm bar while counting character pairs.
        """
        self.b, self.c, self.d, self.e = sam.b, sam.c, sam.d, sam.e
        self.max_order = max_order
        joined = "".join(examples)
        # Guarantee a non-empty alphabet so sampling never divides by zero.
        self.alphabet = sorted(set(joined)) or ['\n']
        self.freq: List[Dict[str, int]] = [defaultdict(int) for _ in range(len(self.b))]
        self.unigram = Counter(joined) if joined else Counter({'\n': 1})
        # Memoized per-state distributions; must be invalidated on any count change.
        self._cache: Dict[int, Dict[str, float]] = {}
        self._build_counts_examples_fast(examples, show_progress=show_progress)
    def _build_counts_examples_fast(self, examples: List[str], show_progress: bool = True) -> None:
        """Optimized: single pass per example, counting only at the longest
        context (optionally clamped by `max_order`); shorter contexts are
        filled afterwards by propagating up the suffix-link tree.
        """
        total_pairs = sum(max(0, len(seg) - 1) for seg in examples if seg)
        pbar = tqdm(total=total_pairs, desc="Training fallback LM (pairs)", disable=not show_progress, leave=False)
        b, c, d = self.b, self.c, self.d
        freq = self.freq
        max_order = self.max_order
        for seg in examples:
            if not seg:
                continue
            v = 0  # root; each example starts matching from scratch
            for i in range(len(seg) - 1):
                ch = seg[i]
                # inline _advance_state (hot loop)
                u = v
                while u != -1 and ch not in b[u]:
                    u = c[u]
                if u == -1:
                    v = b[0].get(ch, 0)
                else:
                    v = b[u][ch]
                # Clamp context length if a max order is configured.
                ctx = v
                if max_order is not None:
                    while ctx != -1 and d[ctx] > max_order:
                        ctx = c[ctx]
                    if ctx == -1:
                        ctx = 0
                nxt = seg[i + 1]
                freq[ctx][nxt] += 1
                pbar.update(1)
        pbar.close()
        self._propagate_counts_up_suffix_links()
        # invalidate any cached distributions built before propagation
        self._cache.clear()
    def ensure_capacity(self) -> None:
        """Make sure freq has one bucket per SAM state (called after SAM grows)."""
        missing = len(self.b) - len(self.freq)
        if missing > 0:
            self.freq.extend(defaultdict(int) for _ in range(missing))
    def observe_pair(self, ctx_state: int, next_ch: str, *, propagate: bool = True) -> None:
        """
        Online update: record one observation of (ctx_state -> next_ch).
        If propagate=True we mirror the offline build's suffix-up accumulation.
        """
        # Keep alphabet fixed during online learning (simple path).
        if next_ch not in self.alphabet:
            return # or raise if you prefer a hard failure
        self.ensure_capacity()
        self.freq[ctx_state][next_ch] += 1
        if propagate:
            u = self.c[ctx_state]
            while u != -1:
                self.freq[u][next_ch] += 1
                u = self.c[u]
        # Invalidate ALL memoized distributions. A state's distribution
        # depends on the counts of every state on its suffix chain, so this
        # update staleness-affects not only ctx_state and its ancestors but
        # also every suffix-link *descendant* of any updated state (and,
        # with propagation, siblings reached through updated ancestors).
        # The previous chain-only invalidation left those entries stale;
        # clearing the whole cache is always correct and cheap to rebuild.
        self._cache.clear()
    def _probs_for_state(self, v: int) -> Dict[str, float]:
        """Witten–Bell interpolation down suffix links, memoized by state."""
        if v in self._cache:
            return self._cache[v]
        # Build suffix chain (optionally truncated by max_order)
        chain = []
        u = v
        while u != -1:
            if self.max_order is not None and self.d[u] > self.max_order:
                u = self.c[u]
                continue
            chain.append(u)
            u = self.c[u]
        residual = 1.0
        probs: Dict[str, float] = {}
        def add_counts(state: int, scale: float):
            # Mix state's normalized counts into `probs` with weight `scale`.
            if scale <= 0.0:
                return
            total = sum(self.freq[state].values())
            if total == 0:
                return
            inv_total = 1.0 / total
            for ch, cnt in self.freq[state].items():
                probs[ch] = probs.get(ch, 0.0) + scale * (cnt * inv_total)
        for state in chain:
            N = sum(self.freq[state].values())
            T = len(self.freq[state])  # distinct continuation types
            if N == 0:
                continue
            lam = N / (N + T) if T > 0 else 1.0  # Witten–Bell weight
            add_counts(state, residual * lam)
            residual *= (1.0 - lam)
        # Unigram fallback absorbs whatever mass was never assigned.
        total_uni = sum(self.unigram.values())
        if total_uni > 0 and residual > 0.0:
            inv_total = 1.0 / total_uni
            for ch, cnt in self.unigram.items():
                probs[ch] = probs.get(ch, 0.0) + residual * (cnt * inv_total)
        # Renormalize (guards tiny float drift); uniform if truly empty.
        s = sum(probs.values())
        if s > 0:
            inv_s = 1.0 / s
            for k in list(probs.keys()):
                probs[k] *= inv_s
        else:
            u = 1.0 / max(1, len(self.alphabet))
            probs = {ch: u for ch in self.alphabet}
        self._cache[v] = probs
        return probs
    def _propagate_counts_up_suffix_links(self) -> None:
        """
        After filling self.freq only at the longest contexts, push counts up the
        suffix-link tree so every shorter context has aggregated counts.
        Process states in decreasing d[v] so children flow into parents.
        """
        order = sorted(range(len(self.b)), key=lambda v: self.d[v], reverse=True)
        for v in order:
            p = self.c[v]
            if p < 0:  # root has no parent
                continue
            if not self.freq[v]:
                continue
            dv = self.freq[v]
            dp = self.freq[p]
            for ch, cnt in dv.items():
                dp[ch] += cnt
    @staticmethod
    def _sample_from_dist(
        dist: Dict[str, float],
        temperature: float = 1.0,
        top_p: Optional[float] = 0.9,
        top_k: Optional[int] = None,
    ) -> str:
        """Sample one char from `dist` with top-k / nucleus truncation, then
        temperature-scaled softmax over the survivors' log-probs."""
        if temperature <= 0:
            temperature = 1e-6  # avoid division by zero; ~greedy
        items = sorted(dist.items(), key=lambda x: x[1], reverse=True)
        if top_k is not None and top_k > 0:
            items = items[:max(1, top_k)]
        if top_p is not None:
            # Nucleus: keep the smallest prefix whose mass reaches top_p.
            cum, cut = 0.0, []
            for ch, p in items:
                cum += p
                cut.append((ch, p))
                if cum >= top_p:
                    break
            items = cut or items[:1]
        logits = [math.log(max(p, 1e-12)) / temperature for _, p in items]
        m = max(logits)
        exps = [math.exp(z - m) for z in logits]  # stable softmax
        Z = sum(exps)
        probs = [x / Z for x in exps]
        idx = random.choices(range(len(items)), weights=probs, k=1)[0]
        return items[idx][0]
    # ----- Serialization -----
    def to_state_dict(self) -> Dict:
        """JSON-serializable snapshot (counts, unigram, alphabet, max_order).

        The SAM arrays are NOT included; they are re-bound at load time.
        """
        freq_plain = [{k: int(v) for k, v in d.items()} for d in tqdm(self.freq, leave=False)]
        return {
            "alphabet": self.alphabet,
            "unigram": {k: int(v) for k, v in self.unigram.items()},
            "freq": freq_plain,
            "max_order": self.max_order,
        }
    @classmethod
    def from_state_dict(cls, sam: ROSACharPredictor, obj: Dict) -> "ROSAFallbackLM":
        """Rebuild an LM from `to_state_dict` output, re-binding SAM arrays."""
        inst = cls.__new__(cls)  # bypass __init__ (counts come from `obj`)
        inst.b, inst.c, inst.d, inst.e = sam.b, sam.c, sam.d, sam.e
        inst.max_order = obj.get("max_order", None)
        inst.alphabet = obj["alphabet"]
        inst.unigram = Counter({k: int(v) for k, v in obj["unigram"].items()})
        inst.freq = [defaultdict(int, {k: int(v) for k, v in d.items()}) for d in tqdm(obj["freq"], leave=False)]
        inst._cache = {}
        return inst
# ============================================
# 5) Mixed generation: deterministic + fallback
# ============================================
def _generate_mixed(
    sam: ROSACharPredictor,
    lm: ROSAFallbackLM,
    prompt: str,
    max_steps: int = 200,
    always_fallback = False,
    stop_at: Optional[str] = None,
    fallback_temperature: float = 1.0,
    fallback_top_p: Optional[float] = 0.9,
    fallback_top_k: Optional[int] = 50,
) -> str:
    """Generate up to `max_steps` characters continuing `prompt`.

    Per step: try the deterministic, boundary-aware ROSA prediction first
    (unless `always_fallback`), and sample from the fallback LM whenever
    ROSA has no safe continuation. Stops early after emitting `stop_at`.
    """
    trans, links, lens, ends = sam.b, sam.c, sam.d, sam.e
    corpus = "".join(sam.text)
    # Walk the frozen SAM along the prompt to locate the starting state.
    state = 0
    for ch in prompt:
        state = _advance_state(trans, links, lens, state, ch)
    emitted: List[str] = []
    for _ in range(max_steps):
        nxt: Optional[str] = None
        if not always_fallback:
            # 1) Deterministic ROSA continuation (never crosses boundaries).
            nxt = _predict_from_state(
                trans, links, lens, ends, corpus, state, boundary_after=sam.boundary_after
            )
        if nxt is None:
            # 2) Probabilistic fallback from the smoothed LM.
            dist = lm._probs_for_state(state)
            nxt = ROSAFallbackLM._sample_from_dist(
                dist,
                temperature=fallback_temperature,
                top_p=fallback_top_p,
                top_k=fallback_top_k,
            )
        emitted.append(nxt)
        if stop_at is not None and nxt == stop_at:
            break
        state = _advance_state(trans, links, lens, state, nxt)
    return "".join(emitted)
# ============================================
# 6) High-level wrapper class
# ============================================
# Format tag embedded in saved payloads; ROSAPlus.load() rejects any file
# whose tag differs (older/other formats are unsupported).
MODEL_MAGIC = "rosa_pb_v2" # v2 **only**
class ROSAPlus:
    """High-level model wrapper exposing a clean API.

    Combines a boundary-aware SAM (deterministic predictions), a Witten–Bell
    fallback LM, and an optional GRU adapter for distribution reweighting.

    Typical usage:
        m = ROSAPlus(max_order=1048576, use_eot=True, eot_char="\u0004", seed=0)
        m.train_example("hello world\u0004")  # or skip EOT if you set use_eot=False
        m.build_lm()  # required before generation / probs
        out = m.generate("he", steps=20, temperature=0.7)
        p = m.next_char_prob("he", "l")
        m.save("model.bin")
        m2 = ROSAPlus.load("model.bin")
    """
    def __init__(self, *, max_order: Optional[int] = 1048576, use_eot: bool = True, eot_char: str = "\u0004", seed: int = 0):
        """Create an empty model.

        Args:
            max_order: Context-length cap forwarded to the fallback LM.
            use_eot: Automatically append/stop at `eot_char`.
            eot_char: Single-character end-of-text marker.
            seed: Seed for sampling reproducibility.

        Raises:
            ValueError: If `eot_char` is not exactly one character.
        """
        if not isinstance(eot_char, str) or len(eot_char) != 1:
            raise ValueError("eot_char must be a single character.")
        self.max_order = max_order
        self.use_eot = use_eot
        self.eot_char = eot_char
        self.seed = seed
        # NOTE(review): seeds the process-wide `random` module, not a private
        # RNG — other users of `random` in the process are affected.
        random.seed(seed)
        self.sam = ROSACharPredictor()
        self._examples: List[str] = []  # retained for build_lm()
        self.lm: Optional[ROSAFallbackLM] = None # built later
        self.neural: Optional["ROSAGRUAdapter"] = None # optional GRU adapter
    # ---- Training API ----
    def train_example(self, example: str) -> None:
        """Train on a single example string. Appends EOT if `use_eot` and it's not present as the final char."""
        if not example:
            return
        if self.use_eot:
            if example[-1] != self.eot_char:
                example = example + self.eot_char
        # Keep the raw example so build_lm() can count pairs per-example.
        self._examples.append(example)
        # NOTE: per-example tqdm bar; nests under fit_from_examples' outer bar.
        for ch in tqdm(example):
            self.sam.feed(ch)
        self.sam.mark_boundary()
    def fit_from_examples(self, examples: List[str], *, show_progress: bool = True) -> None:
        """Convenience: train on many examples and build LM."""
        total_chars = sum(len(ex) for ex in examples)
        with tqdm(total=total_chars, desc="Training SAM (ROSA) over chars", disable=not show_progress) as pbar:
            for ex in examples:
                self.train_example(ex) # this already appends EOT if enabled
                pbar.update(len(ex) if ex else 0)
        self.build_lm(show_progress=show_progress)
    def build_lm(self, *, show_progress: bool = True) -> None:
        """Build the fallback LM from all accumulated training examples.

        Must be called before generate()/get_dist()/save().

        Raises:
            RuntimeError: If no examples have been trained yet.
        """
        if not self._examples:
            raise RuntimeError("No training examples available. Use train_example() first.")
        self.lm = ROSAFallbackLM(self.sam, self._examples, max_order=self.max_order, show_progress=show_progress)
    # ---- Optional neural adapter integration ----
    def attach_gru_adapter(self, adapter: "ROSAGRUAdapter") -> None:
        """Attach a trained (or untrained) GRU adapter."""
        self.neural = adapter
    def train_gru_adapter(
        self,
        *,
        examples,
        emb_dim: int = 128,
        hidden_dim: int = 256,
        num_layers: int = 1,
        dropout: float = 0.0,
        combine: str = "poe",
        beta: float = 1.0,
        device: Optional[str] = None,
        epochs: int = 1,
        lr: float = 1e-3,
        max_tokens_per_step: int = 4096*10,
        clip_grad: float = 1.0,
        show_progress: bool = True,
        # NEW:
        online_sam: bool = False,
        online_lm: bool = False,
        propagate_updates: bool = True,
    ) -> "ROSAGRUAdapter":
        """Construct, fit, and attach a GRU adapter over the LM's alphabet.

        Requires build_lm() to have been called (the adapter's vocabulary is
        the LM alphabet). Returns the fitted adapter (also stored on self).

        Raises:
            RuntimeError: If the fallback LM has not been built yet.
        """
        if self.lm is None:
            raise RuntimeError("Fallback LM not built. Call build_lm() first.")
        adapter = ROSAGRUAdapter(
            self.lm.alphabet,
            emb_dim=emb_dim,
            hidden_dim=hidden_dim,
            num_layers=num_layers,
            dropout=dropout,
            combine=combine,
            beta=beta,
            device=device,
        )
        adapter.fit(
            self.sam,
            self.lm,
            examples,
            eot_char=self.eot_char,
            epochs=epochs,
            lr=lr,
            max_tokens_per_step=max_tokens_per_step,
            clip_grad=clip_grad,
            show_progress=show_progress,
            online_sam=online_sam,
            online_lm=online_lm,
            propagate_updates=propagate_updates,
        )
        self.neural = adapter
        return adapter
    # ---- Inference API ----
    def generate(
        self,
        prompt: str,
        *,
        steps: int = 200,
        always_fallback: bool = False,
        stop_at: Optional[str] = None,
        temperature: float = 0.5,
        top_p: Optional[float] = 0.9,
        top_k: Optional[int] = 50,
        use_gru_adapter: bool = True, # enable/disable neural reweighting
    ) -> str:
        """
        Generate a continuation from `prompt`.
        Order of operations per token:
          1) Try deterministic ROSA next-char (won't cross boundaries).
          2) Otherwise sample from fallback LM; if a GRU adapter is attached,
             refine the LM distribution first. The adapter consumes one base
             distribution per token; we *always* call step_with_char(ch) after
             emitting a char—this becomes a no-op if refine_distribution() was
             called in the same tick (the adapter guards against double steps).

        Raises:
            RuntimeError: If the fallback LM has not been built yet.
        """
        if self.lm is None:
            raise RuntimeError("Fallback LM not built. Call build_lm() after training.")
        # Default stop token: the EOT char, when EOT handling is enabled.
        if stop_at is None and self.use_eot:
            stop_at = self.eot_char
        # Non-positive top_k means "disabled".
        if top_k is not None and top_k <= 0:
            top_k = None
        # Unpack SAM internals
        b, c, d, e = self.sam.b, self.sam.c, self.sam.d, self.sam.e
        train_text = "".join(self.sam.text)
        # Walk SAM with the prompt to get the starting state
        v = 0
        for ch in prompt:
            v = _advance_state(b, c, d, v, ch)
        # Prime GRU with the prompt by consuming base distributions along the path
        if use_gru_adapter and self.neural is not None:
            try:
                self.neural.reset()
                v_prime = 0
                for ch in prompt:
                    v_prime = _advance_state(b, c, d, v_prime, ch)
                    base = self.lm._probs_for_state(v_prime)
                    if hasattr(self.neural, "step_with_dist"):
                        self.neural.step_with_dist(base)
            except Exception:
                # Priming is best-effort; fall back silently if adapter errors
                pass
        out: List[str] = []
        for _ in range(steps):
            # 1) Deterministic ROSA (if allowed)
            ch = None
            if not always_fallback:
                ch = _predict_from_state(
                    b, c, d, e, train_text, v, boundary_after=self.sam.boundary_after
                )
            # 2) Otherwise sample from (possibly GRU-refined) fallback LM
            if ch is None:
                base_dist = self.lm._probs_for_state(v)
                if use_gru_adapter and self.neural is not None and hasattr(self.neural, "refine_distribution"):
                    try:
                        dist = self.neural.refine_distribution(base_dist) # consumes one step
                    except Exception:
                        # Best-effort refinement: fall back to the base LM.
                        dist = base_dist
                else:
                    dist = base_dist
                ch = ROSAFallbackLM._sample_from_dist(
                    dist, temperature=temperature, top_p=top_p, top_k=top_k
                )
            out.append(ch)
            if stop_at is not None and ch == stop_at:
                break
            # Advance SAM (and adapter) with the emitted char
            v = _advance_state(b, c, d, v, ch)
            if use_gru_adapter and self.neural is not None and hasattr(self.neural, "step_with_char"):
                try:
                    # If refine_distribution() already stepped this tick, this is a no-op.
                    self.neural.step_with_char(ch)
                except Exception:
                    pass
        return "".join(out)
    def get_dist(self, context: str, deterministic: bool = False) -> Dict[str, float]:
        """Return a dict of next-char probabilities given `context`.
        If deterministic=True: Get direct prediction instead of probability (no fallback)

        Raises:
            RuntimeError: If the fallback LM has not been built yet.
        """
        if self.lm is None:
            raise RuntimeError("Fallback LM not built. Call build_lm() after training.")
        b, c, d, e = self.sam.b, self.sam.c, self.sam.d, self.sam.e
        train_text = "".join(self.sam.text)
        v = 0
        for ch in context:
            v = _advance_state(b, c, d, v, ch)
        if deterministic:
            det = _predict_from_state(b, c, d, e, train_text, v, boundary_after=self.sam.boundary_after)
            if det is not None:
                # Deterministic hit: all probability mass on that char.
                return {det: 1.0}
        base = self.lm._probs_for_state(v)
        # If neural adapter is attached, offer a reweighted distribution helper-style
        # NOTE(review): refine_distribution() is called here without resetting
        # or priming the adapter on `context` — confirm the adapter's internal
        # state matches this context before relying on the refined output.
        if self.neural is not None and hasattr(self.neural, "refine_distribution"):
            try:
                return self.neural.refine_distribution(base)
            except Exception:
                return base
        return base
    # ---- Persistence ----
    def save(self, path: str) -> None:
        """Serialize SAM + LM + meta to `path` as JSON bytes (orjson if available).

        Raises:
            RuntimeError: If the fallback LM has not been built yet.
        """
        if self.lm is None:
            raise RuntimeError("Fallback LM not built. Call build_lm() before saving.")
        payload = {
            "magic": MODEL_MAGIC,
            "sam": self.sam.to_state_dict(),
            "lm": self.lm.to_state_dict(),
            "meta": {
                "use_eot": self.use_eot,
                "eot_char": self.eot_char,
                "max_order": self.max_order,
                "seed": self.seed,
            },
            # Note: neural adapter is NOT serialized here by default to avoid
            # adding a framework dependency into the model blob.
        }
        with open(path, "wb") as f:
            f.write(_dumps(payload))
    @classmethod
    def load(cls, path: str) -> "ROSAPlus":
        """Load a model saved by save(). The GRU adapter is NOT restored.

        Raises:
            ValueError: If the payload's magic tag is not MODEL_MAGIC.
        """
        with open(path, "rb") as f:
            payload = _loads(f.read())
        magic = payload.get("magic")
        if magic != MODEL_MAGIC:
            raise ValueError(f"Unrecognized or unsupported model magic in {path}: {magic} (v2 only)")
        meta = payload.get("meta", {})
        inst = cls(
            max_order=meta.get("max_order", 1048576),
            use_eot=meta.get("use_eot", True),
            eot_char=meta.get("eot_char", "\u0004"),
            seed=meta.get("seed", 0),
        )
        inst.sam = ROSACharPredictor.from_state_dict(payload["sam"]) # type: ignore[arg-type]
        inst.lm = ROSAFallbackLM.from_state_dict(inst.sam, payload["lm"]) # type: ignore[arg-type]
        return inst
    # ---- Utilities ----
    @staticmethod
    def decode_escape(s: str) -> str:
        """Interpret backslash escapes (e.g. '\\n', '\\u0004') in `s`; on any
        failure return `s` unchanged.

        NOTE(review): encode('utf-8').decode('unicode_escape') reinterprets
        non-ASCII bytes as latin-1, mangling non-ASCII input — confirm callers
        only pass ASCII escape sequences (e.g. CLI flag values).
        """
        try:
            return s.encode("utf-8").decode("unicode_escape")
        except Exception:
            return s