File size: 27,003 Bytes
b65604f | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 
525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ROSA Plus v2 (character-level)
--------------------------------
A boundary-aware Suffix Automaton (SAM) with a probabilistic fallback LM
(Witten–Bell interpolation along suffix links). This module exposes a single
high-level class `ROSAPlus` and supporting classes that let you:
- Create a model object
- Train on individual samples (examples) *without* crossing boundaries
- Build the fallback LM (from accumulated examples) and keep the SAM frozen at inference
- Save/load a JSON model
- Provide a pre-existing context and:
* generate text (string) with sampling controls, or
* query the probability of the next character (or the full next-char distribution)
This is a modular split of the original script into a library you can import.
The companion CLI wrapper `rosa_plus_train.py` maintains the original flags.
"""
from __future__ import annotations
from collections import deque
from typing import List, Optional, Dict, Tuple
from collections import defaultdict, Counter
import json as _stdlib_json
import math
import random
try:
import orjson as _json
_dumps = _json.dumps # returns bytes
_loads = _json.loads
except Exception: # pragma: no cover
# Fallback to stdlib json (slower, but keeps things working)
_json = _stdlib_json
_dumps = lambda obj: _stdlib_json.dumps(obj).encode("utf-8")
_loads = lambda b: _stdlib_json.loads(b.decode("utf-8"))
from tqdm import tqdm
# Public API of this module.
__all__ = [
    "load_examples_from_file",
    "ROSACharPredictor",
    "ROSAFallbackLM",
    "ROSAPlus",
    # NOTE(review): ROSAGRUAdapter is referenced but not defined in this
    # section of the file; presumably it is defined further down or imported
    # elsewhere — confirm, otherwise `from ... import *` raises AttributeError.
    "ROSAGRUAdapter",
]
# =========================
# 1) Load & split examples
# =========================
def load_examples_from_file(
    path: str,
    delimiter: str = "<|ENDOFTEXT|>",
    strip_each: bool = True,
    use_eot: bool = True,
    eot_char: str = "\u0004",  # single-char EOT (U+0004 by default)
    max_examples: Optional[int] = 100000,
) -> List[str]:
    """Read a file, split it by `delimiter`, and return the example strings.

    Args:
        path: UTF-8 text file to read.
        delimiter: Separator string between examples.
        strip_each: Strip leading/trailing whitespace from each example.
        use_eot: Append `eot_char` to every example.
        eot_char: Single-character end-of-text marker.
        max_examples: Cap on the number of examples returned. Defaults to
            100000 (previously a hard-coded slice); pass None for no cap.

    Returns:
        List of non-empty example strings, optionally EOT-terminated.

    Raises:
        ValueError: If `eot_char` is not exactly one character.
    """
    if not isinstance(eot_char, str) or len(eot_char) != 1:
        raise ValueError("eot_char must be a single character.")
    with open(path, "r", encoding="utf-8") as f:
        raw = f.read()
    parts = raw.split(delimiter)
    if strip_each:
        parts = [p.strip() for p in parts]
    # Drop empty segments (e.g. trailing delimiter or whitespace-only chunks).
    parts = [p for p in parts if p]
    if use_eot:
        parts = [p + eot_char for p in parts]
    return parts if max_examples is None else parts[:max_examples]
# =========================
# 2) Boundary-aware SAM
# =========================
class ROSACharPredictor:
    """
    Online/streaming SAM builder (ROSA core), boundary-aware.
    Arrays:
    - b: transitions (list[dict(char->next_state)])
    - c: suffix links (list[int])
    - d: max length (list[int])
    - e: rightmost previous end position per state (list[int])
    - g: last (active) state (int)
    Additionally:
    - text: list[str] training characters in sequence
    - boundary_after[i] = True if an example boundary occurs immediately AFTER position i
    """
    def __init__(self):
        self.b: List[Dict[str, int]] = [{}]      # transitions
        self.c: List[int] = [-1]                 # suffix links
        self.d: List[int] = [0]                  # max length
        self.e: List[int] = [-1]                 # rightmost previous indices
        self.g: int = 0                          # last state
        self.text: List[str] = []                # stores training text characters
        self.boundary_after: List[bool] = []     # boundary flags per char index
    def feed(self, ch: str) -> None:
        """Extend SAM with `ch` (training step). Deterministic prediction is computed at inference."""
        i = len(self.text)
        self.text.append(ch)
        # Keep boundary flags aligned 1:1 with `text`; new chars default to "no boundary".
        if len(self.boundary_after) < len(self.text):
            self.boundary_after.append(False)
        b, c, d, e, g = self.b, self.c, self.d, self.e, self.g
        # --- SAM extend (classic online suffix-automaton construction) ---
        r = len(b)  # index of the new state representing the extended string
        b.append({})
        c.append(0)
        d.append(d[g] + 1)
        e.append(-1)
        p = g
        # Add `ch` transitions along the suffix-link chain until one already exists.
        while p != -1 and ch not in b[p]:
            b[p][ch] = r
            p = c[p]
        if p == -1:
            # No existing transition anywhere on the chain: link to the root.
            c[r] = 0
        else:
            q = b[p][ch]
            if d[p] + 1 == d[q]:
                # q is a proper continuation; link directly.
                c[r] = q
            else:
                # clone q -> u (split state so the length invariant d[parent]+1 == d[child] holds)
                u = len(b)
                b.append(b[q].copy())
                c.append(c[q])
                d.append(d[p] + 1)
                e.append(e[q])
                # Redirect transitions that pointed at q to the clone u.
                while p != -1 and b[p].get(ch) == q:
                    b[p][ch] = u
                    p = c[p]
                c[q] = c[r] = u
        self.g = r
        # --- update rightmost indices: every state on the suffix chain now ends at position i ---
        v = self.g
        while v != -1 and self.e[v] < i:
            self.e[v] = i
            v = self.c[v]
    def mark_boundary(self) -> None:
        """Call after finishing an example."""
        if self.text:
            # The last fed character ends an example; deterministic prediction
            # must not continue past it.
            self.boundary_after[len(self.text) - 1] = True
        self.g = 0  # reset 'last' to root for a fresh example
    # ----- Serialization -----
    def to_state_dict(self) -> Dict:
        """Return a JSON-serializable snapshot of the automaton."""
        return {
            "b": self.b,
            "c": self.c,
            "d": self.d,
            "e": self.e,
            "g": self.g,
            "text_str": "".join(self.text),
            "boundary_after": self.boundary_after,
        }
    @classmethod
    def from_state_dict(cls, obj: Dict) -> "ROSACharPredictor":
        """Rebuild a predictor from `to_state_dict()` output.

        Optional keys get defaults so older dumps still load.
        """
        inst = cls()
        inst.b = obj["b"]
        inst.c = obj["c"]
        inst.d = obj["d"]
        inst.e = obj["e"]
        inst.g = obj.get("g", 0)
        inst.text = list(obj.get("text_str", ""))
        inst.boundary_after = obj.get("boundary_after", [False] * len(inst.text))
        # Pad flags if a dump stored fewer flags than characters.
        if len(inst.boundary_after) < len(inst.text):
            inst.boundary_after += [False] * (len(inst.text) - len(inst.boundary_after))
        return inst
# ===============================
# 3) Walk a frozen SAM (inference)
# ===============================
def _advance_state(b, c, d, v: int, ch: str) -> int:
"""Walk the trained SAM with character `ch` (no learning). Returns new state v'."""
while v != -1 and ch not in b[v]:
v = c[v]
if v == -1:
return b[0].get(ch, 0)
return b[v][ch]
def _predict_from_state(
b, c, d, e, train_text: str, v: int, boundary_after: Optional[List[bool]] = None
) -> Optional[str]:
"""Deterministic ROSA next-char from current state v using rightmost indices.
Refuses to cross an example boundary.
"""
u = v
n = len(train_text)
while u != -1:
i = e[u]
j = i + 1
if d[u] > 0 and 0 <= j < n:
if boundary_after is not None and 0 <= i < len(boundary_after) and boundary_after[i]:
u = c[u]
continue
return train_text[j]
u = c[u]
return None
# ============================================
# 4) Fallback LM that never crosses boundaries
# ============================================
class ROSAFallbackLM:
    """Character LM using Witten–Bell interpolation down the SAM suffix chain.
    IMPORTANT: counts are constructed per-example; no cross-boundary pairs.
    """
    def __init__(self, sam: ROSACharPredictor, examples: List[str], max_order: Optional[int] = None, show_progress: bool = True):
        # Share (not copy) the SAM arrays: transitions, suffix links,
        # max lengths, rightmost end positions.
        self.b, self.c, self.d, self.e = sam.b, sam.c, sam.d, sam.e
        self.max_order = max_order  # optional clamp on context length
        joined = "".join(examples)
        # '\n' keeps the alphabet non-empty for degenerate (empty) input.
        self.alphabet = sorted(set(joined)) or ['\n']
        # Per-SAM-state next-character counts (one bucket per state).
        self.freq: List[Dict[str, int]] = [defaultdict(int) for _ in range(len(self.b))]
        self.unigram = Counter(joined) if joined else Counter({'\n': 1})
        # Memoized per-state distributions, filled lazily by _probs_for_state.
        self._cache: Dict[int, Dict[str, float]] = {}
        self._build_counts_examples_fast(examples, show_progress=show_progress)
    def _build_counts_examples_fast(self, examples: List[str], show_progress: bool = True) -> None:
        """Optimized: single pass per example, update-exclusion at the longest context,
        optionally clamped by `max_order`.
        """
        total_pairs = sum(max(0, len(seg) - 1) for seg in examples if seg)
        pbar = tqdm(total=total_pairs, desc="Training fallback LM (pairs)", disable=not show_progress, leave=False)
        b, c, d = self.b, self.c, self.d
        freq = self.freq
        max_order = self.max_order
        for seg in examples:
            if not seg:
                continue
            v = 0  # root
            # Pairs (seg[i] -> seg[i+1]) never span two examples by construction.
            for i in range(len(seg) - 1):
                ch = seg[i]
                # inline _advance_state (hot loop; avoids a function call per char)
                u = v
                while u != -1 and ch not in b[u]:
                    u = c[u]
                if u == -1:
                    v = b[0].get(ch, 0)
                else:
                    v = b[u][ch]
                # Count only at the longest usable context; shorter contexts are
                # filled afterwards by _propagate_counts_up_suffix_links().
                ctx = v
                if max_order is not None:
                    # Walk suffix links until the context length fits max_order.
                    while ctx != -1 and d[ctx] > max_order:
                        ctx = c[ctx]
                    if ctx == -1:
                        ctx = 0
                nxt = seg[i + 1]
                freq[ctx][nxt] += 1
                pbar.update(1)
        pbar.close()
        self._propagate_counts_up_suffix_links()
        # invalidate any cached distributions built before propagation
        self._cache.clear()
    def ensure_capacity(self) -> None:
        """Make sure freq has one bucket per SAM state (called after SAM grows)."""
        missing = len(self.b) - len(self.freq)
        if missing > 0:
            self.freq.extend(defaultdict(int) for _ in range(missing))
    def observe_pair(self, ctx_state: int, next_ch: str, *, propagate: bool = True) -> None:
        """
        Online update: record one observation of (ctx_state -> next_ch).
        If propagate=True we mirror the offline build's suffix-up accumulation.
        """
        # Keep alphabet fixed during online learning (simple path).
        if next_ch not in self.alphabet:
            return  # or raise if you prefer a hard failure
        self.ensure_capacity()
        self.freq[ctx_state][next_ch] += 1
        if propagate:
            # Mirror offline propagation: every shorter context also saw the pair.
            u = self.c[ctx_state]
            while u != -1:
                self.freq[u][next_ch] += 1
                u = self.c[u]
        # Invalidate memoized distributions for this chain
        u = ctx_state
        while u != -1:
            self._cache.pop(u, None)
            u = self.c[u]
    def _probs_for_state(self, v: int) -> Dict[str, float]:
        """Witten–Bell interpolation down suffix links, memoized by state."""
        if v in self._cache:
            return self._cache[v]
        # Build suffix chain (optionally truncated by max_order)
        chain = []
        u = v
        while u != -1:
            if self.max_order is not None and self.d[u] > self.max_order:
                u = self.c[u]
                continue
            chain.append(u)
            u = self.c[u]
        residual = 1.0  # probability mass not yet claimed by longer contexts
        probs: Dict[str, float] = {}
        def add_counts(state: int, scale: float):
            # Mix `state`'s relative frequencies into `probs`, weighted by `scale`.
            if scale <= 0.0:
                return
            total = sum(self.freq[state].values())
            if total == 0:
                return
            inv_total = 1.0 / total
            for ch, cnt in self.freq[state].items():
                probs[ch] = probs.get(ch, 0.0) + scale * (cnt * inv_total)
        for state in chain:
            N = sum(self.freq[state].values())  # total observations at this context
            T = len(self.freq[state])           # number of distinct next chars
            if N == 0:
                continue
            lam = N / (N + T) if T > 0 else 1.0  # Witten–Bell
            add_counts(state, residual * lam)
            residual *= (1.0 - lam)
        # Unigram fallback
        total_uni = sum(self.unigram.values())
        if total_uni > 0 and residual > 0.0:
            inv_total = 1.0 / total_uni
            for ch, cnt in self.unigram.items():
                probs[ch] = probs.get(ch, 0.0) + residual * (cnt * inv_total)
        # Renormalize to guard against floating-point drift.
        s = sum(probs.values())
        if s > 0:
            inv_s = 1.0 / s
            for k in list(probs.keys()):
                probs[k] *= inv_s
        else:
            # No counts anywhere: uniform over the alphabet.
            u = 1.0 / max(1, len(self.alphabet))
            probs = {ch: u for ch in self.alphabet}
        self._cache[v] = probs
        return probs
    def _propagate_counts_up_suffix_links(self) -> None:
        """
        After filling self.freq only at the longest contexts, push counts up the
        suffix-link tree so every shorter context has aggregated counts.
        Process states in decreasing d[v] so children flow into parents.
        """
        order = sorted(range(len(self.b)), key=lambda v: self.d[v], reverse=True)
        for v in order:
            p = self.c[v]
            if p < 0:  # root has no parent
                continue
            if not self.freq[v]:
                continue
            dv = self.freq[v]
            dp = self.freq[p]
            for ch, cnt in dv.items():
                dp[ch] += cnt
    @staticmethod
    def _sample_from_dist(
        dist: Dict[str, float],
        temperature: float = 1.0,
        top_p: Optional[float] = 0.9,
        top_k: Optional[int] = None,
    ) -> str:
        """Sample one character from `dist`: top-k truncation, then nucleus
        (top-p) truncation, then a temperature-scaled re-softmax."""
        if temperature <= 0:
            temperature = 1e-6  # avoid division by zero; effectively greedy
        items = sorted(dist.items(), key=lambda x: x[1], reverse=True)
        if top_k is not None and top_k > 0:
            items = items[:max(1, top_k)]
        if top_p is not None:
            # Keep the smallest prefix whose cumulative mass reaches top_p.
            cum, cut = 0.0, []
            for ch, p in items:
                cum += p
                cut.append((ch, p))
                if cum >= top_p:
                    break
            items = cut or items[:1]
        # Temperature-scaled softmax over the survivors (max-subtracted for stability).
        logits = [math.log(max(p, 1e-12)) / temperature for _, p in items]
        m = max(logits)
        exps = [math.exp(z - m) for z in logits]
        Z = sum(exps)
        probs = [x / Z for x in exps]
        idx = random.choices(range(len(items)), weights=probs, k=1)[0]
        return items[idx][0]
    # ----- Serialization -----
    def to_state_dict(self) -> Dict:
        """Return a JSON-serializable snapshot (the SAM arrays are serialized
        separately by the owning ROSACharPredictor)."""
        freq_plain = [{k: int(v) for k, v in d.items()} for d in tqdm(self.freq, leave=False)]
        return {
            "alphabet": self.alphabet,
            "unigram": {k: int(v) for k, v in self.unigram.items()},
            "freq": freq_plain,
            "max_order": self.max_order,
        }
    @classmethod
    def from_state_dict(cls, sam: ROSACharPredictor, obj: Dict) -> "ROSAFallbackLM":
        """Rebuild an LM from `to_state_dict()` output against `sam`'s arrays."""
        inst = cls.__new__(cls)  # bypass __init__
        inst.b, inst.c, inst.d, inst.e = sam.b, sam.c, sam.d, sam.e
        inst.max_order = obj.get("max_order", None)
        inst.alphabet = obj["alphabet"]
        inst.unigram = Counter({k: int(v) for k, v in obj["unigram"].items()})
        inst.freq = [defaultdict(int, {k: int(v) for k, v in d.items()}) for d in tqdm(obj["freq"], leave=False)]
        inst._cache = {}
        return inst
# ============================================
# 5) Mixed generation: deterministic + fallback
# ============================================
def _generate_mixed(
    sam: ROSACharPredictor,
    lm: ROSAFallbackLM,
    prompt: str,
    max_steps: int = 200,
    always_fallback = False,
    stop_at: Optional[str] = None,
    fallback_temperature: float = 1.0,
    fallback_top_p: Optional[float] = 0.9,
    fallback_top_k: Optional[int] = 50,
) -> str:
    """Generate up to `max_steps` characters continuing `prompt`.

    Each step first tries the deterministic ROSA prediction (skipped when
    `always_fallback` is set); when that abstains, one character is sampled
    from the Witten–Bell fallback LM with the given sampling controls.
    Generation halts early when `stop_at` is emitted.
    """
    trans, links, lens, ends = sam.b, sam.c, sam.d, sam.e
    corpus = "".join(sam.text)
    # Consume the prompt to reach the starting state.
    state = 0
    for ch in prompt:
        state = _advance_state(trans, links, lens, state, ch)
    pieces: List[str] = []
    for _ in range(max_steps):
        # 1) Deterministic ROSA (boundary-aware), unless disabled.
        nxt = None if always_fallback else _predict_from_state(
            trans, links, lens, ends, corpus, state, boundary_after=sam.boundary_after
        )
        # 2) Probabilistic fallback when ROSA abstains.
        if nxt is None:
            nxt = ROSAFallbackLM._sample_from_dist(
                lm._probs_for_state(state),
                temperature=fallback_temperature,
                top_p=fallback_top_p,
                top_k=fallback_top_k,
            )
        pieces.append(nxt)
        if stop_at is not None and nxt == stop_at:
            break
        state = _advance_state(trans, links, lens, state, nxt)
    return "".join(pieces)
# ============================================
# 6) High-level wrapper class
# ============================================
# Format identifier stamped into saved payloads; ROSAPlus.load() rejects
# files whose "magic" field does not match exactly.
MODEL_MAGIC = "rosa_pb_v2"  # v2 **only**
class ROSAPlus:
    """High-level model wrapper exposing a clean API.
    Typical usage:
        m = ROSAPlus(max_order=1048576, use_eot=True, eot_char="\u0004", seed=0)
        m.train_example("hello world\u0004")  # or skip EOT if you set use_eot=False
        m.build_lm()                          # required before generation / probs
        out = m.generate("he", steps=20, temperature=0.7)
        p = m.next_char_prob("he", "l")
        m.save("model.bin")
        m2 = ROSAPlus.load("model.bin")
    """
    def __init__(self, *, max_order: Optional[int] = 1048576, use_eot: bool = True, eot_char: str = "\u0004", seed: int = 0):
        if not isinstance(eot_char, str) or len(eot_char) != 1:
            raise ValueError("eot_char must be a single character.")
        self.max_order = max_order
        self.use_eot = use_eot
        self.eot_char = eot_char
        self.seed = seed
        # NOTE: seeds the process-global RNG; affects sampling everywhere in
        # this process, not just this instance.
        random.seed(seed)
        self.sam = ROSACharPredictor()
        # Raw examples are retained so build_lm() can be (re)run at any time.
        self._examples: List[str] = []
        self.lm: Optional[ROSAFallbackLM] = None  # built later
        self.neural: Optional["ROSAGRUAdapter"] = None  # optional GRU adapter
    # ---- Training API ----
    def train_example(self, example: str) -> None:
        """Train on a single example string. Appends EOT if `use_eot` and it's not present as the final char."""
        if not example:
            return
        if self.use_eot:
            if example[-1] != self.eot_char:
                example = example + self.eot_char
        self._examples.append(example)
        # NOTE(review): this tqdm shows one bar per example, which is noisy
        # when called from fit_from_examples (which has its own bar).
        for ch in tqdm(example):
            self.sam.feed(ch)
        # Mark the example boundary so inference never crosses it.
        self.sam.mark_boundary()
    def fit_from_examples(self, examples: List[str], *, show_progress: bool = True) -> None:
        """Convenience: train on many examples and build LM."""
        total_chars = sum(len(ex) for ex in examples)
        with tqdm(total=total_chars, desc="Training SAM (ROSA) over chars", disable=not show_progress) as pbar:
            for ex in examples:
                self.train_example(ex)  # this already appends EOT if enabled
                pbar.update(len(ex) if ex else 0)
        self.build_lm(show_progress=show_progress)
    def build_lm(self, *, show_progress: bool = True) -> None:
        """Build (or rebuild) the Witten–Bell fallback LM from all examples
        seen so far. Must be called before generate()/get_dist()/save()."""
        if not self._examples:
            raise RuntimeError("No training examples available. Use train_example() first.")
        self.lm = ROSAFallbackLM(self.sam, self._examples, max_order=self.max_order, show_progress=show_progress)
    # ---- Optional neural adapter integration ----
    def attach_gru_adapter(self, adapter: "ROSAGRUAdapter") -> None:
        """Attach a trained (or untrained) GRU adapter."""
        self.neural = adapter
    def train_gru_adapter(
        self,
        *,
        examples,
        emb_dim: int = 128,
        hidden_dim: int = 256,
        num_layers: int = 1,
        dropout: float = 0.0,
        combine: str = "poe",
        beta: float = 1.0,
        device: Optional[str] = None,
        epochs: int = 1,
        lr: float = 1e-3,
        max_tokens_per_step: int = 4096*10,
        clip_grad: float = 1.0,
        show_progress: bool = True,
        # NEW:
        online_sam: bool = False,
        online_lm: bool = False,
        propagate_updates: bool = True,
    ) -> "ROSAGRUAdapter":
        """Construct, fit, and attach a GRU adapter over the LM's alphabet.

        Requires build_lm() to have been called (the adapter consumes the LM's
        alphabet and base distributions). Returns the fitted adapter, which is
        also stored on `self.neural`.
        """
        if self.lm is None:
            raise RuntimeError("Fallback LM not built. Call build_lm() first.")
        adapter = ROSAGRUAdapter(
            self.lm.alphabet,
            emb_dim=emb_dim,
            hidden_dim=hidden_dim,
            num_layers=num_layers,
            dropout=dropout,
            combine=combine,
            beta=beta,
            device=device,
        )
        adapter.fit(
            self.sam,
            self.lm,
            examples,
            eot_char=self.eot_char,
            epochs=epochs,
            lr=lr,
            max_tokens_per_step=max_tokens_per_step,
            clip_grad=clip_grad,
            show_progress=show_progress,
            online_sam=online_sam,
            online_lm=online_lm,
            propagate_updates=propagate_updates,
        )
        self.neural = adapter
        return adapter
    # ---- Inference API ----
    def generate(
        self,
        prompt: str,
        *,
        steps: int = 200,
        always_fallback = False,
        stop_at: Optional[str] = None,
        temperature: float = 0.5,
        top_p: Optional[float] = 0.9,
        top_k: Optional[int] = 50,
        use_gru_adapter: bool = True,  # enable/disable neural reweighting
    ) -> str:
        """
        Generate a continuation from `prompt`.
        Order of operations per token:
        1) Try deterministic ROSA next-char (won't cross boundaries).
        2) Otherwise sample from fallback LM; if a GRU adapter is attached,
           refine the LM distribution first. The adapter consumes one base
           distribution per token; we *always* call step_with_char(ch) after
           emitting a char—this becomes a no-op if refine_distribution() was
           called in the same tick (the adapter guards against double steps).
        """
        if self.lm is None:
            raise RuntimeError("Fallback LM not built. Call build_lm() after training.")
        # Default stop character: the EOT marker (when EOT training is on).
        if stop_at is None and self.use_eot:
            stop_at = self.eot_char
        # Normalize non-positive top_k to "no top-k filtering".
        if top_k is not None and top_k <= 0:
            top_k = None
        # Unpack SAM internals
        b, c, d, e = self.sam.b, self.sam.c, self.sam.d, self.sam.e
        train_text = "".join(self.sam.text)
        # Walk SAM with the prompt to get the starting state
        v = 0
        for ch in prompt:
            v = _advance_state(b, c, d, v, ch)
        # Prime GRU with the prompt by consuming base distributions along the path
        if use_gru_adapter and self.neural is not None:
            try:
                self.neural.reset()
                v_prime = 0
                for ch in prompt:
                    v_prime = _advance_state(b, c, d, v_prime, ch)
                    base = self.lm._probs_for_state(v_prime)
                    if hasattr(self.neural, "step_with_dist"):
                        self.neural.step_with_dist(base)
            except Exception:
                # Priming is best-effort; fall back silently if adapter errors
                pass
        out: List[str] = []
        for _ in range(steps):
            # 1) Deterministic ROSA (if allowed)
            ch = None
            if not always_fallback:
                ch = _predict_from_state(
                    b, c, d, e, train_text, v, boundary_after=self.sam.boundary_after
                )
            # 2) Otherwise sample from (possibly GRU-refined) fallback LM
            if ch is None:
                base_dist = self.lm._probs_for_state(v)
                if use_gru_adapter and self.neural is not None and hasattr(self.neural, "refine_distribution"):
                    try:
                        dist = self.neural.refine_distribution(base_dist)  # consumes one step
                    except Exception:
                        # Best-effort refinement: fall back to the base distribution.
                        dist = base_dist
                else:
                    dist = base_dist
                ch = ROSAFallbackLM._sample_from_dist(
                    dist, temperature=temperature, top_p=top_p, top_k=top_k
                )
            out.append(ch)
            if stop_at is not None and ch == stop_at:
                break
            # Advance SAM (and adapter) with the emitted char
            v = _advance_state(b, c, d, v, ch)
            if use_gru_adapter and self.neural is not None and hasattr(self.neural, "step_with_char"):
                try:
                    # If refine_distribution() already stepped this tick, this is a no-op.
                    self.neural.step_with_char(ch)
                except Exception:
                    pass
        return "".join(out)
    def get_dist(self, context: str, deterministic=False) -> Dict[str, float]:
        """Return a dict of next-char probabilities given `context`.
        If deterministic=True: Get direct prediction instead of probability (no fallback)
        """
        if self.lm is None:
            raise RuntimeError("Fallback LM not built. Call build_lm() after training.")
        b, c, d, e = self.sam.b, self.sam.c, self.sam.d, self.sam.e
        train_text = "".join(self.sam.text)
        # Walk the SAM with the context to find its state.
        v = 0
        for ch in context:
            v = _advance_state(b, c, d, v, ch)
        if deterministic:
            det = _predict_from_state(b, c, d, e, train_text, v, boundary_after=self.sam.boundary_after)
            if det is not None:
                # Deterministic hit: all mass on the single predicted char.
                return {det: 1.0}
        base = self.lm._probs_for_state(v)
        # If neural adapter is attached, offer a reweighted distribution helper-style
        if self.neural is not None and hasattr(self.neural, "refine_distribution"):
            try:
                return self.neural.refine_distribution(base)
            except Exception:
                return base
        return base
    # ---- Persistence ----
    def save(self, path: str) -> None:
        """Serialize SAM + LM + meta to `path` as JSON bytes (orjson when available)."""
        if self.lm is None:
            raise RuntimeError("Fallback LM not built. Call build_lm() before saving.")
        payload = {
            "magic": MODEL_MAGIC,
            "sam": self.sam.to_state_dict(),
            "lm": self.lm.to_state_dict(),
            "meta": {
                "use_eot": self.use_eot,
                "eot_char": self.eot_char,
                "max_order": self.max_order,
                "seed": self.seed,
            },
            # Note: neural adapter is NOT serialized here by default to avoid
            # adding a framework dependency into the model blob.
        }
        with open(path, "wb") as f:
            f.write(_dumps(payload))
    @classmethod
    def load(cls, path: str) -> "ROSAPlus":
        """Load a model saved by save(). Raises ValueError on a magic mismatch.
        The neural adapter (if any) is not restored — reattach it manually."""
        with open(path, "rb") as f:
            payload = _loads(f.read())
        magic = payload.get("magic")
        if magic != MODEL_MAGIC:
            raise ValueError(f"Unrecognized or unsupported model magic in {path}: {magic} (v2 only)")
        meta = payload.get("meta", {})
        inst = cls(
            max_order=meta.get("max_order", 1048576),
            use_eot=meta.get("use_eot", True),
            eot_char=meta.get("eot_char", "\u0004"),
            seed=meta.get("seed", 0),
        )
        inst.sam = ROSACharPredictor.from_state_dict(payload["sam"])  # type: ignore[arg-type]
        inst.lm = ROSAFallbackLM.from_state_dict(inst.sam, payload["lm"])  # type: ignore[arg-type]
        return inst
    # ---- Utilities ----
    @staticmethod
    def decode_escape(s: str) -> str:
        """Interpret backslash escapes in `s` (e.g. the two chars '\\' 'n' -> newline).
        NOTE(review): the utf-8 -> unicode_escape round-trip reinterprets
        non-ASCII bytes as latin-1 and can mangle them; fine for ASCII-only
        flag values — confirm callers never pass non-ASCII here.
        """
        try:
            return s.encode("utf-8").decode("unicode_escape")
        except Exception:
            return s
|