OpenTransformer committed on
Commit
538e1f4
·
verified ·
1 Parent(s): f34b7aa

Upload 2 files

Files changed (2)
  1. epr.py +948 -0
  2. step07113398.pt +3 -0
epr.py ADDED
@@ -0,0 +1,948 @@
#!/usr/bin/env python3
# epr.py — joint AR+NAT+SAT trainer/decoder (Qwen3 tokenizer)
# Robust fresh-start, ignores *.pt.tmp, AMP dtype auto, OOM backoff, progressive block growth.
# Added: repetition/presence/frequency penalties, top-k/top-p/min-p, greedy, no-repeat-ngrams.
# Fixes: SAT multinomial shape; checkpoint loads on CPU; cfg fallback if ckpt missing cfg.
# UPDATE: time-based checkpointing only (monotonic), no step-based saving. Resume respects interval.
# NEW: Graceful shutdown: catches SIGINT/SIGTERM, writes an atomic "interrupt.pt", then exits.
# NEW-OUTPUT: Prompt text is colorized by default; choose color via --prompt_color.
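
# ---------------------------------------------------------------------------
# (editor's note, not part of the original file) Example invocations, inferred
# from the argparse CLI at the bottom of this script; the checkpoint path and
# prompt are illustrative:
#
#   python epr.py train --preset small --amp --auto_grow
#   python epr.py infer --mode ar --ckpt ckpts_joint/final.pt \
#       --prompt "Once upon a time" --top_p 0.9 --prompt_color bright_cyan
# ---------------------------------------------------------------------------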

from __future__ import annotations
import argparse, json, math, pathlib, random, time, os, sys, signal, atexit, threading, traceback
from contextlib import nullcontext
from typing import Dict, Any, List, Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from datasets import load_dataset
from transformers import AutoTokenizer, logging as hf_log
from tqdm.auto import tqdm

# ───────────────────────── Globals ─────────────────────────
hf_log.set_verbosity_error()
DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cuda.matmul.allow_tf32 = True
try:
    torch.set_float32_matmul_precision("high")
except Exception:
    pass

# Optional Windows-friendly ANSI init
try:
    import colorama  # type: ignore
    colorama.just_fix_windows_console()
except Exception:
    pass

# ANSI color helpers
ANSI_RESET = "\033[0m"
ANSI_COLORS = {
    # normal
    "black": "\033[30m", "red": "\033[31m", "green": "\033[32m", "yellow": "\033[33m",
    "blue": "\033[34m", "magenta": "\033[35m", "cyan": "\033[36m", "white": "\033[37m",
    # bright
    "bright_black": "\033[90m", "bright_red": "\033[91m", "bright_green": "\033[92m",
    "bright_yellow": "\033[93m", "bright_blue": "\033[94m", "bright_magenta": "\033[95m",
    "bright_cyan": "\033[96m", "bright_white": "\033[97m",
}
DEFAULT_PROMPT_COLOR = "cyan"

def _normalize_color_name(name: str | None) -> str:
    if not name:
        return DEFAULT_PROMPT_COLOR
    n = name.strip().lower()
    if n in ("default", "auto"):
        return DEFAULT_PROMPT_COLOR
    return n

def _ansi_for_color(name: str) -> str:
    n = _normalize_color_name(name)
    if n in ("none", "off", "no", "false"):
        return ""
    # named
    if n in ANSI_COLORS:
        return ANSI_COLORS[n]
    # hex #RRGGBB
    if n.startswith("#") and len(n) == 7:
        try:
            r = int(n[1:3], 16); g = int(n[3:5], 16); b = int(n[5:7], 16)
            return f"\033[38;2;{r};{g};{b}m"
        except Exception:
            return ANSI_COLORS[DEFAULT_PROMPT_COLOR]
    # rgb(r,g,b)
    if n.startswith("rgb(") and n.endswith(")"):
        try:
            parts = n[4:-1].split(",")
            r, g, b = [max(0, min(255, int(p.strip()))) for p in parts]
            return f"\033[38;2;{r};{g};{b}m"
        except Exception:
            return ANSI_COLORS[DEFAULT_PROMPT_COLOR]
    # fallback
    return ANSI_COLORS.get(DEFAULT_PROMPT_COLOR, "")

def _colorize_prompt_then_reset(prompt_text: str, rest_text: str, prompt_color: str) -> str:
    code = _ansi_for_color(prompt_color)
    if not code:
        return f"{prompt_text}{rest_text}"
    return f"{code}{prompt_text}{ANSI_RESET}{rest_text}"

# Use the Qwen3 tokenizer (can override with env TOKENIZER_ID if needed)
TOKENIZER_ID = os.environ.get("TOKENIZER_ID", "Qwen/Qwen3-235B-A22B-Thinking-2507")

tok = AutoTokenizer.from_pretrained(TOKENIZER_ID, use_fast=True, trust_remote_code=True)
if tok.pad_token is None:
    tok.add_special_tokens({"pad_token": "[PAD]"})
VOCAB, BLANK, EOS = (
    max(tok.get_vocab().values()) + 1,
    tok.pad_token_id,
    tok.eos_token_id if tok.eos_token_id is not None else tok.sep_token_id,
)

PRESETS: Dict[str, Dict[str, int]] = {
    "small": dict(d=512, layers=8, heads=16, rank=64),
    "smallx2": dict(d=512, layers=16, heads=16, rank=64),
    "base": dict(d=768, layers=12, heads=24, rank=96),
}

DEFAULT_BLOCK = 576
SAT_BLOCK = 2
LR_CORE, LR_HEAD = 5e-5, 2e-4
EMIT_LAMBDA = 0.1
DEFAULT_SAVE_SEC = 24 * 3600
CKDIR = pathlib.Path("ckpts_joint")

# Interrupt state
_interrupt_flag = threading.Event()
_interrupt_reason = {"sig": None, "trace": None}
_last_emergency_save_mono = 0.0

# ───────────────────────── Utilities ─────────────────────────
def rng_state():
    if DEV.type == "cuda":
        try:
            return torch.cuda.get_rng_state(DEV)
        except TypeError:
            return torch.cuda.get_rng_state()
    return torch.get_rng_state()

def _is_probably_ckpt(path: pathlib.Path) -> bool:
    try:
        return path.is_file() and path.suffix == ".pt" and not path.name.endswith(".pt.tmp") and path.stat().st_size > (1 << 20)
    except Exception:
        return False

def _resolve_ckpt(path: pathlib.Path) -> pathlib.Path | None:
    """
    Return a solid .pt (never .tmp). If 'path' is a directory, pick the newest *.pt.
    If nothing usable is found, return None.
    """
    try:
        if path.is_dir():
            cands = sorted([p for p in path.glob("*.pt") if _is_probably_ckpt(p)],
                           key=lambda p: p.stat().st_mtime, reverse=True)
            return cands[0] if cands else None
        if path.suffix == ".tmp":
            solid = path.with_suffix("")
            return solid if _is_probably_ckpt(solid) else _resolve_ckpt(path.parent)
        return path if _is_probably_ckpt(path) else _resolve_ckpt(path.parent)
    except Exception:
        return None

def _try_load(path: pathlib.Path, map_location="cpu"):
    """
    Always load on CPU to avoid CUDA fragmentation/OOM during torch.load.
    """
    try:
        # When PyTorch flips the default, this still works. If you want, pass weights_only=True later.
        return torch.load(path, map_location=map_location)
    except Exception as e:
        print(f"[ckpt-skip] {path} not usable: {e}")
        return None

# ───────────────────────── AMP helper ─────────────────────────
try:
    from torch.amp import autocast as _ac, GradScaler
except ImportError:
    from torch.cuda.amp import autocast as _ac, GradScaler

def _auto_amp_dtype():
    if DEV.type == "cuda":
        try:
            if torch.cuda.is_bf16_supported():
                return torch.bfloat16
            return torch.float16
        except Exception:
            return torch.float16
    return torch.float32

def amp(enabled: bool):
    return nullcontext() if not (enabled and DEV.type == "cuda") else _ac(device_type="cuda", dtype=_auto_amp_dtype())
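
# (editor's note) Minimal usage sketch for the AMP helper above: amp(True) is a
# no-op nullcontext on CPU and an autocast context on CUDA (bf16 when supported,
# fp16 otherwise). `model` and `batch` below are hypothetical:
#
#   with amp(True):
#       loss = model(batch)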

# ───────────────────────── Data stream ─────────────────────────
def token_stream(ds_name: str, target: int, seed: int = 42):
    ds = load_dataset(ds_name, split="train", streaming=True)
    ds = ds.shuffle(buffer_size=10_000, seed=seed)
    emitted = 0
    for ex in ds:
        enc = tok.encode(ex["text"])
        if EOS is not None and (len(enc) == 0 or enc[-1] != EOS):
            enc = enc + [EOS]
        for t in enc:
            yield t
            emitted += 1
            if emitted >= target:
                return
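
# (editor's note) Usage sketch: the train loop below consumes this generator in
# fixed-size blocks, roughly like this (target and block size are illustrative):
#
#   stream = token_stream("cerebras/SlimPajama-627B", target=10_000)
#   block = [next(stream) for _ in range(DEFAULT_BLOCK)]  # one block of token ids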

# ───────────────────────── Relative positional bias (ALiBi) ─────────────────────────
def _alibi_slopes(n_heads: int):
    def pow2slopes(n):
        start = 2 ** (-2 ** -(math.log2(n) - 3))
        ratio = start
        return [start * (ratio ** i) for i in range(n)]
    if math.log2(n_heads).is_integer():
        vals = pow2slopes(n_heads)
    else:
        closest = 2 ** math.floor(math.log2(n_heads))
        vals = pow2slopes(closest)
        extra = pow2slopes(2 * closest)
        vals += extra[0::2][: n_heads - closest]
    return torch.tensor(vals, device=DEV).view(1, n_heads, 1, 1)

def alibi_bias(n_heads: int, n_tokens: int):
    i = torch.arange(n_tokens, device=DEV).view(1, 1, n_tokens, 1)
    j = torch.arange(n_tokens, device=DEV).view(1, 1, 1, n_tokens)
    dist = (j - i).clamp_min(0)
    slopes = _alibi_slopes(n_heads)
    return -slopes * dist
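
# (editor's note) Worked example of the slope schedule above, for a power-of-two
# head count: with n_heads = 16, start = 2 ** (-2 ** -(log2(16) - 3)) = 2 ** -0.5,
# so head i gets slope 2 ** (-(i + 1) / 2). alibi_bias(h, n) then returns a
# (1, h, n, n) tensor that is 0 on and below the diagonal and increasingly
# negative toward later positions, e.g. for n = 3 and slope s:
#
#   [[ 0, -s, -2s],
#    [ 0,  0,  -s],
#    [ 0,  0,   0]]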

# ───────────────────────── Model components ─────────────────────────
class LowRankMHA(nn.Module):
    def __init__(self, d: int, h: int, r: int, use_relpos: bool = True):
        super().__init__()
        assert d % h == 0, "d must be divisible by number of heads"
        self.h, self.dk = h, d // h
        self.use_relpos = use_relpos
        self.q = nn.Linear(d, d, bias=False)
        self.k = nn.Linear(d, d, bias=False)
        self.v = nn.Linear(d, d, bias=False)
        self.U = nn.Parameter(torch.randn(self.dk, r))
        nn.init.orthogonal_(self.U)
        self.proj = nn.Linear(h * r, d, bias=False)
        self.drop = nn.Dropout(0.1)

    def _proj(self, x):
        B, N, _ = x.shape
        return (x.view(B, N, self.h, self.dk).transpose(1, 2) @ self.U)

    def forward(self, x, mask=None, rel_bias_tokens: Optional[int] = None,
                kv_cache: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, use_cache: bool = False):
        q = self._proj(self.q(x))
        k_new = self._proj(self.k(x))
        v_new = self._proj(self.v(x))
        if kv_cache is None:
            k, v = k_new, v_new
        else:
            k, v = kv_cache
            if use_cache:
                k = torch.cat([k, k_new], dim=2)
                v = torch.cat([v, v_new], dim=2)
        att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk)
        if q.size(2) == k.size(2):
            if self.use_relpos and rel_bias_tokens is not None:
                att = att + alibi_bias(self.h, rel_bias_tokens)
            if mask is not None:
                att = att + mask
        z = (att.softmax(-1) @ v).transpose(1, 2)
        z = z.reshape(x.size(0), x.size(1), -1)
        out = self.drop(self.proj(z))
        return (out, (k, v)) if use_cache else out

class Block(nn.Module):
    def __init__(self, d: int, h: int, r: int):
        super().__init__()
        self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
        self.mha = LowRankMHA(d, h, r, use_relpos=True)
        self.ff = nn.Sequential(nn.Linear(d, 4 * d), nn.ReLU(), nn.Linear(4 * d, d))

    def forward(self, x, mask, kv=None, use_cache: bool = False):
        n = x.size(1)
        if use_cache:
            y, new_kv = self.mha(self.ln1(x), mask, rel_bias_tokens=n if mask is not None else None, kv_cache=kv, use_cache=True)
            x = x + y
            x = x + self.ff(self.ln2(x))
            return x, new_kv
        else:
            x = x + self.mha(self.ln1(x), mask, rel_bias_tokens=n)
            return x + self.ff(self.ln2(x))

class Encoder(nn.Module):
    def __init__(self, cfg: Dict[str, int]):
        super().__init__()
        d, l, h, r = cfg["d"], cfg["layers"], cfg["heads"], cfg["rank"]
        self.emb = nn.Embedding(VOCAB, d)
        self.blocks = nn.ModuleList([Block(d, h, r) for _ in range(l)])
        self.ln = nn.LayerNorm(d)

    def forward(self, ids, mask, kv_caches: Optional[List[Optional[Tuple[torch.Tensor, torch.Tensor]]]] = None, use_cache: bool = False):
        x = self.emb(ids)
        if not use_cache:
            for blk in self.blocks:
                x = blk(x, mask)
            return self.ln(x)
        new_kvs: List[Tuple[torch.Tensor, torch.Tensor]] = []
        for i, blk in enumerate(self.blocks):
            kv = kv_caches[i] if (kv_caches is not None) else None
            x, kv_out = blk(x, mask, kv, use_cache=True)
            new_kvs.append(kv_out)
        return self.ln(x), new_kvs

class ARHead(nn.Module):
    def __init__(self, d): super().__init__(); self.proj = nn.Linear(d, VOCAB)
    def forward(self, h): return self.proj(h)

class NATHead(nn.Module):
    def __init__(self, d): super().__init__(); self.proj = nn.Linear(d, VOCAB)
    def forward(self, h): return self.proj(h)

class SATHead(nn.Module):
    def __init__(self, d, mode="var"):
        super().__init__()
        self.proj = nn.Linear(d, VOCAB)
        self.mode = mode
        self.gate = nn.Linear(d, 2) if mode == "var" else None

    def forward(self, h_last):
        logits = self.proj(h_last)
        gate = self.gate(h_last[:, 0]) if self.gate is not None else None
        return logits, gate
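
# (editor's note) Shape walkthrough for the low-rank attention above, assuming
# preset "small" (d=512, h=16 heads, rank r=64, so dk = d/h = 32): _proj maps
# (B, N, 512) -> (B, 16, N, 32), then @ U(32, 64) -> (B, 16, N, 64); attention
# runs entirely in the 64-dim low-rank space (still scaled by sqrt(dk)), and
# proj maps the concatenated h*r = 1024 dims back to d = 512.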

# ───────────────────────── Masks ─────────────────────────
def causal_mask(n):
    m = torch.full((1, 1, n, n), float("-inf"), device=DEV)
    return torch.triu(m, 1)

def sat_mask(n, block=SAT_BLOCK):
    idx = torch.arange(n, device=DEV)
    grp = idx.unsqueeze(0) // block
    allow = (grp.T == grp) | (grp.T > grp)
    return torch.where(allow, 0.0, float("-inf")).unsqueeze(0).unsqueeze(0)
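
# (editor's note) Small illustration of sat_mask: for n = 4 and block = 2, the
# token groups are [0, 0, 1, 1]; a position may attend within its own group and
# to any earlier group, so the allow matrix (True = visible) is:
#
#   [[T, T, F, F],
#    [T, T, F, F],
#    [T, T, T, T],
#    [T, T, T, T]]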

# ───────────────────────── Checkpoint helpers ─────────────────────────
def save_ckpt(path: pathlib.Path, core: nn.Module, ar_h: nn.Module, nat_h: nn.Module, sat_h: nn.Module,
              opt: torch.optim.Optimizer, scaler: GradScaler, meta: Dict[str, Any]):
    path.parent.mkdir(exist_ok=True, parents=True)
    tmp = path.with_suffix(path.suffix + ".tmp")
    state = {
        "core": core.state_dict(),
        "ar": ar_h.state_dict(),
        "nat": nat_h.state_dict(),
        "sat": sat_h.state_dict(),
        "opt": opt.state_dict(),
        "scaler": scaler.state_dict(),
        "cfg": meta.get("cfg"),
        "tokenizer_id": TOKENIZER_ID,
        **{k: v for k, v in meta.items() if k != "cfg"},
    }
    torch.save(state, tmp, _use_new_zipfile_serialization=False)
    tmp.replace(path)
    (path.parent / "latest.json").write_text(json.dumps({"path": str(path), "step": meta["step"]}))
    print(f"\n✓ saved checkpoint {path.name}")

def load_ckpt(path: pathlib.Path, core: nn.Module, ar_h: nn.Module, nat_h: nn.Module, sat_h: nn.Module,
              opt: torch.optim.Optimizer, scaler: GradScaler):
    """
    Load a full training state from a checkpoint file or directory.
    Returns (step, seen_tok, wall_time).
    """
    p = _resolve_ckpt(path) or path
    ck = _try_load(p, map_location="cpu")
    if ck is None:
        raise FileNotFoundError(f"No valid checkpoint at {p}")
    # core
    if "core" in ck: core.load_state_dict(ck["core"])
    if "ar" in ck: ar_h.load_state_dict(ck["ar"])
    if "nat" in ck: nat_h.load_state_dict(ck["nat"])
    if "sat" in ck: sat_h.load_state_dict(ck["sat"])
    # opt/scaler can be missing if you saved partials; load best-effort
    try:
        if "opt" in ck: opt.load_state_dict(ck["opt"])
    except Exception as e:
        print(f"[resume] optimizer load skipped: {e}")
    try:
        if "scaler" in ck: scaler.load_state_dict(ck["scaler"])
    except Exception as e:
        print(f"[resume] scaler load skipped: {e}")
    return ck.get("step", 0), ck.get("seen_tok", 0), ck.get("wall_time", time.time())

def _safe_load_any(path: pathlib.Path, tgt: nn.Module, key: str | None = None, rename: str | None = None):
    p = _resolve_ckpt(path) or path
    if not p.exists(): return 0
    ck = _try_load(p, map_location="cpu")
    if ck is None: return 0
    sd = ck.get(key, ck) if key else ck
    if isinstance(sd, dict) and "state_dict" in sd:
        sd = sd["state_dict"]
    if rename:
        sd = {k.replace(rename, "proj."): v for k, v in sd.items() if rename in k}
    tgt_sd = tgt.state_dict()
    filt = {k: v for k, v in sd.items() if k in tgt_sd and v.shape == tgt_sd[k].shape}
    if filt:
        tgt.load_state_dict(filt, strict=False)
    return len(filt)

def infer_cfg_from_ckpt(path: pathlib.Path):
    p = _resolve_ckpt(path) or path
    if not p.exists(): return None
    sd = _try_load(p, map_location="cpu")
    if sd is None: return None
    if isinstance(sd, dict) and "cfg" in sd and isinstance(sd["cfg"], dict):
        return dict(sd["cfg"])
    core = sd.get("core")
    if core is None: return None
    emb_w = core.get("emb.weight")
    if emb_w is None: return None
    d = emb_w.shape[1]
    layer_ids = []
    for k in core.keys():
        if k.startswith("blocks."):
            parts = k.split(".")
            if len(parts) > 2 and parts[1].isdigit():
                layer_ids.append(int(parts[1]))
    layers = (max(layer_ids) + 1) if layer_ids else None
    U = core.get("blocks.0.mha.U")
    heads = rank = None
    if U is not None:
        dk, r = U.shape
        rank = r
        heads = d // dk if dk > 0 else None
    out = {"d": d}
    if layers is not None: out["layers"] = layers
    if heads is not None: out["heads"] = heads
    if rank is not None: out["rank"] = rank
    return out
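
# (editor's note) How these helpers fit together, following the file's own
# conventions: save_ckpt() writes "<name>.pt.tmp" then atomically renames it and
# records the newest path in "latest.json"; _resolve_ckpt() accepts either a
# file or a directory (picking the newest solid *.pt), so resuming with e.g.
# `--resume ckpts_joint` (directory form) should find the latest checkpoint.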

# ───────────────────────── Interrupt handling ─────────────────────────
def _mark_interrupt(sig_name: str):
    if not _interrupt_flag.is_set():
        _interrupt_reason["sig"] = sig_name
        try:
            _interrupt_reason["trace"] = "".join(traceback.format_stack(limit=5))
        except Exception:
            _interrupt_reason["trace"] = None
        _interrupt_flag.set()
        print(f"\n[interrupt] received {sig_name}; will save an emergency checkpoint and exit...")

def _install_signal_handlers():
    def _handler(signum, frame):
        name = {signal.SIGINT: "SIGINT", signal.SIGTERM: "SIGTERM"}.get(signum, f"SIG{signum}")
        _mark_interrupt(name)
    try: signal.signal(signal.SIGINT, _handler)
    except Exception: pass
    try: signal.signal(signal.SIGTERM, _handler)
    except Exception: pass

_install_signal_handlers()

# ───────────────────────── Train loop ─────────────────────────
def _parse_grow_plan(s: str) -> List[int]:
    steps = []
    for part in s.split(","):
        part = part.strip()
        if part:
            v = int(part)
            if v >= 128:
                steps.append(v)
    return sorted(set(steps))

def _init_save_timers(resume_wall_time: float | None, interval_sec: int) -> Tuple[float, float]:
    now_wall = time.time()
    now_mono = time.monotonic()
    if resume_wall_time is None:
        return now_wall, now_mono
    elapsed_wall = max(0.0, now_wall - resume_wall_time)
    elapsed_clamped = min(float(interval_sec), elapsed_wall)
    return now_wall, now_mono - elapsed_clamped

def _emergency_save_if_needed(args, meta_basics, core, ar_h, nat_h, sat_h, opt, scaler):
    global _last_emergency_save_mono
    if not _interrupt_flag.is_set():
        return False
    now = time.monotonic()
    if now - _last_emergency_save_mono < 1.0:
        return True
    _last_emergency_save_mono = now
    out_dir = pathlib.Path(args.save_dir)
    out_path = out_dir / "interrupt.pt"
    meta = {**meta_basics, "interrupt": {"sig": _interrupt_reason.get("sig"), "trace": _interrupt_reason.get("trace"), "wall_time": time.time()}}
    try:
        save_ckpt(out_path, core, ar_h, nat_h, sat_h, opt, scaler, meta)
        print("🛑 emergency checkpoint written; exiting due to interrupt.")
    except Exception as e:
        print(f"[interrupt-save-failed] {e}")
    return True

def train(args):
    cfg = PRESETS[args.preset].copy()

    # Previous topology probe (unless --fresh)
    if not args.fresh:
        src_probe = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
        prev_cfg = infer_cfg_from_ckpt(src_probe)
    else:
        prev_cfg = None

    if prev_cfg:
        cfg["d"] = prev_cfg.get("d", cfg["d"])
        if prev_cfg.get("heads"): cfg["heads"] = prev_cfg["heads"]
        if args.rank is None and prev_cfg.get("rank"): cfg["rank"] = prev_cfg["rank"]
        if prev_cfg.get("layers"): cfg["layers"] = prev_cfg["layers"]
        if args.x2 and prev_cfg.get("layers"): cfg["layers"] = max(cfg["layers"], prev_cfg["layers"] * 2)
    if args.rank: cfg["rank"] = args.rank
    if args.x2 and not prev_cfg: cfg["layers"] *= 2

    BLOCK = args.block or DEFAULT_BLOCK

    core = Encoder(cfg).to(DEV)
    ar_h, nat_h = ARHead(cfg["d"]).to(DEV), NATHead(cfg["d"]).to(DEV)
    sat_h = SATHead(cfg["d"], mode="var").to(DEV)

    # Warm start unless --fresh
    loaded = 0
    if not args.fresh:
        src = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
        src = _resolve_ckpt(src)
        if src:
            loaded += _safe_load_any(src, core, key="core")
            loaded += _safe_load_any(src, ar_h, key="ar")
            loaded += _safe_load_any(src, nat_h, key="nat")
            loaded += _safe_load_any(src, sat_h, key="sat")
            if loaded:
                print(f"Warm-start: loaded {loaded} matching tensors from {src}")

    opt = torch.optim.AdamW(
        [
            {"params": core.parameters(), "lr": LR_CORE},
            {"params": ar_h.parameters(), "lr": LR_HEAD},
            {"params": nat_h.parameters(), "lr": LR_HEAD},
            {"params": sat_h.parameters(), "lr": LR_HEAD},
        ]
    )
    scaler = GradScaler(enabled=(args.amp and DEV.type == "cuda"))

    ce_tok = nn.CrossEntropyLoss(label_smoothing=0.1)
    ctc = nn.CTCLoss(blank=BLANK, zero_infinity=True)
    ce_gate = nn.CrossEntropyLoss()

    # ---------- resume bookkeeping ----------
    start_step, seen_tok = 0, 0
    last_save_wall = None
    if args.resume and not args.fresh:
        start_step, seen_tok, last_save_wall = load_ckpt(pathlib.Path(args.resume), core, ar_h, nat_h, sat_h, opt, scaler)
        print(f"✓ resumed from step {start_step:,}, seen_tokens={seen_tok:,}")
    last_save_wall, last_save_mono = _init_save_timers(last_save_wall, args.save_every_sec)

    # Target tokens
    if args.target_tokens:
        target_tokens = args.target_tokens
    else:
        param_count = sum(p.numel() for p in core.parameters())
        target_tokens = int(25 * param_count)

    new_tokens_needed = target_tokens - seen_tok
    if new_tokens_needed <= 0:
        print("Target already reached – nothing to train.")
        return
    new_steps = new_tokens_needed // BLOCK
    if args.steps:
        new_steps = min(new_steps, args.steps)
        new_tokens_needed = new_steps * BLOCK

    total_tokens_needed = seen_tok + new_tokens_needed
    print(f"[auto-steps] {new_steps:,} training steps (@ {BLOCK} tokens/step)")

    # Progressive growth plan
    grow_plan = _parse_grow_plan(args.grow_plan) if args.auto_grow else []
    if args.auto_grow:
        if BLOCK not in grow_plan:
            grow_plan = sorted(set(grow_plan + [BLOCK]))
        print(f"[auto-grow] plan: {grow_plan} every {args.grow_every_steps} steps")

    stream = token_stream(args.source, target_tokens, seed=42)
    buf: list[int] = []
    pbar = tqdm(total=total_tokens_needed, initial=seen_tok, unit="tok")
    step = start_step
    steps_since_last_grow = 0

    def _atexit_note():
        if _interrupt_flag.is_set():
            print("[atexit] process exiting after interrupt; latest emergency checkpoint already attempted.")
    atexit.register(_atexit_note)

    while seen_tok < total_tokens_needed:
        if _emergency_save_if_needed(
            args,
            meta_basics={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time(),
                         "py_state": random.getstate(), "torch_state": rng_state()},
            core=core, ar_h=ar_h, nat_h=nat_h, sat_h=sat_h, opt=opt, scaler=scaler
        ):
            return

        try:
            while len(buf) < BLOCK:
                buf.append(next(stream))
        except StopIteration:
            break
        ids = torch.tensor(buf[:BLOCK], device=DEV).unsqueeze(0)
        buf = buf[BLOCK:]

        tgt_ar = ids.clone()
        ids_nat = torch.repeat_interleave(ids, 2, 1)

        try:
            with amp(args.amp):
                # AR
                h_ar = core(ids, causal_mask(ids.size(1)))
                logits_ar = ar_h(h_ar)[:, :-1]
                loss_ar = ce_tok(logits_ar.reshape(-1, VOCAB), tgt_ar[:, 1:].reshape(-1))
                # NAT
                h_nat = core(ids_nat, None)
                log_nat = nat_h(h_nat).log_softmax(-1).transpose(0, 1)
                # CTC input length must cover all 2x-upsampled frames; target length is the original block
                ilen = torch.tensor([ids_nat.size(1)], device=DEV)
                tlen = torch.tensor([ids.size(1)], device=DEV)
                loss_nat = ctc(log_nat, tgt_ar, ilen, tlen)
                # SAT
                h_sat = core(ids, sat_mask(ids.size(1)))
                logits_sat, gate = sat_h(h_sat[:, -SAT_BLOCK:])
                tgt_sat = ids[:, 1:SAT_BLOCK + 1]
                loss_sat = ce_tok(logits_sat.reshape(-1, VOCAB), tgt_sat.reshape(-1))
                if gate is not None:
                    loss_sat += EMIT_LAMBDA * ce_gate(gate, torch.ones(ids.size(0), device=DEV, dtype=torch.long))
                loss = loss_ar + loss_nat + loss_sat

            scaler.scale(loss).backward()
            scaler.unscale_(opt)
            nn.utils.clip_grad_norm_(core.parameters(), 1.0)
            scaler.step(opt)
            scaler.update()
            opt.zero_grad(set_to_none=True)

        except RuntimeError as e:
            msg = str(e).lower()
            if "out of memory" in msg or "cuda error" in msg:
                new_block = max(128, BLOCK // 2)
                if new_block < BLOCK:
                    print(f"\n[OOM] reducing block from {BLOCK} -> {new_block}")
                    BLOCK = new_block
                    if DEV.type == "cuda":
                        torch.cuda.empty_cache()
                    buf = ids[0].tolist() + buf
                    steps_since_last_grow = 0
                    continue
            raise

        step += 1
        seen_tok += BLOCK
        pbar.update(BLOCK)
        pbar.set_postfix(loss=f"{loss.item():.3f}", block=BLOCK)

        # time-based checkpoint cadence
        if args.save_every_sec > 0:
            now_mono = time.monotonic()
            if now_mono - last_save_mono >= args.save_every_sec:
                ck_name = f"step{step:08d}.pt"
                save_ckpt(
                    pathlib.Path(args.save_dir) / ck_name,
                    core, ar_h, nat_h, sat_h, opt, scaler,
                    meta={
                        "cfg": cfg,
                        "step": step,
                        "seen_tok": seen_tok,
                        "wall_time": time.time(),
                        "py_state": random.getstate(),
                        "torch_state": rng_state(),
                    },
                )
                last_save_mono = now_mono
                last_save_wall = time.time()

        # progressive growth
        if args.auto_grow:
            steps_since_last_grow += 1
            if steps_since_last_grow >= args.grow_every_steps:
                steps_since_last_grow = 0
                try:
                    idx = grow_plan.index(BLOCK)
                    if idx + 1 < len(grow_plan):
                        candidate = grow_plan[idx + 1]
                        print(f"[auto-grow] attempting BLOCK {BLOCK} -> {candidate}")
                        BLOCK = candidate
                        if DEV.type == "cuda":
                            torch.cuda.empty_cache()
                    else:
                        print("[auto-grow] at max planned block; no further growth.")
                except ValueError:
                    grow_plan = sorted(set(grow_plan + [BLOCK]))
                    idx = grow_plan.index(BLOCK)
                    if idx + 1 < len(grow_plan):
                        candidate = grow_plan[idx + 1]
                        print(f"[auto-grow] moving to planned BLOCK {candidate}")
                        BLOCK = candidate
                        if DEV.type == "cuda":
                            torch.cuda.empty_cache()

    pbar.close()

    if not _interrupt_flag.is_set():
        save_ckpt(
            pathlib.Path(args.save_dir) / "final.pt",
            core, ar_h, nat_h, sat_h, opt, scaler,
            meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time(),
                  "py_state": random.getstate(), "torch_state": rng_state()}
        )
        print("🎉 training complete")
    else:
        print("Ended after interrupt; final save skipped (emergency checkpoint already written).")

# ───────────────────────── Sampling utils ─────────────────────────
def _apply_no_repeat_ngram(logits: torch.Tensor, ids: torch.Tensor, n: int):
    if n <= 0 or ids.size(1) < n - 1: return logits
    prefix = ids[0, -(n - 1):].tolist()
    banned = []
    tokens = ids[0].tolist()
    for i in range(len(tokens) - n + 1):
        if tokens[i:i + n - 1] == prefix:
            banned.append(tokens[i + n - 1])
    if banned:
        banned_idx = torch.tensor(banned, device=logits.device, dtype=torch.long)
        logits[..., banned_idx] = float("-inf")
    return logits

def _apply_rep_presence_frequency(logits: torch.Tensor, ids: torch.Tensor, last_n: int,
                                  repetition_penalty: float, presence_penalty: float, frequency_penalty: float):
    if ids.numel() == 0: return logits
    hist = ids[0, -last_n:].to(torch.long) if last_n > 0 else ids[0].to(torch.long)
    if hist.numel() == 0: return logits
    uniq, counts = torch.unique(hist, return_counts=True)
    if presence_penalty != 0.0 or frequency_penalty != 0.0:
        adjust = presence_penalty + frequency_penalty * counts.to(logits.dtype)
        logits[..., uniq] = logits[..., uniq] - adjust
    if repetition_penalty and abs(repetition_penalty - 1.0) > 1e-6:
        sel = logits[..., uniq]
        sel = torch.where(sel > 0, sel / repetition_penalty, sel * repetition_penalty)
        logits[..., uniq] = sel
    return logits

def _filter_top_k_top_p_min_p(logits: torch.Tensor, top_k: int, top_p: float, min_p: float, temperature: float) -> torch.Tensor:
    logits = logits / max(temperature, 1e-8)
    if logits.dim() == 1:
        logits = logits.unsqueeze(0)
    B, V = logits.size(0), logits.size(-1)
    probs = logits.softmax(-1)
    if top_k and top_k < V:
        _, idx = torch.topk(probs, top_k, dim=-1)
        mask = torch.full_like(probs, 0.0)
        mask.scatter_((1 if probs.dim() > 1 else 0), idx, 1.0)
        probs = probs * mask
    if top_p < 1.0:
        sorted_probs, sorted_idx = torch.sort(probs, descending=True, dim=-1)
        cumsum = torch.cumsum(sorted_probs, dim=-1)
        keep = cumsum <= top_p
        keep[..., 0] = True  # always keep the most likely token
        mask = torch.zeros_like(probs)
        mask.scatter_(1, sorted_idx, keep.to(mask.dtype))
        probs = probs * mask
    if min_p > 0.0:
        # note: min_p here acts as an absolute probability floor
        probs = torch.where(probs >= min_p, probs, torch.zeros_like(probs))
    sums = probs.sum(-1, keepdim=True)
    empty = (sums == 0)
    if empty.any():
        # if filtering removed every candidate, fall back to the argmax token
        fallback_idx = logits.argmax(-1, keepdim=True)
        probs = torch.where(empty, torch.zeros_like(probs), probs)
        probs.scatter_(-1, fallback_idx, torch.where(empty, torch.ones_like(sums), torch.zeros_like(sums)))
    probs = probs / probs.sum(-1, keepdim=True)
    return probs
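
# (editor's note) A small, self-contained smoke test for the sampling pipeline
# above; the history, logits, and parameter values are arbitrary. It is defined
# but never called, so it does not affect the script:
def _sampling_smoke_test():
    ids = torch.tensor([[5, 7, 5, 7]])    # fake decode history, batch of 1
    logits = torch.randn(1, VOCAB)        # fake next-token logits
    logits = _apply_no_repeat_ngram(logits, ids, 3)             # bans token 5 here
    logits = _apply_rep_presence_frequency(logits, ids, 64, 1.1, 0.2, 0.1)
    probs = _filter_top_k_top_p_min_p(logits.squeeze(0), 50, 0.9, 0.0, 0.8)
    return probs.multinomial(1)           # (1, 1) tensor with the sampled id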

# ───────────────────────── Inference helpers ─────────────────────────
def load_joint(ckpt: str, preset: str):
    path = _resolve_ckpt(pathlib.Path(ckpt)) or pathlib.Path(ckpt)
    sd = _try_load(path, map_location="cpu")
    if sd is None:
        raise FileNotFoundError(f"No valid checkpoint at {path}")
    cfg = sd["cfg"] if "cfg" in sd and isinstance(sd["cfg"], dict) else (infer_cfg_from_ckpt(path) or PRESETS[preset])
    core = Encoder(cfg).to(DEV)
    ar_h, nat_h = ARHead(cfg["d"]).to(DEV), NATHead(cfg["d"]).to(DEV)
    sat_h = SATHead(cfg["d"]).to(DEV)
    core.load_state_dict(sd["core"])
    ar_h.load_state_dict(sd["ar"])
    nat_h.load_state_dict(sd["nat"])
    sat_h.load_state_dict(sd["sat"])
    return core, ar_h, nat_h, sat_h

def _decode_split(ids: torch.Tensor, prompt_len: int) -> Tuple[str, str]:
    """Decode the first prompt_len tokens separately from the rest."""
    prefix_ids = ids[0, :prompt_len].tolist() if prompt_len > 0 else []
    suffix_ids = ids[0, prompt_len:].tolist()
    prefix_text = tok.decode(prefix_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
    suffix_text = tok.decode(suffix_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
    return prefix_text, suffix_text

@torch.no_grad()
def ar_decode(core, ar_h, prompt: str, max_new: int, T: float,
              greedy: bool, top_k: int, top_p: float, min_p: float,
              repetition_penalty: float, presence_penalty: float,
              frequency_penalty: float, penalty_last_n: int,
              no_repeat_ngram_size: int, prompt_color: str):
    prompt_ids = tok.encode(prompt)
    prompt_len = len(prompt_ids)
    ids = torch.tensor([prompt_ids if prompt_len > 0 else ([EOS] if EOS is not None else [0])], device=DEV)
    h_full, kvs = core(ids, causal_mask(ids.size(1)), use_cache=True)
    t0 = time.time()
    for _ in range(max_new):
        logits = ar_h(h_full)[:, -1]
        logits = _apply_no_repeat_ngram(logits, ids, no_repeat_ngram_size)
        logits = _apply_rep_presence_frequency(logits, ids, penalty_last_n, repetition_penalty, presence_penalty, frequency_penalty)
        if greedy:
            nxt = logits.argmax(-1, keepdim=True)
        else:
            probs = _filter_top_k_top_p_min_p(logits.squeeze(0), top_k, top_p, min_p, T)
            nxt = probs.multinomial(1)
        ids = torch.cat([ids, nxt.unsqueeze(0) if nxt.dim() == 1 else nxt], 1)
        x = ids[:, -1:]
        h_full, kvs = core(x, None, kv_caches=kvs, use_cache=True)
    pre, post = _decode_split(ids, prompt_len)
    print(_colorize_prompt_then_reset(pre, post, prompt_color))
    print(f"[{max_new} tok in {time.time() - t0:.2f}s]")

@torch.no_grad()
def sat_decode(core, sat_h, prompt, max_new, T, var,
               greedy: bool, top_k: int, top_p: float, min_p: float,
               repetition_penalty: float, presence_penalty: float,
               frequency_penalty: float, penalty_last_n: int,
               no_repeat_ngram_size: int, prompt_color: str):
    prompt_ids = tok.encode(prompt)
    prompt_len = len(prompt_ids)
    ids = torch.tensor([prompt_ids], device=DEV)
    added, t0 = 0, time.time()
    while added < max_new:
        h = core(ids, sat_mask(ids.size(1)))
        logits_all, gate = sat_h(h[:, -SAT_BLOCK:])
        stride = 2 if (not var or gate is None) else (gate.softmax(-1).multinomial(1) + 1).item()
        stride = int(stride)
        for pos in range(stride):
            row_logits = logits_all[:, pos, :]
            row_logits = _apply_no_repeat_ngram(row_logits, ids, no_repeat_ngram_size)
            row_logits = _apply_rep_presence_frequency(row_logits, ids, penalty_last_n, repetition_penalty, presence_penalty, frequency_penalty)
            if greedy:
                nxt = row_logits.argmax(-1, keepdim=True)
            else:
                probs = _filter_top_k_top_p_min_p(row_logits.squeeze(0), top_k, top_p, min_p, T)
                nxt = probs.multinomial(1)
            ids = torch.cat([ids, nxt], 1)
            added += 1
            if added >= max_new:
                break
    pre, post = _decode_split(ids, prompt_len)
    print(_colorize_prompt_then_reset(pre, post, prompt_color))
    print(f"[{added} tok in {time.time() - t0:.2f}s]")

@torch.no_grad()
def nat_decode(core, nat_h, prompt, max_new, passes, streams, prompt_color: str):
    prompt_ids = tok.encode(prompt)
    prompt_len = len(prompt_ids)
    ids = torch.tensor([prompt_ids + [BLANK] * (max_new * 2)], device=DEV)
    t0 = time.time()
    for _ in range(passes):
        h = core(ids, None)
        logits = nat_h(h)
        logits[..., BLANK] = -1e9
        cand = logits.topk(streams, -1).indices.permute(2, 0, 1)
        best = (cand != BLANK).float().mean(-1).argmax(0)
        ids = cand[best, torch.arange(ids.size(0), device=DEV)][:, ::2]
    out = [t for t in ids[0].tolist() if t != BLANK]
    # Colorize the first prompt_len tokens (even if NAT rewrites them)
    pre_text = tok.decode(out[:prompt_len], skip_special_tokens=True, clean_up_tokenization_spaces=False)
    post_text = tok.decode(out[prompt_len:], skip_special_tokens=True, clean_up_tokenization_spaces=False)
    print(_colorize_prompt_then_reset(pre_text, post_text, prompt_color))
    print(f"[{len(out)} output tokens in {time.time() - t0:.2f}s]")

# ───────────────────────── CLI ─────────────────────────
def main():
    ap = argparse.ArgumentParser()
    sub = ap.add_subparsers(dest="cmd", required=True)

    tr = sub.add_parser("train")
    tr.add_argument("--preset", choices=PRESETS, default="small")
    tr.add_argument("--rank", type=int)
    tr.add_argument("--block", type=int, default=DEFAULT_BLOCK)
    tr.add_argument("--source", default="cerebras/SlimPajama-627B")
    tr.add_argument("--target_tokens", type=int)
    tr.add_argument("--steps", type=int)
    tr.add_argument("--amp", action="store_true")
    tr.add_argument("--save_every_sec", type=int, default=DEFAULT_SAVE_SEC)
    tr.add_argument("--save_dir", default=str(CKDIR))
    tr.add_argument("--resume", type=str)
    tr.add_argument("--x2", action="store_true", help="~2x params by doubling layers")
    tr.add_argument("--warmstart_from", type=str, default=None, help="Path to previous final.pt for shape-safe warm start")
    tr.add_argument("--fresh", action="store_true", help="Start from scratch: do not probe or load any checkpoints")

    # Progressive block growth
    tr.add_argument("--auto_grow", action="store_true", help="Automatically grow block size over time")
    tr.add_argument("--grow_plan", type=str, default="576,640,768,896,1024", help="Comma list of block sizes to try in order")
    tr.add_argument("--grow_every_steps", type=int, default=50000, help="Steps between growth attempts")

    inf = sub.add_parser("infer")
    inf.add_argument("--mode", choices=["ar", "nat", "sat"], required=True)
    inf.add_argument("--ckpt", required=True)
    inf.add_argument("--preset", default="small")
    inf.add_argument("--prompt", required=True)
    inf.add_argument("--max_new", type=int, default=120)
    inf.add_argument("--temperature", type=float, default=1.0)

    # New decode controls
    inf.add_argument("--greedy", action="store_true", help="Greedy decode (overrides sampling)")
    inf.add_argument("--top_k", type=int, default=0)
    inf.add_argument("--top_p", type=float, default=1.0)
    inf.add_argument("--min_p", type=float, default=0.0)

    inf.add_argument("--repetition_penalty", type=float, default=1.0)
    inf.add_argument("--presence_penalty", type=float, default=0.0)
    inf.add_argument("--frequency_penalty", type=float, default=0.0)
    inf.add_argument("--penalty_last_n", type=int, default=64)
    inf.add_argument("--no_repeat_ngram_size", type=int, default=0)

    inf.add_argument("--var", action="store_true")
    inf.add_argument("--passes", type=int, default=1)
    inf.add_argument("--streams", type=int, default=5)

    # Output coloring: prompt only
    inf.add_argument(
        "--prompt_color",
        type=str,
        default="default",
        help="Color for the prompt text in output. Named (e.g. cyan, bright_magenta), "
             "hex (#RRGGBB), or rgb(R,G,B). Use 'none' to disable. Default is 'cyan'."
    )

    args = ap.parse_args()
    if args.cmd == "train":
        train(args)
    else:
        core, ar_h, nat_h, sat_h = load_joint(args.ckpt, args.preset)
        pr_color = _normalize_color_name(args.prompt_color)
        if args.mode == "ar":
            ar_decode(core, ar_h, args.prompt, args.max_new, args.temperature,
                      args.greedy, args.top_k, args.top_p, args.min_p,
                      args.repetition_penalty, args.presence_penalty,
                      args.frequency_penalty, args.penalty_last_n,
                      args.no_repeat_ngram_size, pr_color)
        elif args.mode == "sat":
            sat_decode(core, sat_h, args.prompt, args.max_new, args.temperature, args.var,
                       args.greedy, args.top_k, args.top_p, args.min_p,
                       args.repetition_penalty, args.presence_penalty,
                       args.frequency_penalty, args.penalty_last_n,
                       args.no_repeat_ngram_size, pr_color)
        else:
            nat_decode(core, nat_h, args.prompt, args.max_new, args.passes, args.streams, pr_color)

if __name__ == "__main__":
    main()
step07113398.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8cbca7e816d357d257856e8861326d1262a180b68b393f753558492956cda9f4
size 4388629012