MarxistLeninist committed · verified
Commit b930b0f · Parent(s): 867e7a4

Upload apt.py

Files changed (1): apt.py ADDED (+1093, -0)
#!/usr/bin/env python3
# 5L.py — joint AR+NAT+SAT trainer/decoder (Qwen3 tokenizer)
# Robust fresh-start, ignores *.pt.tmp, AMP dtype auto, OOM backoff, progressive block growth.
# Added: repetition/presence/frequency penalties, top-k/top-p/min-p, greedy, no-repeat-ngrams.
# Fixes: SAT multinomial shape; checkpoint loads on CPU; cfg fallback if ckpt missing cfg.
# UPDATE: time-based checkpointing only (monotonic), no step-based saving. Resume respects interval.
# UPDATE2: seed support for train and infer; seeds Python/NumPy/Torch and dataset shuffle.
# UPDATE3: chat-aware token_stream with role-tag formatting and optional language filter via "dataset:lang"
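
# Illustrative invocations (all flags are defined in main() below; paths are examples):
#   python apt.py train --preset small --amp --seed 42 --source OpenAssistant/oasst1:en
#   python apt.py infer --mode ar --ckpt ckpts_joint/final.pt --prompt "Hello" --top_p 0.9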

from __future__ import annotations
import argparse, json, math, pathlib, random, time, os
from contextlib import nullcontext
from typing import Dict, Any, List, Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from datasets import load_dataset
from transformers import AutoTokenizer, logging as hf_log
from tqdm.auto import tqdm

# ───────────────────────── Globals ─────────────────────────
hf_log.set_verbosity_error()
DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cuda.matmul.allow_tf32 = True
try:
    torch.set_float32_matmul_precision("high")
except Exception:
    pass

# Use the Qwen3 tokenizer (can override with env TOKENIZER_ID if needed)
TOKENIZER_ID = os.environ.get(
    "TOKENIZER_ID",
    "Qwen/Qwen3-235B-A22B-Thinking-2507"
)

# Some Qwen tokenizers require trust_remote_code
tok = AutoTokenizer.from_pretrained(TOKENIZER_ID, use_fast=True, trust_remote_code=True)
if tok.pad_token is None:
    tok.add_special_tokens({"pad_token": "[PAD]"})
VOCAB, BLANK, EOS = (
    max(tok.get_vocab().values()) + 1,  # allow new [PAD] if appended
    tok.pad_token_id,
    tok.eos_token_id if tok.eos_token_id is not None else tok.sep_token_id
)

PRESETS: Dict[str, Dict[str, int]] = {
    "small": dict(d=512, layers=8, heads=16, rank=64),
    "smallx2": dict(d=512, layers=16, heads=16, rank=64),
    "base": dict(d=768, layers=12, heads=24, rank=96),
}

# Safe default for 1× Tesla P40; override with --block
DEFAULT_BLOCK = 576
SAT_BLOCK = 2
LR_CORE, LR_HEAD = 5e-5, 2e-4
EMIT_LAMBDA = 0.1
# Default interval: 24 hours. Override with --save_every_sec (e.g., 86400).
DEFAULT_SAVE_SEC = 24 * 3600
CKDIR = pathlib.Path("ckpts_joint")


# ───────────────────────── Utilities ─────────────────────────
def set_seed(seed: Optional[int]) -> None:
    """
    Best-effort reproducibility. Seeds Python/NumPy/Torch and toggles CuDNN to deterministic.
    If seed is None, does nothing.
    """
    if seed is None:
        return
    os.environ["PYTHONHASHSEED"] = str(seed)
    import numpy as np
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    try:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    except Exception:
        pass


def rng_state():
    if DEV.type == "cuda":
        try:
            return torch.cuda.get_rng_state(DEV)
        except TypeError:
            return torch.cuda.get_rng_state()
    return torch.get_rng_state()


def _is_probably_ckpt(path: pathlib.Path) -> bool:
    try:
        return path.is_file() and path.suffix == ".pt" and not path.name.endswith(".pt.tmp") and path.stat().st_size > (1 << 20)
    except Exception:
        return False


def _resolve_ckpt(path: pathlib.Path) -> pathlib.Path | None:
    """
    Return a solid .pt (never .tmp). If 'path' is a dir, pick the newest *.pt.
    If not usable, return None.
    """
    try:
        if path.is_dir():
            cands = sorted([p for p in path.glob("*.pt") if _is_probably_ckpt(p)],
                           key=lambda p: p.stat().st_mtime, reverse=True)
            return cands[0] if cands else None
        if path.suffix == ".tmp":
            solid = path.with_suffix("")
            return solid if _is_probably_ckpt(solid) else _resolve_ckpt(path.parent)
        return path if _is_probably_ckpt(path) else _resolve_ckpt(path.parent)
    except Exception:
        return None


def _try_load(path: pathlib.Path, map_location="cpu"):
    """
    Always load on CPU to avoid CUDA fragmentation/OOM during torch.load.
    """
    try:
        return torch.load(path, map_location="cpu")
    except Exception as e:
        print(f"[ckpt-skip] {path} not usable: {e}")
        return None


# ───────────────────────── AMP helper ─────────────────────────
try:
    from torch.amp import autocast as _ac, GradScaler
except ImportError:
    from torch.cuda.amp import autocast as _ac, GradScaler

def _auto_amp_dtype():
    if DEV.type == "cuda":
        try:
            if torch.cuda.is_bf16_supported():
                return torch.bfloat16
            return torch.float16
        except Exception:
            return torch.float16
    return torch.float32

def amp(enabled: bool):
    # Only enable if explicitly requested AND CUDA is available
    return nullcontext() if not (enabled and DEV.type == "cuda") else _ac(device_type="cuda", dtype=_auto_amp_dtype())


# ───────────────────────── Data stream (chat-aware) ─────────────────────────
def _extract_text(ex: dict) -> Optional[str]:
    """
    Extracts chat-style text from common schemas:
      - {"messages": [{"role": "user/assistant/system", "content": "..."}]}
      - {"prompt": "...", "response"/"output": "..."} -> stitched as [USER]/[ASSISTANT]
      - {"instruction": "...", "input": "...", "output": "..."} (Alpaca-style)
      - {"role": "...", "text": "..."} -> one turn with role tag
      - plain {"text": "..."} fallback
    Returns a single transcript string or None if nothing usable.
    """
    # 1) messages list
    msgs = ex.get("messages") or ex.get("conversation") or ex.get("conversations")
    if isinstance(msgs, list) and msgs:
        parts: List[str] = []
        for m in msgs:
            if not isinstance(m, dict):
                continue
            role = (m.get("role") or m.get("speaker") or "").strip().upper()
            content = (m.get("content") or m.get("text") or m.get("value") or "").strip()
            if content:
                tag = "ASSISTANT" if role in {"ASSISTANT", "BOT", "AI"} else ("USER" if role in {"USER", "HUMAN"} else (role or "USER"))
                parts.append(f"[{tag}]: {content}")
        if parts:
            return "\n".join(parts)

    # 2) prompt + response/output
    prompt = ex.get("prompt") or ex.get("question") or ex.get("instruction")
    resp = ex.get("response") or ex.get("output") or ex.get("answer")
    if isinstance(prompt, str) and isinstance(resp, str):
        u = prompt.strip()
        a = resp.strip()
        if u and a:
            inp = ex.get("input")
            if isinstance(inp, str) and inp.strip():
                u = u + ("\n\n" + inp.strip())
            return f"[USER]: {u}\n[ASSISTANT]: {a}"

    # 3) role + text single turn
    role = ex.get("role")
    text = ex.get("text")
    if isinstance(text, str) and text.strip():
        if isinstance(role, str) and role.strip():
            tag = role.strip().upper()
            if tag not in {"USER", "ASSISTANT", "SYSTEM"}:
                # Map common aliases
                if tag in {"HUMAN"}:
                    tag = "USER"
                elif tag in {"BOT", "AI"}:
                    tag = "ASSISTANT"
            return f"[{tag}]: {text.strip()}"
        return text.strip()

    # 4) generic fallbacks
    for k in ("content", "output_text"):
        v = ex.get(k)
        if isinstance(v, str) and v.strip():
            return v.strip()

    return None
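
# Example (illustrative):
#   _extract_text({"prompt": "Hi", "response": "Hello"})
#   -> "[USER]: Hi\n[ASSISTANT]: Hello"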


def token_stream(ds_name: str, target: int, seed: int = 42):
    """
    Streams tokens from HF datasets. Works for plain-text corpora AND chat datasets.

    Optional language filter via suffix:
      - "OpenAssistant/oasst1:en" keeps only examples with ex['lang'] == 'en'
      - "OpenAssistant/oasst1" uses all languages if present

    We format multi-turn as
        [USER]: ...
        [ASSISTANT]: ...
    and ensure an EOS between records.
    """
    if ":" in ds_name:
        base, lang = ds_name.split(":", 1)
    else:
        base, lang = ds_name, None

    ds = load_dataset(base, split="train", streaming=True)
    ds = ds.shuffle(buffer_size=10_000, seed=seed)

    if lang:
        try:
            ds = ds.filter(lambda ex: ex.get("lang", "") == lang)
        except Exception:
            pass  # not all datasets define 'lang'

    emitted = 0
    for ex in ds:
        txt = _extract_text(ex)
        if txt is None:
            # last-chance fallback
            raw = ex.get("text")
            if not isinstance(raw, str) or not raw.strip():
                continue
            txt = raw.strip()

        enc = tok.encode(txt)
        # ensure EOS between records
        if EOS is not None and (len(enc) == 0 or enc[-1] != EOS):
            enc = enc + [EOS]

        for t in enc:
            yield t
            emitted += 1
            if emitted >= target:
                return
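
# Quick sanity check (illustrative; streaming pulls from the Hub, so it needs network):
#   first_64 = list(token_stream("OpenAssistant/oasst1:en", target=64))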


# ───────────────────────── Relative positional bias (ALiBi) ─────────────────────────
def _alibi_slopes(n_heads: int):
    def pow2slopes(n):
        start = 2 ** (-2 ** -(math.log2(n) - 3))
        ratio = start
        return [start * (ratio ** i) for i in range(n)]
    if math.log2(n_heads).is_integer():
        vals = pow2slopes(n_heads)
    else:
        closest = 2 ** math.floor(math.log2(n_heads))
        vals = pow2slopes(closest)
        extra = pow2slopes(2 * closest)
        vals += extra[0::2][: n_heads - closest]
    return torch.tensor(vals, device=DEV).view(1, n_heads, 1, 1)

def alibi_bias(n_heads: int, n_tokens: int):
    i = torch.arange(n_tokens, device=DEV).view(1, 1, n_tokens, 1)
    j = torch.arange(n_tokens, device=DEV).view(1, 1, 1, n_tokens)
    dist = (j - i).clamp_min(0)  # only penalize future
    slopes = _alibi_slopes(n_heads)
    return -slopes * dist
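
# For reference: when n_heads is a power of two the slopes above come out to
# m_h = 2 ** (-8 * (h + 1) / n_heads), and the bias added to attention scores is
# -m_h * max(j - i, 0) for query position i and key position j.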


# ───────────────────────── Model components ─────────────────────────
class LowRankMHA(nn.Module):
    """
    Cache-aware MHA with low-rank projections; supports kv caching for decode.
    """
    def __init__(self, d: int, h: int, r: int, use_relpos: bool = True):
        super().__init__()
        assert d % h == 0, "d must be divisible by number of heads"
        self.h, self.dk = h, d // h
        self.use_relpos = use_relpos
        self.q = nn.Linear(d, d, bias=False)
        self.k = nn.Linear(d, d, bias=False)
        self.v = nn.Linear(d, d, bias=False)
        self.U = nn.Parameter(torch.randn(self.dk, r))
        nn.init.orthogonal_(self.U)
        self.proj = nn.Linear(h * r, d, bias=False)
        self.drop = nn.Dropout(0.1)

    def _proj(self, x):
        B, N, _ = x.shape
        return (x.view(B, N, self.h, self.dk).transpose(1, 2) @ self.U)

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
        rel_bias_tokens: Optional[int] = None,
        kv_cache: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        use_cache: bool = False,
    ):
        q = self._proj(self.q(x))
        k_new = self._proj(self.k(x))
        v_new = self._proj(self.v(x))

        if kv_cache is None:
            k, v = k_new, v_new
        else:
            k, v = kv_cache
            if use_cache:
                k = torch.cat([k, k_new], dim=2)
                v = torch.cat([v, v_new], dim=2)

        att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk)

        if q.size(2) == k.size(2):
            if self.use_relpos and rel_bias_tokens is not None:
                att = att + alibi_bias(self.h, rel_bias_tokens)
            if mask is not None:
                att = att + mask

        z = (att.softmax(-1) @ v).transpose(1, 2)  # (B, Nq, h, r)
        z = z.reshape(x.size(0), x.size(1), -1)
        out = self.drop(self.proj(z))
        return (out, (k, v)) if use_cache else out
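
# Shape sketch (illustrative): x is (B, N, d); _proj maps it to (B, h, N, r) via the
# shared (dk, r) factor U, so attention scores and the kv cache live in the rank-r
# space: cached k/v are (B, h, T, r) rather than (B, h, T, dk).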


class Block(nn.Module):
    def __init__(self, d: int, h: int, r: int):
        super().__init__()
        self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
        self.mha = LowRankMHA(d, h, r, use_relpos=True)
        self.ff = nn.Sequential(nn.Linear(d, 4 * d), nn.ReLU(), nn.Linear(4 * d, d))

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor],
        kv: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        use_cache: bool = False
    ):
        n = x.size(1)
        if use_cache:
            y, new_kv = self.mha(self.ln1(x), mask, rel_bias_tokens=n if mask is not None else None, kv_cache=kv, use_cache=True)
            x = x + y
            x = x + self.ff(self.ln2(x))
            return x, new_kv
        else:
            x = x + self.mha(self.ln1(x), mask, rel_bias_tokens=n)
            return x + self.ff(self.ln2(x))


class Encoder(nn.Module):
    """
    Transformer encoder with optional kv caching (for AR/SAT decode).
    """
    def __init__(self, cfg: Dict[str, int]):
        super().__init__()
        d, l, h, r = cfg["d"], cfg["layers"], cfg["heads"], cfg["rank"]
        self.emb = nn.Embedding(VOCAB, d)
        self.blocks = nn.ModuleList([Block(d, h, r) for _ in range(l)])
        self.ln = nn.LayerNorm(d)

    def forward(
        self,
        ids: torch.Tensor,
        mask: Optional[torch.Tensor],
        kv_caches: Optional[List[Optional[Tuple[torch.Tensor, torch.Tensor]]]] = None,
        use_cache: bool = False
    ):
        x = self.emb(ids)
        if not use_cache:
            for blk in self.blocks:
                x = blk(x, mask)
            return self.ln(x)

        new_kvs: List[Tuple[torch.Tensor, torch.Tensor]] = []
        for i, blk in enumerate(self.blocks):
            kv = kv_caches[i] if (kv_caches is not None) else None
            x, kv_out = blk(x, mask, kv, use_cache=True)
            new_kvs.append(kv_out)
        return self.ln(x), new_kvs


class ARHead(nn.Module):
    def __init__(self, d):
        super().__init__()
        self.proj = nn.Linear(d, VOCAB)

    def forward(self, h):
        return self.proj(h)


class NATHead(nn.Module):
    def __init__(self, d):
        super().__init__()
        self.proj = nn.Linear(d, VOCAB)

    def forward(self, h):
        return self.proj(h)


class SATHead(nn.Module):
    def __init__(self, d, mode="var"):
        super().__init__()
        self.proj = nn.Linear(d, VOCAB)
        self.mode = mode
        self.gate = nn.Linear(d, 2) if mode == "var" else None

    # logits for last SAT_BLOCK tokens, optional gate for stride
    def forward(self, h_last):
        logits = self.proj(h_last)
        gate = self.gate(h_last[:, 0]) if self.gate is not None else None
        return logits, gate


# ───────────────────────── Masks ─────────────────────────
def causal_mask(n):
    m = torch.full((1, 1, n, n), float("-inf"), device=DEV)
    return torch.triu(m, 1)

def sat_mask(n, block=SAT_BLOCK):
    idx = torch.arange(n, device=DEV)
    grp = idx.unsqueeze(0) // block
    allow = (grp.T == grp) | (grp.T > grp)
    return torch.where(allow, 0.0, float("-inf")).unsqueeze(0).unsqueeze(0)
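
# Example (block=2, n=4): groups are [0, 0, 1, 1] and allow[i][j] = (group_i >= group_j):
#   [[1, 1, 0, 0],
#    [1, 1, 0, 0],
#    [1, 1, 1, 1],
#    [1, 1, 1, 1]]
# i.e. block-causal attention: a query sees its own block and all earlier blocks.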


# ───────────────────────── Checkpoint helpers ─────────────────────────
def save_ckpt(
    path: pathlib.Path,
    core: nn.Module,
    ar_h: nn.Module,
    nat_h: nn.Module,
    sat_h: nn.Module,
    opt: torch.optim.Optimizer,
    scaler: GradScaler,
    meta: Dict[str, Any],
):
    path.parent.mkdir(exist_ok=True, parents=True)
    tmp = path.with_suffix(path.suffix + ".tmp")
    state = {
        "core": core.state_dict(),
        "ar": ar_h.state_dict(),
        "nat": nat_h.state_dict(),
        "sat": sat_h.state_dict(),
        "opt": opt.state_dict(),
        "scaler": scaler.state_dict(),
        "cfg": meta.get("cfg"),
        "tokenizer_id": TOKENIZER_ID,
        **{k: v for k, v in meta.items() if k not in {"cfg"}},
    }
    torch.save(state, tmp, _use_new_zipfile_serialization=False)
    tmp.replace(path)
    (path.parent / "latest.json").write_text(json.dumps({"path": str(path), "step": meta["step"]}))
    print(f"\n✓ saved checkpoint {path.name}")

def load_ckpt(
    path: pathlib.Path,
    core: nn.Module,
    ar_h: nn.Module,
    nat_h: nn.Module,
    sat_h: nn.Module,
    opt: torch.optim.Optimizer,
    scaler: GradScaler,
):
    p = _resolve_ckpt(path) or path
    ck = _try_load(p, map_location="cpu")
    if ck is None:
        raise FileNotFoundError(f"No valid checkpoint at {p}")
    core.load_state_dict(ck["core"])
    ar_h.load_state_dict(ck["ar"])
    nat_h.load_state_dict(ck["nat"])
    sat_h.load_state_dict(ck["sat"])
    opt.load_state_dict(ck["opt"])
    scaler.load_state_dict(ck["scaler"])
    return ck.get("step", 0), ck.get("seen_tok", 0), ck.get("wall_time", time.time())

def _safe_load_any(path: pathlib.Path, tgt: nn.Module, key: str | None = None, rename: str | None = None):
    p = _resolve_ckpt(path) or path
    if not p.exists():
        return 0
    ck = _try_load(p, map_location="cpu")
    if ck is None:
        return 0
    sd = ck.get(key, ck) if key else ck
    if isinstance(sd, dict) and "state_dict" in sd:
        sd = sd["state_dict"]
    if rename:
        sd = {k.replace(rename, "proj."): v for k, v in sd.items() if rename in k}
    tgt_sd = tgt.state_dict()
    filt = {k: v for k, v in sd.items() if k in tgt_sd and v.shape == tgt_sd[k].shape}
    if filt:
        tgt.load_state_dict(filt, strict=False)
    return len(filt)

def infer_cfg_from_ckpt(path: pathlib.Path):
    p = _resolve_ckpt(path) or path
    if not p.exists():
        return None
    sd = _try_load(p, map_location="cpu")
    if sd is None:
        return None
    if isinstance(sd, dict) and "cfg" in sd and isinstance(sd["cfg"], dict):
        return dict(sd["cfg"])
    core = sd.get("core")
    if core is None:
        return None
    emb_w = core.get("emb.weight")
    if emb_w is None:
        return None
    d = emb_w.shape[1]
    layer_ids = []
    for k in core.keys():
        if k.startswith("blocks."):
            parts = k.split(".")
            if len(parts) > 2 and parts[1].isdigit():
                layer_ids.append(int(parts[1]))
    layers = (max(layer_ids) + 1) if layer_ids else None
    U = core.get("blocks.0.mha.U")
    heads = rank = None
    if U is not None:
        dk, r = U.shape
        rank = r
        heads = d // dk if dk > 0 else None
    out = {"d": d}
    if layers is not None:
        out["layers"] = layers
    if heads is not None:
        out["heads"] = heads
    if rank is not None:
        out["rank"] = rank
    return out


# ───────────────────────── Train loop ─────────────────────────
def _parse_grow_plan(s: str) -> List[int]:
    steps = []
    for part in s.split(","):
        part = part.strip()
        if part:
            v = int(part)
            if v >= 128:
                steps.append(v)
    return sorted(set(steps))
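
# Example (illustrative): _parse_grow_plan("640, 576, 64, 576") -> [576, 640]
# (entries below 128 are dropped, duplicates collapse, result is sorted ascending).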

def _init_save_timers(resume_wall_time: float | None, interval_sec: int) -> Tuple[float, float]:
    """
    Returns (last_save_wall, last_save_mono).
    We use wall time for metadata, monotonic for interval checks.
    If resuming and the last save was long ago, schedule next save accordingly.
    """
    now_wall = time.time()
    now_mono = time.monotonic()
    if resume_wall_time is None:
        return now_wall, now_mono
    # How long since the previous save in wall-clock time?
    elapsed_wall = max(0.0, now_wall - resume_wall_time)
    # Clamp to the interval so we don't try to "catch up" multiple times
    elapsed_clamped = min(float(interval_sec), elapsed_wall)
    # Pretend we last saved 'elapsed_clamped' ago on the monotonic clock
    return now_wall, now_mono - elapsed_clamped
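
# e.g. interval_sec=3600 with a last save 5000 s ago (wall clock): elapsed is clamped
# to 3600, so one checkpoint becomes due immediately, without a burst of catch-up saves.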

def train(args):
    set_seed(args.seed)

    cfg = PRESETS[args.preset].copy()

    # Previous topology probe (unless --fresh)
    if not args.fresh:
        src_probe = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
        prev_cfg = infer_cfg_from_ckpt(src_probe)
    else:
        prev_cfg = None

    if prev_cfg:
        cfg["d"] = prev_cfg.get("d", cfg["d"])
        if prev_cfg.get("heads"):
            cfg["heads"] = prev_cfg["heads"]
        if args.rank is None and prev_cfg.get("rank"):
            cfg["rank"] = prev_cfg["rank"]
        if prev_cfg.get("layers"):
            cfg["layers"] = prev_cfg["layers"]
        if args.x2 and prev_cfg.get("layers"):
            cfg["layers"] = max(cfg["layers"], prev_cfg["layers"] * 2)
    if args.rank:
        cfg["rank"] = args.rank
    if args.x2 and not prev_cfg:
        cfg["layers"] *= 2

    BLOCK = args.block or DEFAULT_BLOCK

    core = Encoder(cfg).to(DEV)
    ar_h, nat_h = ARHead(cfg["d"]).to(DEV), NATHead(cfg["d"]).to(DEV)
    sat_h = SATHead(cfg["d"], mode="var").to(DEV)

    # Warm start unless --fresh
    loaded = 0
    if not args.fresh:
        src = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
        src = _resolve_ckpt(src)
        if src:
            loaded += _safe_load_any(src, core, key="core")
            loaded += _safe_load_any(src, ar_h, key="ar")
            loaded += _safe_load_any(src, nat_h, key="nat")
            loaded += _safe_load_any(src, sat_h, key="sat")
            if loaded:
                print(f"Warm-start: loaded {loaded} matching tensors from {src}")

    opt = torch.optim.AdamW(
        [
            {"params": core.parameters(), "lr": LR_CORE},
            {"params": ar_h.parameters(), "lr": LR_HEAD},
            {"params": nat_h.parameters(), "lr": LR_HEAD},
            {"params": sat_h.parameters(), "lr": LR_HEAD},
        ]
    )
    scaler = GradScaler(enabled=(args.amp and DEV.type == "cuda"))

    ce_tok = nn.CrossEntropyLoss(label_smoothing=0.1)
    ctc = nn.CTCLoss(blank=BLANK, zero_infinity=True)
    ce_gate = nn.CrossEntropyLoss()

    # ---------- resume bookkeeping ----------
    start_step, seen_tok = 0, 0
    last_save_wall = None
    if args.resume and not args.fresh:
        start_step, seen_tok, last_save_wall = load_ckpt(
            pathlib.Path(args.resume), core, ar_h, nat_h, sat_h, opt, scaler
        )
        print(f"✓ resumed from step {start_step:,}, seen_tokens={seen_tok:,}")
    # Initialize save timers
    last_save_wall, last_save_mono = _init_save_timers(last_save_wall, args.save_every_sec)

    # Target tokens
    if args.target_tokens:
        target_tokens = args.target_tokens
    else:
        param_count = sum(p.numel() for p in core.parameters())
        target_tokens = int(25 * param_count)

    new_tokens_needed = target_tokens - seen_tok
    if new_tokens_needed <= 0:
        print("Target already reached – nothing to train.")
        return
    new_steps = new_tokens_needed // BLOCK
    if args.steps:
        new_steps = min(new_steps, args.steps)
        new_tokens_needed = new_steps * BLOCK

    total_tokens_needed = seen_tok + new_tokens_needed
    print(f"[auto-steps] {new_steps:,} training steps (@ {BLOCK} tokens/step)")

    # Progressive growth plan
    grow_plan = _parse_grow_plan(args.grow_plan) if args.auto_grow else []
    if args.auto_grow:
        if BLOCK not in grow_plan:
            grow_plan = sorted(set(grow_plan + [BLOCK]))
        print(f"[auto-grow] plan: {grow_plan} every {args.grow_every_steps} steps")

    stream = token_stream(args.source, target_tokens, seed=(args.seed if args.seed is not None else 42))
    buf: list[int] = []
    pbar = tqdm(total=total_tokens_needed, initial=seen_tok, unit="tok")
    step = start_step
    steps_since_last_grow = 0

    while seen_tok < total_tokens_needed:
        # ------- assemble one batch -------
        try:
            while len(buf) < BLOCK:
                buf.append(next(stream))
        except StopIteration:
            break
        ids = torch.tensor(buf[:BLOCK], device=DEV).unsqueeze(0)  # (B=1, N)
        buf = buf[BLOCK:]

        tgt_ar = ids.clone()  # (1, N)
        ids_nat = torch.repeat_interleave(ids, 2, 1)  # (1, 2N) for NAT only

        try:
            with amp(args.amp):
                # AR path
                h_ar = core(ids, causal_mask(ids.size(1)))
                logits_ar = ar_h(h_ar)[:, :-1]
                loss_ar = ce_tok(logits_ar.reshape(-1, VOCAB), tgt_ar[:, 1:].reshape(-1))

                # NAT path (uses doubled sequence)
                h_nat = core(ids_nat, None)
                log_nat = nat_h(h_nat).log_softmax(-1).transpose(0, 1)  # (T, B, V)
                # CTC input length is the full doubled sequence T = 2N; target length is N.
                ilen = torch.tensor([ids_nat.size(1)], device=DEV)
                tlen = torch.tensor([ids.size(1)], device=DEV)
                loss_nat = ctc(log_nat, tgt_ar, ilen, tlen)

                # SAT path
                h_sat = core(ids, sat_mask(ids.size(1)))
                logits_sat, gate = sat_h(h_sat[:, -SAT_BLOCK:])
                tgt_sat = ids[:, 1:SAT_BLOCK + 1]
                loss_sat = ce_tok(logits_sat.reshape(-1, VOCAB), tgt_sat.reshape(-1))
                if gate is not None:
                    loss_sat += EMIT_LAMBDA * ce_gate(gate, torch.ones(ids.size(0), device=DEV, dtype=torch.long))

                loss = loss_ar + loss_nat + loss_sat

            # optimisation
            scaler.scale(loss).backward()
            scaler.unscale_(opt)
            nn.utils.clip_grad_norm_(core.parameters(), 1.0)
            scaler.step(opt)
            scaler.update()
            opt.zero_grad(set_to_none=True)

        except RuntimeError as e:
            msg = str(e).lower()
            if "out of memory" in msg or "cuda error" in msg:
                new_block = max(128, BLOCK // 2)
                if new_block < BLOCK:
                    print(f"\n[OOM] reducing block from {BLOCK} -> {new_block}")
                    BLOCK = new_block
                opt.zero_grad(set_to_none=True)  # drop any partial gradients from the failed step
                if DEV.type == "cuda":
                    torch.cuda.empty_cache()
                buf = ids[0].tolist() + buf
                steps_since_last_grow = 0
                continue
            raise

        # progress
        step += 1
        seen_tok += BLOCK
        pbar.update(BLOCK)
        pbar.set_postfix(loss=f"{loss.item():.3f}", block=BLOCK)

        # time-based checkpoint cadence only (monotonic)
        if args.save_every_sec > 0:
            now_mono = time.monotonic()
            if now_mono - last_save_mono >= args.save_every_sec:
                ck_name = f"step{step:08d}.pt"
                save_ckpt(
                    pathlib.Path(args.save_dir) / ck_name,
                    core, ar_h, nat_h, sat_h, opt, scaler,
                    meta={
                        "cfg": cfg,
                        "step": step,
                        "seen_tok": seen_tok,
                        "wall_time": time.time(),
                        "py_state": random.getstate(),
                        "torch_state": rng_state(),
                        "seed": args.seed,
                    },
                )
                last_save_mono = now_mono
                last_save_wall = time.time()

        # progressive growth
        if args.auto_grow:
            steps_since_last_grow += 1
            if steps_since_last_grow >= args.grow_every_steps:
                steps_since_last_grow = 0
                try:
                    idx = grow_plan.index(BLOCK)
                    if idx + 1 < len(grow_plan):
                        candidate = grow_plan[idx + 1]
                        print(f"[auto-grow] attempting BLOCK {BLOCK} -> {candidate}")
                        BLOCK = candidate
                        if DEV.type == "cuda":
                            torch.cuda.empty_cache()
                    else:
                        print("[auto-grow] at max planned block; no further growth.")
                except ValueError:
                    grow_plan = sorted(set(grow_plan + [BLOCK]))
                    idx = grow_plan.index(BLOCK)
                    if idx + 1 < len(grow_plan):
                        candidate = grow_plan[idx + 1]
                        print(f"[auto-grow] moving to planned BLOCK {candidate}")
                        BLOCK = candidate
                        if DEV.type == "cuda":
                            torch.cuda.empty_cache()

    pbar.close()

    # final save
    save_ckpt(
        pathlib.Path(args.save_dir) / "final.pt",
        core, ar_h, nat_h, sat_h, opt, scaler,
        meta={
            "cfg": cfg,
            "step": step,
            "seen_tok": seen_tok,
            "wall_time": time.time(),
            "py_state": random.getstate(),
            "torch_state": rng_state(),
            "seed": args.seed,
        },
    )
    print("🎉 training complete")


# ───────────────────────── Sampling utils ─────────────────────────
def _apply_no_repeat_ngram(logits: torch.Tensor, ids: torch.Tensor, n: int):
    """
    Block tokens that would complete any previously seen n-gram.
    ids: (1, t)
    logits: (..., V) where ... may be (1,) or (stride,)
    """
    if n <= 0 or ids.size(1) < n - 1:
        return logits
    prefix = ids[0, -(n - 1):].tolist()
    # Build set of next tokens forbidden after this prefix.
    banned = []
    tokens = ids[0].tolist()
    for i in range(len(tokens) - n + 1):
        if tokens[i:i + n - 1] == prefix:
            banned.append(tokens[i + n - 1])
    if banned:
        banned_idx = torch.tensor(banned, device=logits.device, dtype=torch.long)
        logits[..., banned_idx] = float("-inf")
    return logits
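
# Example (n=2, illustrative): history [5, 7, 5] gives prefix (5,), and 7 is banned
# because the bigram (5, 7) has already occurred.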


def _apply_rep_presence_frequency(
    logits: torch.Tensor, ids: torch.Tensor, last_n: int,
    repetition_penalty: float, presence_penalty: float, frequency_penalty: float
):
    """
    logits: (..., V) where ... may be (1,) or (stride,)
    ids: (1, t) history
    """
    if ids.numel() == 0:
        return logits
    if last_n > 0:
        hist = ids[0, -last_n:].to(torch.long)
    else:
        hist = ids[0].to(torch.long)

    if hist.numel() == 0:
        return logits

    uniq, counts = torch.unique(hist, return_counts=True)

    # presence/frequency penalties (OpenAI-like)
    if presence_penalty != 0.0 or frequency_penalty != 0.0:
        # subtract presence for seen tokens; subtract frequency * count
        adjust = presence_penalty + frequency_penalty * counts.to(logits.dtype)
        logits[..., uniq] = logits[..., uniq] - adjust

    # repetition penalty (CTRL/GPT-NeoX style)
    if repetition_penalty and abs(repetition_penalty - 1.0) > 1e-6:
        sel = logits[..., uniq]
        # if logit > 0: divide by penalty; else multiply by penalty
        sel = torch.where(sel > 0, sel / repetition_penalty, sel * repetition_penalty)
        logits[..., uniq] = sel

    return logits
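
# Worked example (illustrative): with presence=0.5 and frequency=0.1, a token seen
# 3 times loses 0.5 + 0.1 * 3 = 0.8 from its logit. Independently, repetition_penalty=1.2
# maps a positive logit 2.0 -> 2.0 / 1.2 ≈ 1.67 and a negative logit -2.0 -> -2.4.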


def _filter_top_k_top_p_min_p(
    logits: torch.Tensor, top_k: int, top_p: float, min_p: float, temperature: float
) -> torch.Tensor:
    """
    Works on 1D or 2D logits (..., V). Applies temperature, then filtering.
    Returns normalized probabilities ready for sampling.
    """
    logits = logits / max(temperature, 1e-8)

    # shape handling
    if logits.dim() == 1:
        logits = logits.unsqueeze(0)

    V = logits.size(-1)

    # Convert to probabilities for p-based filtering
    probs = logits.softmax(-1)

    # Top-k
    if top_k and top_k < V:
        vals, idx = torch.topk(probs, top_k, dim=-1)
        mask = torch.full_like(probs, 0.0)
        mask.scatter_(1, idx, 1.0)
        probs = probs * mask

    # Top-p (nucleus)
    if top_p < 1.0:
        sorted_probs, sorted_idx = torch.sort(probs, descending=True, dim=-1)
        cumsum = torch.cumsum(sorted_probs, dim=-1)
        keep = cumsum <= top_p
        # Always keep at least one
        keep[..., 0] = True
        # Build mask
        mask = torch.zeros_like(probs)
        mask.scatter_(1, sorted_idx, keep.to(mask.dtype))
        probs = probs * mask

    # Min-p
    if min_p > 0.0:
        probs = torch.where(probs >= min_p, probs, torch.zeros_like(probs))

    # If everything zeroed (can happen at extreme settings), fall back to the argmax token.
    # Only rows that are actually empty get the one-hot fallback; other rows keep their mass.
    sums = probs.sum(-1, keepdim=True)
    empty = (sums == 0)
    if empty.any():
        fallback_idx = logits.argmax(-1, keepdim=True)
        fallback_val = torch.where(empty, torch.ones_like(sums), probs.gather(-1, fallback_idx))
        probs.scatter_(-1, fallback_idx, fallback_val)

    # Renormalize
    probs = probs / probs.sum(-1, keepdim=True)
    return probs
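
# Worked example (illustrative): probs (0.5, 0.3, 0.15, 0.05) with top_p=0.8 keeps the
# first two entries (cumulative 0.5, 0.8 <= 0.8) and renormalizes them to (0.625, 0.375).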


# ───────────────────────── Inference helpers ─────────────────────────
def load_joint(ckpt: str, preset: str):
    path = _resolve_ckpt(pathlib.Path(ckpt)) or pathlib.Path(ckpt)
    sd = _try_load(path, map_location="cpu")
    if sd is None:
        raise FileNotFoundError(f"No valid checkpoint at {path}")
    cfg = sd["cfg"] if "cfg" in sd and isinstance(sd["cfg"], dict) else (infer_cfg_from_ckpt(path) or PRESETS[preset])
    core = Encoder(cfg).to(DEV)
    ar_h, nat_h = ARHead(cfg["d"]).to(DEV), NATHead(cfg["d"]).to(DEV)
    sat_h = SATHead(cfg["d"]).to(DEV)
    core.load_state_dict(sd["core"])
    ar_h.load_state_dict(sd["ar"])
    nat_h.load_state_dict(sd["nat"])
    sat_h.load_state_dict(sd["sat"])
    return core, ar_h, nat_h, sat_h


@torch.no_grad()
def ar_decode(core, ar_h, prompt: str, max_new: int, T: float,
              greedy: bool, top_k: int, top_p: float, min_p: float,
              repetition_penalty: float, presence_penalty: float,
              frequency_penalty: float, penalty_last_n: int,
              no_repeat_ngram_size: int):
    ids = torch.tensor([tok.encode(prompt)], device=DEV)
    if ids.size(1) == 0:
        ids = torch.tensor([[EOS] if EOS is not None else [0]], device=DEV)
    h_full, kvs = core(ids, causal_mask(ids.size(1)), use_cache=True)

    start = time.time()
    for _ in range(max_new):
        logits = ar_h(h_full)[:, -1]  # (1, V)

        # penalties
        logits = _apply_no_repeat_ngram(logits, ids, no_repeat_ngram_size)
        logits = _apply_rep_presence_frequency(
            logits, ids, penalty_last_n, repetition_penalty, presence_penalty, frequency_penalty
        )

        if greedy:
            nxt = logits.argmax(-1, keepdim=True)
        else:
            probs = _filter_top_k_top_p_min_p(logits.squeeze(0), top_k, top_p, min_p, T)
            nxt = probs.multinomial(1)

        ids = torch.cat([ids, nxt.unsqueeze(0) if nxt.dim() == 1 else nxt], 1)

        # step with kv cache
        x = ids[:, -1:]
        h_full, kvs = core(x, None, kv_caches=kvs, use_cache=True)

    print(tok.decode(ids[0].tolist(), skip_special_tokens=True))
    print(f"[{max_new} tok in {time.time() - start:.2f}s]")


@torch.no_grad()
def sat_decode(core, sat_h, prompt, max_new, T, var,
               greedy: bool, top_k: int, top_p: float, min_p: float,
               repetition_penalty: float, presence_penalty: float,
               frequency_penalty: float, penalty_last_n: int,
               no_repeat_ngram_size: int):
    ids = torch.tensor([tok.encode(prompt)], device=DEV)
    added, t0 = 0, time.time()
    while added < max_new:
        h = core(ids, sat_mask(ids.size(1)))
        logits_all, gate = sat_h(h[:, -SAT_BLOCK:])  # (1, SAT_BLOCK, V)
        stride = 2 if (not var or gate is None) else (gate.softmax(-1).multinomial(1) + 1).item()
        stride = int(stride)

        # Sequentially sample within the stride so penalties apply cumulatively
        for pos in range(stride):
            row_logits = logits_all[:, pos, :]  # (1, V)

            # penalties
            row_logits = _apply_no_repeat_ngram(row_logits, ids, no_repeat_ngram_size)
            row_logits = _apply_rep_presence_frequency(
                row_logits, ids, penalty_last_n, repetition_penalty, presence_penalty, frequency_penalty
            )

            if greedy:
                nxt = row_logits.argmax(-1, keepdim=True)  # (1, 1)
            else:
                probs = _filter_top_k_top_p_min_p(row_logits.squeeze(0), top_k, top_p, min_p, T)
                nxt = probs.multinomial(1)  # (1, 1)

            ids = torch.cat([ids, nxt], 1)
            added += 1
            if added >= max_new:
                break

    print(tok.decode(ids[0].tolist(), skip_special_tokens=True))
    print(f"[{added} tok in {time.time() - t0:.2f}s]")


@torch.no_grad()
def nat_decode(core, nat_h, prompt, max_new, passes, streams):
    ids = torch.tensor([tok.encode(prompt) + [BLANK] * (max_new * 2)], device=DEV)
    t0 = time.time()
    for _ in range(passes):
        h = core(ids, None)
        logits = nat_h(h)
        logits[..., BLANK] = -1e9
        cand = logits.topk(streams, -1).indices.permute(2, 0, 1)
        best = (cand != BLANK).float().mean(-1).argmax(0)
        ids = cand[best, torch.arange(ids.size(0), device=DEV)][:, ::2]
    out = [t for t in ids[0].tolist() if t != BLANK]
    print(tok.decode(out, skip_special_tokens=True))
    print(f"[{len(out)} output tokens in {time.time() - t0:.2f}s]")


# ───────────────────────── CLI ─────────────────────────
def main():
    ap = argparse.ArgumentParser()
    sub = ap.add_subparsers(dest="cmd", required=True)

    tr = sub.add_parser("train")
    tr.add_argument("--preset", choices=PRESETS, default="small")
    tr.add_argument("--rank", type=int)
    tr.add_argument("--block", type=int, default=DEFAULT_BLOCK)
    tr.add_argument("--source", default="cerebras/SlimPajama-627B")
    tr.add_argument("--target_tokens", type=int)
    tr.add_argument("--steps", type=int)
    tr.add_argument("--amp", action="store_true")
    tr.add_argument("--save_every_sec", type=int, default=DEFAULT_SAVE_SEC)
    tr.add_argument("--save_dir", default=str(CKDIR))
    tr.add_argument("--resume", type=str)
    tr.add_argument("--x2", action="store_true", help="~2x params by doubling layers")
    tr.add_argument("--warmstart_from", type=str, default=None, help="Path to previous final.pt for shape-safe warm start")
    tr.add_argument("--fresh", action="store_true", help="Start from scratch: do not probe or load any checkpoints")
    tr.add_argument("--seed", type=int, default=None, help="Random seed for reproducibility")

    # Progressive block growth
    tr.add_argument("--auto_grow", action="store_true", help="Automatically grow block size over time")
    tr.add_argument("--grow_plan", type=str, default="576,640,768,896,1024", help="Comma list of block sizes to try in order")
    tr.add_argument("--grow_every_steps", type=int, default=50000, help="Steps between growth attempts")

    inf = sub.add_parser("infer")
    inf.add_argument("--mode", choices=["ar", "nat", "sat"], required=True)
    inf.add_argument("--ckpt", required=True)
    inf.add_argument("--preset", default="small")
    inf.add_argument("--prompt", required=True)
    inf.add_argument("--max_new", type=int, default=120)
    inf.add_argument("--temperature", type=float, default=1.0)
    inf.add_argument("--seed", type=int, default=None, help="Random seed for reproducibility")

    # New decode controls
    inf.add_argument("--greedy", action="store_true", help="Greedy decode (overrides sampling)")
    inf.add_argument("--top_k", type=int, default=0)
    inf.add_argument("--top_p", type=float, default=1.0)
    inf.add_argument("--min_p", type=float, default=0.0)

    inf.add_argument("--repetition_penalty", type=float, default=1.0)
    inf.add_argument("--presence_penalty", type=float, default=0.0)
    inf.add_argument("--frequency_penalty", type=float, default=0.0)
    inf.add_argument("--penalty_last_n", type=int, default=64)
    inf.add_argument("--no_repeat_ngram_size", type=int, default=0)

    inf.add_argument("--var", action="store_true")
    inf.add_argument("--passes", type=int, default=1)
    inf.add_argument("--streams", type=int, default=5)

    args = ap.parse_args()

    # Set seed early for both train and infer
    if getattr(args, "seed", None) is not None:
        set_seed(args.seed)

    if args.cmd == "train":
        train(args)
    else:
        core, ar_h, nat_h, sat_h = load_joint(args.ckpt, args.preset)
        if args.mode == "ar":
            ar_decode(core, ar_h, args.prompt, args.max_new, args.temperature,
                      args.greedy, args.top_k, args.top_p, args.min_p,
                      args.repetition_penalty, args.presence_penalty,
                      args.frequency_penalty, args.penalty_last_n,
                      args.no_repeat_ngram_size)
        elif args.mode == "sat":
            sat_decode(core, sat_h, args.prompt, args.max_new, args.temperature, args.var,
                       args.greedy, args.top_k, args.top_p, args.min_p,
                       args.repetition_penalty, args.presence_penalty,
                       args.frequency_penalty, args.penalty_last_n,
                       args.no_repeat_ngram_size)
        else:
            nat_decode(core, nat_h, args.prompt, args.max_new, args.passes, args.streams)


if __name__ == "__main__":
    main()