OpenTransformer committed on
Commit 9537b70 · verified · 1 Parent(s): 72921ac

Upload G3f.py

Files changed (1)
  G3f.py +901 -0
G3f.py ADDED
@@ -0,0 +1,901 @@
#!/usr/bin/env python3

# G3f.py — joint AR+SAT trainer/decoder (DeepSeek-V3.2-Exp tokenizer)
# Robust fresh start, ignores *.pt.tmp, automatic AMP dtype, OOM backoff, progressive block growth.
# Added: repetition/presence/frequency penalties, top-k/top-p/min-p, greedy, no-repeat-ngrams.
# Added: rolling checkpoint pruning (--max_ckpts) and "large" preset.
# Added: --chilla_max_double for the 51.2x training ratio.
# Removed: NAT pipeline.

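# Example invocations (illustrative; all flags are defined in main() below):
#   python G3f.py train --preset small --amp --max_ckpts 3
#   python G3f.py infer --mode ar --ckpt ckpts_joint/final.pt --prompt "Once upon" \
#       --top_p 0.9 --repetition_penalty 1.1
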
from __future__ import annotations
import argparse, json, math, pathlib, random, time, os
from contextlib import nullcontext
from typing import Dict, Any, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from datasets import load_dataset
from transformers import AutoTokenizer, logging as hf_log
from tqdm.auto import tqdm

# ───────────────────────── Globals ─────────────────────────
hf_log.set_verbosity_error()
DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cuda.matmul.allow_tf32 = True
try:
    torch.set_float32_matmul_precision("high")
except Exception:
    pass

# UPDATED: Use the DeepSeek-V3.2-Exp tokenizer
TOKENIZER_ID = os.environ.get(
    "TOKENIZER_ID",
    "deepseek-ai/DeepSeek-V3.2-Exp"
)

# DeepSeek tokenizers often require trust_remote_code=True
tok = AutoTokenizer.from_pretrained(TOKENIZER_ID, use_fast=True, trust_remote_code=True)
if tok.pad_token is None:
    # DeepSeek usually uses eos_token_id (100001 or similar) as pad; if undefined, add one.
    tok.add_special_tokens({"pad_token": "<|pad|>"})

VOCAB, EOS = (
    max(tok.get_vocab().values()) + 1,
    tok.eos_token_id if tok.eos_token_id is not None else tok.sep_token_id,
)

PRESETS: Dict[str, Dict[str, int]] = {
    "small":   dict(d=512,  layers=8,  heads=16, rank=64),
    "smallx2": dict(d=512,  layers=16, heads=16, rank=64),
    "base":    dict(d=768,  layers=12, heads=24, rank=96),
    "large":   dict(d=1024, layers=24, heads=16, rank=128),
}

DEFAULT_BLOCK = 576
SAT_BLOCK = 2
LR_CORE, LR_HEAD = 5e-5, 2e-4
EMIT_LAMBDA = 0.1
DEFAULT_SAVE_SEC = 24 * 3600
CKDIR = pathlib.Path("ckpts_joint")

# ───────────────────────── Utilities ─────────────────────────
def rng_state():
    if DEV.type == "cuda":
        try:
            return torch.cuda.get_rng_state(DEV)
        except TypeError:
            return torch.cuda.get_rng_state()
    return torch.get_rng_state()

def _is_probably_ckpt(path: pathlib.Path) -> bool:
    try:
        return path.is_file() and path.suffix == ".pt" and not path.name.endswith(".pt.tmp") and path.stat().st_size > (1 << 20)
    except Exception:
        return False

def _resolve_ckpt(path: pathlib.Path) -> pathlib.Path | None:
    try:
        if path.is_dir():
            # Fixed: the glob pattern must be "*.pt", not ".pt", or nothing matches.
            cands = sorted([p for p in path.glob("*.pt") if _is_probably_ckpt(p)],
                           key=lambda p: p.stat().st_mtime, reverse=True)
            return cands[0] if cands else None

        if path.suffix == ".tmp":
            solid = path.with_suffix("")
            return solid if _is_probably_ckpt(solid) else _resolve_ckpt(path.parent)

        return path if _is_probably_ckpt(path) else _resolve_ckpt(path.parent)
    except Exception:
        return None

def _try_load(path: pathlib.Path, map_location="cpu"):
    try:
        # Fixed: honor the map_location argument instead of hard-coding "cpu".
        return torch.load(path, map_location=map_location)
    except Exception as e:
        print(f"[ckpt-skip] {path} not usable: {e}")
        return None

# ───────────────────────── AMP helper ─────────────────────────
try:
    from torch.amp import autocast as _ac, GradScaler
except ImportError:
    from torch.cuda.amp import autocast as _ac, GradScaler

def _auto_amp_dtype():
    if DEV.type == "cuda":
        try:
            if torch.cuda.is_bf16_supported():
                return torch.bfloat16
            return torch.float16
        except Exception:
            return torch.float16
    return torch.float32

def amp(enabled: bool):
    return nullcontext() if not (enabled and DEV.type == "cuda") else _ac(device_type="cuda", dtype=_auto_amp_dtype())

# ───────────────────────── Data stream ─────────────────────────
def token_stream(ds_name: str, target: int, seed: int = 42):
    ds = load_dataset(ds_name, split="train", streaming=True)
    ds = ds.shuffle(buffer_size=10_000, seed=seed)
    emitted = 0
    for ex in ds:
        enc = tok.encode(ex["text"])
        if EOS is not None and (len(enc) == 0 or enc[-1] != EOS):
            enc = enc + [EOS]

        for t in enc:
            yield t
            emitted += 1
            if emitted >= target:
                return

# ───────────────────────── Relative positional bias (ALiBi) ─────────────────────────
def _alibi_slopes(n_heads: int):
    def pow2slopes(n):
        start = 2 ** (-2 ** -(math.log2(n) - 3))
        ratio = start
        return [start * (ratio ** i) for i in range(n)]
    if math.log2(n_heads).is_integer():
        vals = pow2slopes(n_heads)
    else:
        closest = 2 ** math.floor(math.log2(n_heads))
        vals = pow2slopes(closest)
        extra = pow2slopes(2 * closest)
        vals += extra[0::2][: n_heads - closest]
    return torch.tensor(vals, device=DEV).view(1, n_heads, 1, 1)

def alibi_bias(n_heads: int, n_tokens: int):
    i = torch.arange(n_tokens, device=DEV).view(1, 1, n_tokens, 1)
    j = torch.arange(n_tokens, device=DEV).view(1, 1, 1, n_tokens)
    # clamp_min(0) keeps the bias at zero over the causal past (j <= i), so this
    # variant only penalizes look-ahead positions; it matters for the SAT mask,
    # while the strictly causal AR path receives an all-zero bias.
    dist = (j - i).clamp_min(0)
    slopes = _alibi_slopes(n_heads)
    return -slopes * dist

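# Worked example (illustrative): for 1 head with slope s and 3 tokens,
# alibi_bias(1, 3)[0, 0] is
#   [[0, -s, -2s],
#    [0,  0,  -s],
#    [0,  0,   0]]
# i.e. zero over the causal past and increasingly negative toward future offsets.
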
# ───────────────────────── Model components ─────────────────────────
class LowRankMHA(nn.Module):
    def __init__(self, d: int, h: int, r: int, use_relpos: bool = True):
        super().__init__()
        assert d % h == 0, "d must be divisible by number of heads"
        self.h, self.dk = h, d // h
        self.use_relpos = use_relpos
        self.q = nn.Linear(d, d, bias=False)
        self.k = nn.Linear(d, d, bias=False)
        self.v = nn.Linear(d, d, bias=False)

        self.U = nn.Parameter(torch.randn(self.dk, r))
        nn.init.orthogonal_(self.U)

        self.proj = nn.Linear(h * r, d, bias=False)
        self.drop = nn.Dropout(0.1)

    def _proj(self, x):
        B, N, _ = x.shape
        return (x.view(B, N, self.h, self.dk).transpose(1, 2) @ self.U)

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
        rel_bias_tokens: Optional[int] = None,
        kv_cache: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        use_cache: bool = False,
    ):
        q = self._proj(self.q(x))
        k_new = self._proj(self.k(x))
        v_new = self._proj(self.v(x))

        if kv_cache is None:
            k, v = k_new, v_new
        else:
            k, v = kv_cache
            if use_cache:
                k = torch.cat([k, k_new], dim=2)
                v = torch.cat([v, v_new], dim=2)

        att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk)

        if q.size(2) == k.size(2):
            if self.use_relpos and rel_bias_tokens is not None:
                att = att + alibi_bias(self.h, rel_bias_tokens)

            if mask is not None:
                att = att + mask

        z = (att.softmax(-1) @ v).transpose(1, 2)
        z = z.reshape(x.size(0), x.size(1), -1)
        out = self.drop(self.proj(z))
        return (out, (k, v)) if use_cache else out

class Block(nn.Module):
    def __init__(self, d: int, h: int, r: int):
        super().__init__()
        self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
        self.mha = LowRankMHA(d, h, r, use_relpos=True)
        self.ff = nn.Sequential(nn.Linear(d, 4 * d), nn.ReLU(), nn.Linear(4 * d, d))

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor],
        kv: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        use_cache: bool = False,
    ):
        n = x.size(1)
        if use_cache:
            y, new_kv = self.mha(self.ln1(x), mask, rel_bias_tokens=n if mask is not None else None, kv_cache=kv, use_cache=True)
            x = x + y
            x = x + self.ff(self.ln2(x))
            return x, new_kv
        else:
            x = x + self.mha(self.ln1(x), mask, rel_bias_tokens=n)
            return x + self.ff(self.ln2(x))

class Encoder(nn.Module):
    def __init__(self, cfg: Dict[str, int]):
        super().__init__()
        d, l, h, r = cfg["d"], cfg["layers"], cfg["heads"], cfg["rank"]
        self.emb = nn.Embedding(VOCAB, d)
        self.blocks = nn.ModuleList([Block(d, h, r) for _ in range(l)])
        self.ln = nn.LayerNorm(d)

    def forward(
        self,
        ids: torch.Tensor,
        mask: Optional[torch.Tensor],
        kv_caches: Optional[List[Optional[Tuple[torch.Tensor, torch.Tensor]]]] = None,
        use_cache: bool = False,
    ):
        x = self.emb(ids)
        if not use_cache:
            for blk in self.blocks:
                x = blk(x, mask)
            return self.ln(x)

        new_kvs: List[Tuple[torch.Tensor, torch.Tensor]] = []
        for i, blk in enumerate(self.blocks):
            kv = kv_caches[i] if (kv_caches is not None) else None
            x, kv_out = blk(x, mask, kv, use_cache=True)
            new_kvs.append(kv_out)
        return self.ln(x), new_kvs

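# Cache flow (illustrative): prime the per-layer caches with one full prompt pass,
#   h, kvs = core(ids, causal_mask(ids.size(1)), use_cache=True)
# then feed a single token at a time, reusing and extending the caches:
#   h, kvs = core(ids[:, -1:], None, kv_caches=kvs, use_cache=True)
# ar_decode below follows exactly this pattern.
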
class ARHead(nn.Module):
    def __init__(self, d):
        super().__init__()
        self.proj = nn.Linear(d, VOCAB)
    def forward(self, h): return self.proj(h)

class SATHead(nn.Module):
    def __init__(self, d, mode="var"):
        super().__init__()
        self.proj = nn.Linear(d, VOCAB)
        self.mode = mode
        self.gate = nn.Linear(d, 2) if mode == "var" else None
    def forward(self, h_last):
        logits = self.proj(h_last)
        gate = self.gate(h_last[:, 0]) if self.gate is not None else None
        return logits, gate

# ───────────────────────── Masks ─────────────────────────
def causal_mask(n):
    m = torch.full((1, 1, n, n), float("-inf"), device=DEV)
    return torch.triu(m, 1)

def sat_mask(n, block=SAT_BLOCK):
    idx = torch.arange(n, device=DEV)
    grp = idx.unsqueeze(0) // block
    allow = (grp.T == grp) | (grp.T > grp)
    return torch.where(allow, 0.0, float("-inf")).unsqueeze(0).unsqueeze(0)

+ # ───────────────────────── Checkpoint helpers ─────────────────────────
292
+ def _prune_old_ckpts(dir_path: pathlib.Path, max_keep: int):
293
+ """
294
+ Keeps only the 'max_keep' most recent step-based checkpoints.
295
+ Assumes checkpoints are named 'stepXXXXXXXX.pt'.
296
+ """
297
+ if max_keep <= 0:
298
+ return
299
+
300
+ # Find all step checkpoints (ignoring final.pt or others)
301
+ ckpts = sorted([p for p in dir_path.glob("step*.pt") if _is_probably_ckpt(p)])
302
+
303
+ if len(ckpts) > max_keep:
304
+ # We need to remove the oldest ones
305
+ num_to_delete = len(ckpts) - max_keep
306
+ for i in range(num_to_delete):
307
+ victim = ckpts[i] # sorted by name (step001 < step002) implies age
308
+ try:
309
+ victim.unlink()
310
+ # Try to remove associated .tmp if it exists (though it shouldn't)
311
+ tmp_v = victim.with_suffix(".pt.tmp")
312
+ if tmp_v.exists(): tmp_v.unlink()
313
+ print(f" [prune] deleted old checkpoint {victim.name}")
314
+ except Exception as e:
315
+ print(f" [prune] failed to delete {victim.name}: {e}")
316
+
317
+ def save_ckpt(
318
+ path: pathlib.Path,
319
+ core: nn.Module,
320
+ ar_h: nn.Module,
321
+ sat_h: nn.Module,
322
+ opt: torch.optim.Optimizer,
323
+ scaler: GradScaler,
324
+ meta: Dict[str, Any],
325
+ max_ckpts: int | None = None,
326
+ ):
327
+ path.parent.mkdir(exist_ok=True, parents=True)
328
+ tmp = path.with_suffix(path.suffix + ".tmp")
329
+ state = {
330
+ "core": core.state_dict(),
331
+ "ar": ar_h.state_dict(),
332
+ "sat": sat_h.state_dict(),
333
+ "opt": opt.state_dict(),
334
+ "scaler": scaler.state_dict(),
335
+ "cfg": meta.get("cfg"),
336
+ "tokenizer_id": TOKENIZER_ID,
337
+ **{k: v for k, v in meta.items() if k not in {"cfg"}},
338
+ }
339
+ torch.save(state, tmp, _use_new_zipfile_serialization=False)
340
+ tmp.replace(path)
341
+ (path.parent / "latest.json").write_text(json.dumps({"path": str(path), "step": meta["step"]}))
342
+ print(f"\nβœ“ saved checkpoint {path.name}")
343
+
344
+ if max_ckpts is not None:
345
+ _prune_old_ckpts(path.parent, max_ckpts)
346
+
347
+ def load_ckpt(
348
+ path: pathlib.Path,
349
+ core: nn.Module,
350
+ ar_h: nn.Module,
351
+ sat_h: nn.Module,
352
+ opt: torch.optim.Optimizer,
353
+ scaler: GradScaler,
354
+ ):
355
+ p = _resolve_ckpt(path) or path
356
+ ck = _try_load(p, map_location="cpu")
357
+ if ck is None:
358
+ raise FileNotFoundError(f"No valid checkpoint at {p}")
359
+ core.load_state_dict(ck["core"])
360
+ ar_h.load_state_dict(ck["ar"])
361
+ sat_h.load_state_dict(ck["sat"])
362
+ opt.load_state_dict(ck["opt"])
363
+ scaler.load_state_dict(ck["scaler"])
364
+ return ck.get("step", 0), ck.get("seen_tok", 0), ck.get("wall_time", time.time())
365
+
366
+ def _safe_load_any(path: pathlib.Path, tgt: nn.Module, key: str | None = None, rename: str | None = None):
367
+ p = _resolve_ckpt(path) or path
368
+ if not p.exists(): return 0
369
+ ck = _try_load(p, map_location="cpu")
370
+ if ck is None: return 0
371
+ sd = ck.get(key, ck) if key else ck
372
+ if isinstance(sd, dict) and "state_dict" in sd:
373
+ sd = sd["state_dict"]
374
+
375
+ if rename:
376
+ sd = {k.replace(rename, "proj."): v for k, v in sd.items() if rename in k}
377
+
378
+ tgt_sd = tgt.state_dict()
379
+ filt = {k: v for k, v in sd.items() if k in tgt_sd and v.shape == tgt_sd[k].shape}
380
+ if filt:
381
+ tgt.load_state_dict(filt, strict=False)
382
+ return len(filt)
383
+
384
+ def infer_cfg_from_ckpt(path: pathlib.Path):
385
+ p = _resolve_ckpt(path) or path
386
+ if not p.exists(): return None
387
+ sd = _try_load(p, map_location="cpu")
388
+ if sd is None: return None
389
+
390
+ if isinstance(sd, dict) and "cfg" in sd and isinstance(sd["cfg"], dict):
391
+ return dict(sd["cfg"])
392
+
393
+ core = sd.get("core")
394
+ if core is None: return None
395
+
396
+ emb_w = core.get("emb.weight")
397
+ if emb_w is None: return None
398
+ d = emb_w.shape[1]
399
+
400
+ layer_ids = []
401
+ for k in core.keys():
402
+ if k.startswith("blocks."):
403
+ parts = k.split(".")
404
+ if len(parts) > 2 and parts[1].isdigit():
405
+ layer_ids.append(int(parts[1]))
406
+ layers = (max(layer_ids) + 1) if layer_ids else None
407
+
408
+ U = core.get("blocks.0.mha.U")
409
+ heads = rank = None
410
+ if U is not None:
411
+ dk, r = U.shape
412
+ rank = r
413
+ heads = d // dk if dk > 0 else None
414
+
415
+ out = {"d": d}
416
+ if layers is not None: out["layers"] = layers
417
+ if heads is not None: out["heads"] = heads
418
+ if rank is not None: out["rank"] = rank
419
+ return out
420
+
# ───────────────────────── Train loop ─────────────────────────
def _parse_grow_plan(s: str) -> List[int]:
    steps = []
    for part in s.split(","):
        part = part.strip()
        if part:
            v = int(part)
            if v >= 128:
                steps.append(v)
    return sorted(set(steps))

def _init_save_timers(resume_wall_time: float | None, interval_sec: int) -> Tuple[float, float]:
    now_wall = time.time()
    now_mono = time.monotonic()
    if resume_wall_time is None:
        return now_wall, now_mono

    elapsed_wall = max(0.0, now_wall - resume_wall_time)
    elapsed_clamped = min(float(interval_sec), elapsed_wall)
    return now_wall, now_mono - elapsed_clamped

def train(args):
    cfg = PRESETS[args.preset].copy()

    if not args.fresh:
        src_probe = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
        prev_cfg = infer_cfg_from_ckpt(src_probe)
    else:
        prev_cfg = None

    if prev_cfg:
        cfg["d"] = prev_cfg.get("d", cfg["d"])
        if prev_cfg.get("heads"):
            cfg["heads"] = prev_cfg["heads"]
        if args.rank is None and prev_cfg.get("rank"):
            cfg["rank"] = prev_cfg["rank"]
        if prev_cfg.get("layers"):
            cfg["layers"] = prev_cfg["layers"]
        if args.x2 and prev_cfg.get("layers"):
            cfg["layers"] = max(cfg["layers"], prev_cfg["layers"] * 2)

    if args.rank:
        cfg["rank"] = args.rank
    if args.x2 and not prev_cfg:
        cfg["layers"] *= 2

    BLOCK = args.block or DEFAULT_BLOCK

    core = Encoder(cfg).to(DEV)
    ar_h = ARHead(cfg["d"]).to(DEV)
    sat_h = SATHead(cfg["d"], mode="var").to(DEV)

    loaded = 0
    if not args.fresh:
        src = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
        src = _resolve_ckpt(src)
        if src:
            loaded += _safe_load_any(src, core, key="core")
            loaded += _safe_load_any(src, ar_h, key="ar")
            loaded += _safe_load_any(src, sat_h, key="sat")
            if loaded:
                print(f"Warm-start: loaded {loaded} matching tensors from {src}")

    opt = torch.optim.AdamW(
        [
            {"params": core.parameters(), "lr": LR_CORE},
            {"params": ar_h.parameters(), "lr": LR_HEAD},
            {"params": sat_h.parameters(), "lr": LR_HEAD},
        ]
    )
    scaler = GradScaler(enabled=(args.amp and DEV.type == "cuda"))

    ce_tok = nn.CrossEntropyLoss(label_smoothing=0.1)
    ce_gate = nn.CrossEntropyLoss()

    start_step, seen_tok = 0, 0
    last_save_wall = None

    if args.resume and not args.fresh:
        start_step, seen_tok, last_save_wall = load_ckpt(
            pathlib.Path(args.resume), core, ar_h, sat_h, opt, scaler
        )
        print(f"✓ resumed from step {start_step:,}, seen_tokens={seen_tok:,}")

    last_save_wall, last_save_mono = _init_save_timers(last_save_wall, args.save_every_sec)

    if args.target_tokens:
        target_tokens = args.target_tokens
    else:
        param_count = sum(p.numel() for p in core.parameters())
        # Default is 25x; "chilla max double" is 51.2x (25.6 * 2).
        ratio = 51.2 if args.chilla_max_double else 25
        target_tokens = int(ratio * param_count)
        print(f"[config] Chinchilla ratio: {ratio}x tokens/param")

    new_tokens_needed = target_tokens - seen_tok
    if new_tokens_needed <= 0:
        print("Target already reached – nothing to train.")
        return

    new_steps = new_tokens_needed // BLOCK
    if args.steps:
        new_steps = min(new_steps, args.steps)
        new_tokens_needed = new_steps * BLOCK

    total_tokens_needed = seen_tok + new_tokens_needed
    print(f"[auto-steps] {new_steps:,} training steps (@ {BLOCK} tokens/step)")

    grow_plan = _parse_grow_plan(args.grow_plan) if args.auto_grow else []
    if args.auto_grow:
        if BLOCK not in grow_plan:
            grow_plan = sorted(set(grow_plan + [BLOCK]))
        print(f"[auto-grow] plan: {grow_plan} every {args.grow_every_steps} steps")

    stream = token_stream(args.source, target_tokens, seed=42)
    buf: list[int] = []
    pbar = tqdm(total=total_tokens_needed, initial=seen_tok, unit="tok")
    step = start_step
    steps_since_last_grow = 0

    while seen_tok < total_tokens_needed:
        try:
            while len(buf) < BLOCK:
                buf.append(next(stream))
        except StopIteration:
            break

        ids = torch.tensor(buf[:BLOCK], device=DEV).unsqueeze(0)
        buf = buf[BLOCK:]

        tgt_ar = ids.clone()

        try:
            with amp(args.amp):
                h_ar = core(ids, causal_mask(ids.size(1)))
                logits_ar = ar_h(h_ar)[:, :-1]
                loss_ar = ce_tok(logits_ar.reshape(-1, VOCAB), tgt_ar[:, 1:].reshape(-1))

                h_sat = core(ids, sat_mask(ids.size(1)))
                logits_sat, gate = sat_h(h_sat[:, -SAT_BLOCK:])
                tgt_sat = ids[:, 1:SAT_BLOCK + 1]
                loss_sat = ce_tok(logits_sat.reshape(-1, VOCAB), tgt_sat.reshape(-1))
                if gate is not None:
                    loss_sat += EMIT_LAMBDA * ce_gate(gate, torch.ones(ids.size(0), device=DEV, dtype=torch.long))

                loss = loss_ar + loss_sat

            scaler.scale(loss).backward()
            scaler.unscale_(opt)
            nn.utils.clip_grad_norm_(core.parameters(), 1.0)
            scaler.step(opt)
            scaler.update()
            opt.zero_grad(set_to_none=True)

        except RuntimeError as e:
            msg = str(e).lower()
            if "out of memory" in msg or "cuda error" in msg:
                new_block = max(128, BLOCK // 2)
                if new_block < BLOCK:
                    print(f"\n[OOM] reducing block from {BLOCK} -> {new_block}")
                    BLOCK = new_block
                    # Fixed: drop any partially accumulated gradients before retrying.
                    opt.zero_grad(set_to_none=True)
                    if DEV.type == "cuda":
                        torch.cuda.empty_cache()
                    buf = ids[0].tolist() + buf
                    steps_since_last_grow = 0
                    continue
            raise

        step += 1
        seen_tok += BLOCK
        pbar.update(BLOCK)
        pbar.set_postfix(loss=f"{loss.item():.3f}", block=BLOCK)

        if args.save_every_sec > 0:
            now_mono = time.monotonic()
            if now_mono - last_save_mono >= args.save_every_sec:
                ck_name = f"step{step:08d}.pt"
                save_ckpt(
                    pathlib.Path(args.save_dir) / ck_name,
                    core, ar_h, sat_h, opt, scaler,
                    meta={
                        "cfg": cfg,
                        "step": step,
                        "seen_tok": seen_tok,
                        "wall_time": time.time(),
                        "py_state": random.getstate(),
                        "torch_state": rng_state(),
                    },
                    max_ckpts=args.max_ckpts,
                )
                last_save_mono = now_mono
                last_save_wall = time.time()

        if args.auto_grow:
            steps_since_last_grow += 1
            if steps_since_last_grow >= args.grow_every_steps:
                steps_since_last_grow = 0
                try:
                    idx = grow_plan.index(BLOCK)
                    if idx + 1 < len(grow_plan):
                        candidate = grow_plan[idx + 1]
                        print(f"[auto-grow] attempting BLOCK {BLOCK} -> {candidate}")
                        BLOCK = candidate
                        if DEV.type == "cuda":
                            torch.cuda.empty_cache()
                    else:
                        print("[auto-grow] at max planned block; no further growth.")
                except ValueError:
                    grow_plan = sorted(set(grow_plan + [BLOCK]))
                    idx = grow_plan.index(BLOCK)
                    if idx + 1 < len(grow_plan):
                        candidate = grow_plan[idx + 1]
                        print(f"[auto-grow] moving to planned BLOCK {candidate}")
                        BLOCK = candidate
                        if DEV.type == "cuda":
                            torch.cuda.empty_cache()

    pbar.close()

    save_ckpt(
        pathlib.Path(args.save_dir) / "final.pt",
        core, ar_h, sat_h, opt, scaler,
        meta={
            "cfg": cfg,
            "step": step,
            "seen_tok": seen_tok,
            "wall_time": time.time(),
            "py_state": random.getstate(),
            "torch_state": rng_state(),
        },
        max_ckpts=None,  # never prune final.pt
    )
    print("🎉 training complete")

# ───────────────────────── Sampling utils ─────────────────────────
def _apply_no_repeat_ngram(logits: torch.Tensor, ids: torch.Tensor, n: int):
    if n <= 0 or ids.size(1) < n - 1:
        return logits

    prefix = ids[0, -(n - 1):].tolist()
    banned = []
    tokens = ids[0].tolist()
    for i in range(len(tokens) - n + 1):
        if tokens[i:i + n - 1] == prefix:
            banned.append(tokens[i + n - 1])

    if banned:
        banned_idx = torch.tensor(banned, device=logits.device, dtype=torch.long)
        logits[..., banned_idx] = float("-inf")
    return logits

def _apply_rep_presence_frequency(
    logits: torch.Tensor, ids: torch.Tensor, last_n: int,
    repetition_penalty: float, presence_penalty: float, frequency_penalty: float
):
    if ids.numel() == 0:
        return logits

    if last_n > 0:
        hist = ids[0, -last_n:].to(torch.long)
    else:
        hist = ids[0].to(torch.long)

    if hist.numel() == 0:
        return logits

    uniq, counts = torch.unique(hist, return_counts=True)

    if presence_penalty != 0.0 or frequency_penalty != 0.0:
        adjust = presence_penalty + frequency_penalty * counts.to(logits.dtype)
        logits[..., uniq] = logits[..., uniq] - adjust

    if repetition_penalty and abs(repetition_penalty - 1.0) > 1e-6:
        sel = logits[..., uniq]
        sel = torch.where(sel > 0, sel / repetition_penalty, sel * repetition_penalty)
        logits[..., uniq] = sel

    return logits

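# Worked example (illustrative): if token 42 occurs 3 times in the penalty
# window, its logit l first becomes l - presence_penalty - 3 * frequency_penalty;
# with repetition_penalty = 1.2 the result is then divided by 1.2 if positive,
# or multiplied by 1.2 if negative, pushing repeated tokens downward either way.
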
def _filter_top_k_top_p_min_p(
    logits: torch.Tensor, top_k: int, top_p: float, min_p: float, temperature: float
) -> torch.Tensor:
    logits = logits / max(temperature, 1e-8)

    if logits.dim() == 1:
        logits = logits.unsqueeze(0)

    V = logits.size(-1)
    probs = logits.softmax(-1)

    if top_k and top_k < V:
        _, idx = torch.topk(probs, top_k, dim=-1)
        mask = torch.zeros_like(probs)
        mask.scatter_(1, idx, 1.0)
        probs = probs * mask

    if top_p < 1.0:
        sorted_probs, sorted_idx = torch.sort(probs, descending=True, dim=-1)
        cumsum = torch.cumsum(sorted_probs, dim=-1)
        keep = cumsum <= top_p
        keep[..., 0] = True
        mask = torch.zeros_like(probs)
        mask.scatter_(1, sorted_idx, keep.to(mask.dtype))
        probs = probs * mask

    if min_p > 0.0:
        probs = torch.where(probs >= min_p, probs, torch.zeros_like(probs))

    sums = probs.sum(-1, keepdim=True)
    empty = (sums == 0)
    if empty.any():
        # Fixed: only rows whose mass was entirely filtered away fall back to a
        # one-hot on the argmax; non-empty rows keep their existing probabilities
        # (previously the scatter zeroed the argmax entry of non-empty rows).
        fallback_idx = logits.argmax(-1, keepdim=True)
        fallback_val = torch.where(empty, torch.ones_like(sums), torch.gather(probs, -1, fallback_idx))
        probs = torch.where(empty, torch.zeros_like(probs), probs)
        probs.scatter_(-1, fallback_idx, fallback_val)

    probs = probs / probs.sum(-1, keepdim=True)
    return probs

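# Illustrative use, matching ar_decode below: given last-step logits of shape
# (V,) or (1, V),
#   probs = _filter_top_k_top_p_min_p(logits, top_k=50, top_p=0.9, min_p=0.0, temperature=0.8)
#   nxt = probs.multinomial(1)
# Note min_p here is an absolute probability floor applied after top-k/top-p,
# not the max-relative variant some samplers use.
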
# ───────────────────────── Inference helpers ─────────────────────────
def load_joint(ckpt: str, preset: str):
    path = _resolve_ckpt(pathlib.Path(ckpt)) or pathlib.Path(ckpt)
    sd = _try_load(path, map_location="cpu")
    if sd is None:
        raise FileNotFoundError(f"No valid checkpoint at {path}")

    cfg = sd["cfg"] if "cfg" in sd and isinstance(sd["cfg"], dict) else (infer_cfg_from_ckpt(path) or PRESETS[preset])

    core = Encoder(cfg).to(DEV)
    ar_h = ARHead(cfg["d"]).to(DEV)
    sat_h = SATHead(cfg["d"]).to(DEV)

    core.load_state_dict(sd["core"])
    ar_h.load_state_dict(sd["ar"])
    sat_h.load_state_dict(sd["sat"])

    return core, ar_h, sat_h

@torch.no_grad()
def ar_decode(core, ar_h, prompt: str, max_new: int, T: float,
              greedy: bool, top_k: int, top_p: float, min_p: float,
              repetition_penalty: float, presence_penalty: float,
              frequency_penalty: float, penalty_last_n: int,
              no_repeat_ngram_size: int):
    ids = torch.tensor([tok.encode(prompt)], device=DEV)
    if ids.size(1) == 0:
        ids = torch.tensor([[EOS] if EOS is not None else [0]], device=DEV)

    h_full, kvs = core(ids, causal_mask(ids.size(1)), use_cache=True)

    start = time.time()
    for _ in range(max_new):
        logits = ar_h(h_full)[:, -1]

        logits = _apply_no_repeat_ngram(logits, ids, no_repeat_ngram_size)
        logits = _apply_rep_presence_frequency(
            logits, ids, penalty_last_n, repetition_penalty, presence_penalty, frequency_penalty
        )

        if greedy:
            nxt = logits.argmax(-1, keepdim=True)
        else:
            probs = _filter_top_k_top_p_min_p(logits.squeeze(0), top_k, top_p, min_p, T)
            nxt = probs.multinomial(1)

        ids = torch.cat([ids, nxt.unsqueeze(0) if nxt.dim() == 1 else nxt], 1)

        x = ids[:, -1:]
        h_full, kvs = core(x, None, kv_caches=kvs, use_cache=True)

    print(tok.decode(ids[0].tolist(), skip_special_tokens=True))
    print(f"[{max_new} tok in {time.time() - start:.2f}s]")

@torch.no_grad()
def sat_decode(core, sat_h, prompt, max_new, T, var,
               greedy: bool, top_k: int, top_p: float, min_p: float,
               repetition_penalty: float, presence_penalty: float,
               frequency_penalty: float, penalty_last_n: int,
               no_repeat_ngram_size: int):
    ids = torch.tensor([tok.encode(prompt)], device=DEV)
    added, t0 = 0, time.time()
    while added < max_new:
        h = core(ids, sat_mask(ids.size(1)))
        logits_all, gate = sat_h(h[:, -SAT_BLOCK:])

        stride = 2 if (not var or gate is None) else (gate.softmax(-1).multinomial(1) + 1).item()
        stride = int(stride)

        for pos in range(stride):
            row_logits = logits_all[:, pos, :]

            row_logits = _apply_no_repeat_ngram(row_logits, ids, no_repeat_ngram_size)
            row_logits = _apply_rep_presence_frequency(
                row_logits, ids, penalty_last_n, repetition_penalty, presence_penalty, frequency_penalty
            )

            if greedy:
                nxt = row_logits.argmax(-1, keepdim=True)
            else:
                probs = _filter_top_k_top_p_min_p(row_logits.squeeze(0), top_k, top_p, min_p, T)
                nxt = probs.multinomial(1)

            ids = torch.cat([ids, nxt], 1)
            added += 1
            if added >= max_new:
                break

    print(tok.decode(ids[0].tolist(), skip_special_tokens=True))
    print(f"[{added} tok in {time.time() - t0:.2f}s]")

# ───────────────────────── CLI ─────────────────────────
def main():
    ap = argparse.ArgumentParser()
    sub = ap.add_subparsers(dest="cmd", required=True)

    tr = sub.add_parser("train")
    tr.add_argument("--preset", choices=PRESETS, default="small")
    tr.add_argument("--rank", type=int)
    tr.add_argument("--block", type=int, default=DEFAULT_BLOCK)
    tr.add_argument("--source", default="cerebras/SlimPajama-627B")
    tr.add_argument("--target_tokens", type=int)
    tr.add_argument("--steps", type=int)
    tr.add_argument("--amp", action="store_true")
    tr.add_argument("--save_every_sec", type=int, default=DEFAULT_SAVE_SEC)
    tr.add_argument("--save_dir", default=str(CKDIR))
    tr.add_argument("--resume", type=str)
    tr.add_argument("--x2", action="store_true", help="~2x params by doubling layers")
    tr.add_argument("--warmstart_from", type=str, default=None, help="Path to previous final.pt for shape-safe warm start")
    tr.add_argument("--fresh", action="store_true", help="Start from scratch: do not probe or load any checkpoints")

    # Checkpoint control
    tr.add_argument("--max_ckpts", type=int, default=None, help="Max number of recent step checkpoints to keep (deletes oldest)")

    # Chinchilla control
    tr.add_argument("--chilla_max_double", action="store_true", help="Use 51.2x tokens/param (25.6 * 2) instead of the default 25x")

    tr.add_argument("--auto_grow", action="store_true", help="Automatically grow block size over time")
    tr.add_argument("--grow_plan", type=str, default="576,640,768,896,1024", help="Comma-separated list of block sizes to try in order")
    tr.add_argument("--grow_every_steps", type=int, default=50000, help="Steps between growth attempts")

    inf = sub.add_parser("infer")
    inf.add_argument("--mode", choices=["ar", "sat"], required=True)
    inf.add_argument("--ckpt", required=True)
    inf.add_argument("--preset", default="small")
    inf.add_argument("--prompt", required=True)
    inf.add_argument("--max_new", type=int, default=120)
    inf.add_argument("--temperature", type=float, default=1.0)

    inf.add_argument("--greedy", action="store_true", help="Greedy decode (overrides sampling)")
    inf.add_argument("--top_k", type=int, default=0)
    inf.add_argument("--top_p", type=float, default=1.0)
    inf.add_argument("--min_p", type=float, default=0.0)

    inf.add_argument("--repetition_penalty", type=float, default=1.0)
    inf.add_argument("--presence_penalty", type=float, default=0.0)
    inf.add_argument("--frequency_penalty", type=float, default=0.0)
    inf.add_argument("--penalty_last_n", type=int, default=64)
    inf.add_argument("--no_repeat_ngram_size", type=int, default=0)

    inf.add_argument("--var", action="store_true")

    args = ap.parse_args()

    if args.cmd == "train":
        train(args)
    else:
        core, ar_h, sat_h = load_joint(args.ckpt, args.preset)
        if args.mode == "ar":
            ar_decode(core, ar_h, args.prompt, args.max_new, args.temperature,
                      args.greedy, args.top_k, args.top_p, args.min_p,
                      args.repetition_penalty, args.presence_penalty,
                      args.frequency_penalty, args.penalty_last_n,
                      args.no_repeat_ngram_size)
        elif args.mode == "sat":
            sat_decode(core, sat_h, args.prompt, args.max_new, args.temperature, args.var,
                       args.greedy, args.top_k, args.top_p, args.min_p,
                       args.repetition_penalty, args.presence_penalty,
                       args.frequency_penalty, args.penalty_last_n,
                       args.no_repeat_ngram_size)

if __name__ == "__main__":
    main()