OpenTransformer committed on
Commit
087ce02
·
verified ·
1 Parent(s): 411e65d

Upload 2 files

Files changed (2)
  1. 5apg (2).py +752 -0
  2. step03297683 (1).pt +3 -0
5apg (2).py ADDED
@@ -0,0 +1,752 @@
+ #!/usr/bin/env python3
+ # 5L.py — AR-only trainer/decoder (Qwen3 tokenizer)
+ # Fresh-start safe, AMP dtype auto, OOM backoff, progressive block growth.
+ # Sampling: repetition/presence/frequency penalties, top-k/top-p/min-p, greedy, no-repeat-ngrams.
+ # Checkpoints: time-based only (monotonic). Resume respects interval.
+ # FP8: --fp8-only [--fp8-fallback] attempts float8_e4m3fn autocast, otherwise bf16/FP16.
+ # Chinchilla-style target token calc uses ALL enabled params (core + AR head).
+
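+ # Typical invocations (illustrative; the script name is assumed, flags are defined in main() below):
+ #   python 5apg.py train --preset small --amp --auto_grow --save_every_sec 3600
+ #   python 5apg.py train --resume ckpts_joint/final.pt --amp
+ #   python 5apg.py infer --mode ar --ckpt ckpts_joint/final.pt --prompt "Once upon" --top_p 0.9 --max_new 200
+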
+ from __future__ import annotations
+ import argparse, json, math, pathlib, random, time, os, sys
+ from contextlib import nullcontext
+ from typing import Dict, Any, List, Optional, Tuple
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from datasets import load_dataset
+ from transformers import AutoTokenizer, logging as hf_log
+ from tqdm.auto import tqdm
+
+ # ───────────────────────── Globals ─────────────────────────
+ hf_log.set_verbosity_error()
+ DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ torch.backends.cuda.matmul.allow_tf32 = True
+ try:
+     torch.set_float32_matmul_precision("high")
+ except Exception:
+     pass
+
+ # Tokenizer
+ TOKENIZER_ID = os.environ.get("TOKENIZER_ID", "Qwen/Qwen3-235B-A22B-Thinking-2507")
+ tok = AutoTokenizer.from_pretrained(TOKENIZER_ID, use_fast=True, trust_remote_code=True)
+ if tok.pad_token is None:
+     tok.add_special_tokens({"pad_token": "[PAD]"})
+ VOCAB = max(tok.get_vocab().values()) + 1
+ BLANK = tok.pad_token_id
+ EOS = tok.eos_token_id if tok.eos_token_id is not None else tok.sep_token_id
+
+ PRESETS: Dict[str, Dict[str, int]] = {
+     "small": dict(d=512, layers=8, heads=16, rank=64),
+     "smallx2": dict(d=512, layers=16, heads=16, rank=64),
+     "base": dict(d=768, layers=12, heads=24, rank=96),
+ }
+
+ DEFAULT_BLOCK = 576
+ LR_CORE, LR_HEAD = 5e-5, 2e-4
+ DEFAULT_SAVE_SEC = 24 * 3600
+ CKDIR = pathlib.Path("ckpts_joint")
+
+ # ───────────────────────── Utilities ─────────────────────────
+ def rng_state():
+     if DEV.type == "cuda":
+         try:
+             return torch.cuda.get_rng_state(DEV)
+         except TypeError:
+             return torch.cuda.get_rng_state()
+     return torch.get_rng_state()
+
+ def _is_probably_ckpt(path: pathlib.Path) -> bool:
+     try:
+         return path.is_file() and path.suffix == ".pt" and not path.name.endswith(".pt.tmp") and path.stat().st_size > (1 << 20)
+     except Exception:
+         return False
+
+ def _resolve_ckpt(path: pathlib.Path) -> pathlib.Path | None:
+     try:
+         if path.is_dir():
+             cands = sorted([p for p in path.glob("*.pt") if _is_probably_ckpt(p)],
+                            key=lambda p: p.stat().st_mtime, reverse=True)
+             return cands[0] if cands else None
+         if path.suffix == ".tmp":
+             solid = path.with_suffix("")
+             return solid if _is_probably_ckpt(solid) else _resolve_ckpt(path.parent)
+         return path if _is_probably_ckpt(path) else _resolve_ckpt(path.parent)
+     except Exception:
+         return None
+
+ def _try_load(path: pathlib.Path, map_location="cpu"):
+     try:
+         return torch.load(path, map_location=map_location)  # honor the caller's map_location (was hard-coded to "cpu")
+     except Exception as e:
+         print(f"[ckpt-skip] {path} not usable: {e}")
+         return None
+
+ # ───────────────────────── AMP helper ─────────────────────────
+ try:
+     from torch.amp import autocast as _ac, GradScaler
+ except ImportError:
+     from torch.cuda.amp import autocast as _ac, GradScaler
+
+ def _supports_fp8() -> bool:
+     return hasattr(torch, "float8_e4m3fn")
+
+ def _auto_amp_dtype(prefer_fp8: bool = False):
+     if DEV.type != "cuda":
+         return torch.float32
+     if prefer_fp8 and _supports_fp8():
+         return torch.float8_e4m3fn
+     try:
+         if torch.cuda.is_bf16_supported():
+             return torch.bfloat16
+         return torch.float16
+     except Exception:
+         return torch.float16
+
+ def amp(enabled: bool, prefer_fp8: bool = False):
+     if not (enabled and DEV.type == "cuda"):
+         return nullcontext()
+     return _ac(device_type="cuda", dtype=_auto_amp_dtype(prefer_fp8=prefer_fp8))
+
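+ # Usage sketch: `with amp(True): out = model(x)` autocasts to bf16 where the GPU
+ # supports it, else fp16; on CPU, or when disabled, amp() degrades to a no-op nullcontext.
+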
+ # ───────────────────────── Data stream ─────────────────────────
+ def token_stream(ds_name: str, target: int, seed: int = 42):
+     ds = load_dataset(ds_name, split="train", streaming=True)
+     ds = ds.shuffle(buffer_size=10_000, seed=seed)
+     emitted = 0
+     for ex in ds:
+         enc = tok.encode(ex["text"])
+         if EOS is not None and (len(enc) == 0 or enc[-1] != EOS):
+             enc = enc + [EOS]
+         for t in enc:
+             yield t
+             emitted += 1
+             if emitted >= target:
+                 return
+
+ # ───────────────────────── Relative positional bias (ALiBi) ─────────────────────────
+ def _alibi_slopes(n_heads: int):
+     import math
+     def pow2slopes(n):
+         start = 2 ** (-2 ** -(math.log2(n) - 3))
+         ratio = start
+         return [start * (ratio ** i) for i in range(n)]
+     if math.log2(n_heads).is_integer():
+         vals = pow2slopes(n_heads)
+     else:
+         closest = 2 ** math.floor(math.log2(n_heads))
+         vals = pow2slopes(closest)
+         extra = pow2slopes(2 * closest)
+         vals += extra[0::2][: n_heads - closest]
+     return torch.tensor(vals, device=DEV).view(1, n_heads, 1, 1)
+
+ def alibi_bias(n_heads: int, n_tokens: int):
+     i = torch.arange(n_tokens, device=DEV).view(1, 1, n_tokens, 1)
+     j = torch.arange(n_tokens, device=DEV).view(1, 1, 1, n_tokens)
+     # distance from query i back to key j; (j - i) here would clamp to all zeros
+     # under a causal mask, making the bias a no-op
+     dist = (i - j).clamp_min(0)
+     slopes = _alibi_slopes(n_heads)
+     return -slopes * dist
+
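+ # Sanity sketch (defined for illustration only, never called by the trainer):
+ def _alibi_demo():
+     b = alibi_bias(4, 3)  # shape (1, heads=4, queries=3, keys=3)
+     # for query 2, biases over keys [0, 1, 2] are [-2s, -s, 0] for each head's slope s
+     return b
+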
+ # ───────────────────────── Model components ─────────────────────────
+ class LowRankMHA(nn.Module):
+     def __init__(self, d: int, h: int, r: int, use_relpos: bool = True):
+         super().__init__()
+         assert d % h == 0, "d must be divisible by number of heads"
+         self.h, self.dk = h, d // h
+         self.use_relpos = use_relpos
+         self.q = nn.Linear(d, d, bias=False)
+         self.k = nn.Linear(d, d, bias=False)
+         self.v = nn.Linear(d, d, bias=False)
+         self.U = nn.Parameter(torch.randn(self.dk, r))
+         nn.init.orthogonal_(self.U)
+         self.proj = nn.Linear(h * r, d, bias=False)
+         self.drop = nn.Dropout(0.1)
+
+     def _proj(self, x):
+         B, N, _ = x.shape
+         return (x.view(B, N, self.h, self.dk).transpose(1, 2) @ self.U)
+
+     def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None,
+                 rel_bias_tokens: Optional[int] = None,
+                 kv_cache: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+                 use_cache: bool = False):
+         q = self._proj(self.q(x))
+         k_new = self._proj(self.k(x))
+         v_new = self._proj(self.v(x))
+
+         if kv_cache is None:
+             k, v = k_new, v_new
+         else:
+             k, v = kv_cache
+             if use_cache:
+                 k = torch.cat([k, k_new], dim=2)
+                 v = torch.cat([v, v_new], dim=2)
+
+         att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk)
+
+         if q.size(2) == k.size(2):
+             if self.use_relpos and rel_bias_tokens is not None:
+                 att = att + alibi_bias(self.h, rel_bias_tokens)
+             if mask is not None:
+                 att = att + mask
+
+         z = (att.softmax(-1) @ v).transpose(1, 2)
+         z = z.reshape(x.size(0), x.size(1), -1)
+         out = self.drop(self.proj(z))
+         return (out, (k, v)) if use_cache else out
+
+ class Block(nn.Module):
+     def __init__(self, d: int, h: int, r: int):
+         super().__init__()
+         self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
+         self.mha = LowRankMHA(d, h, r, use_relpos=True)
+         self.ff = nn.Sequential(nn.Linear(d, 4 * d), nn.ReLU(), nn.Linear(4 * d, d))
+
+     def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor],
+                 kv: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+                 use_cache: bool = False):
+         n = x.size(1)
+         if use_cache:
+             y, new_kv = self.mha(self.ln1(x), mask, rel_bias_tokens=n if mask is not None else None, kv_cache=kv, use_cache=True)
+             x = x + y
+             x = x + self.ff(self.ln2(x))
+             return x, new_kv
+         else:
+             x = x + self.mha(self.ln1(x), mask, rel_bias_tokens=n)
+             return x + self.ff(self.ln2(x))
+
+ class Encoder(nn.Module):
+     def __init__(self, cfg: Dict[str, int]):
+         super().__init__()
+         d, l, h, r = cfg["d"], cfg["layers"], cfg["heads"], cfg["rank"]
+         self.emb = nn.Embedding(VOCAB, d)
+         self.blocks = nn.ModuleList([Block(d, h, r) for _ in range(l)])
+         self.ln = nn.LayerNorm(d)
+
+     def forward(self, ids: torch.Tensor, mask: Optional[torch.Tensor],
+                 kv_caches: Optional[List[Optional[Tuple[torch.Tensor, torch.Tensor]]]] = None,
+                 use_cache: bool = False):
+         x = self.emb(ids)
+         if not use_cache:
+             for blk in self.blocks:
+                 x = blk(x, mask)
+             return self.ln(x)
+         new_kvs: List[Tuple[torch.Tensor, torch.Tensor]] = []
+         for i, blk in enumerate(self.blocks):
+             kv = kv_caches[i] if (kv_caches is not None) else None
+             x, kv_out = blk(x, mask, kv, use_cache=True)
+             new_kvs.append(kv_out)
+         return self.ln(x), new_kvs
+
+ class ARHead(nn.Module):
+     def __init__(self, d):
+         super().__init__()
+         self.proj = nn.Linear(d, VOCAB)
+
+     def forward(self, h):
+         return self.proj(h)
+
+ # ───────────────────────── Masks ─────────────────────────
+ def causal_mask(n):
+     m = torch.full((1, 1, n, n), float("-inf"), device=DEV)
+     return torch.triu(m, 1)
+
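+ # e.g. causal_mask(3)[0, 0] ==
+ #   [[0., -inf, -inf],
+ #    [0.,   0., -inf],
+ #    [0.,   0.,   0.]]
+ # so adding it to attention scores lets position i attend only to positions j <= i.
+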
+ # ───────────────────────── Checkpoint helpers ─────────────────────────
+ def save_ckpt(path: pathlib.Path, core: nn.Module, ar_h: nn.Module,
+               opt: torch.optim.Optimizer, scaler: GradScaler, meta: Dict[str, Any]):
+     path.parent.mkdir(exist_ok=True, parents=True)
+     tmp = path.with_suffix(path.suffix + ".tmp")
+     state = {
+         "core": core.state_dict(),
+         "ar": ar_h.state_dict(),
+         "opt": opt.state_dict(),
+         "scaler": scaler.state_dict(),
+         "cfg": meta.get("cfg"),
+         "tokenizer_id": TOKENIZER_ID,
+         **{k: v for k, v in meta.items() if k not in {"cfg"}},
+     }
+     torch.save(state, tmp, _use_new_zipfile_serialization=False)
+     tmp.replace(path)  # atomic rename: a crash never leaves a half-written .pt
+     (path.parent / "latest.json").write_text(json.dumps({"path": str(path), "step": meta["step"]}))
+     print(f"\n✓ saved checkpoint {path.name}")
+
+ def load_ckpt(path: pathlib.Path, core: nn.Module, ar_h: nn.Module,
+               opt: torch.optim.Optimizer, scaler: GradScaler):
+     p = _resolve_ckpt(path) or path
+     ck = _try_load(p, map_location="cpu")
+     if ck is None:
+         raise FileNotFoundError(f"No valid checkpoint at {p}")
+     core.load_state_dict(ck["core"])
+     if "ar" in ck:
+         ar_h.load_state_dict(ck["ar"])
+     opt.load_state_dict(ck["opt"])
+     scaler.load_state_dict(ck["scaler"])
+     return ck.get("step", 0), ck.get("seen_tok", 0), ck.get("wall_time", time.time())
+
+ def _safe_load_any(path: pathlib.Path, tgt: nn.Module, key: str | None = None, rename: str | None = None):
+     p = _resolve_ckpt(path) or path
+     if not p or not p.exists():
+         return 0
+     ck = _try_load(p, map_location="cpu")
+     if ck is None:
+         return 0
+     sd = ck.get(key, ck) if key else ck
+     if isinstance(sd, dict) and "state_dict" in sd:
+         sd = sd["state_dict"]
+     if rename:
+         sd = {k.replace(rename, "proj."): v for k, v in sd.items() if rename in k}
+     tgt_sd = tgt.state_dict()
+     filt = {k: v for k, v in sd.items() if k in tgt_sd and v.shape == tgt_sd[k].shape}
+     if filt:
+         tgt.load_state_dict(filt, strict=False)
+     return len(filt)
+
+ def infer_cfg_from_ckpt(path: pathlib.Path):
+     p = _resolve_ckpt(path) or path
+     if not p.exists():
+         return None
+     sd = _try_load(p, map_location="cpu")
+     if sd is None:
+         return None
+     if isinstance(sd, dict) and "cfg" in sd and isinstance(sd["cfg"], dict):
+         return dict(sd["cfg"])
+     core = sd.get("core")
+     if core is None:
+         return None
+     emb_w = core.get("emb.weight")
+     if emb_w is None:
+         return None
+     d = emb_w.shape[1]
+     layer_ids = []
+     for k in core.keys():
+         if k.startswith("blocks."):
+             parts = k.split(".")
+             if len(parts) > 2 and parts[1].isdigit():
+                 layer_ids.append(int(parts[1]))
+     layers = (max(layer_ids) + 1) if layer_ids else None
+     U = core.get("blocks.0.mha.U")
+     heads = rank = None
+     if U is not None:
+         dk, r = U.shape
+         rank = r
+         heads = d // dk if dk > 0 else None
+     out = {"d": d}
+     if layers is not None: out["layers"] = layers
+     if heads is not None: out["heads"] = heads
+     if rank is not None: out["rank"] = rank
+     return out
+
+ # ───────────────────────── Train loop ─────────────────────────
+ def _parse_grow_plan(s: str) -> List[int]:
+     steps = []
+     for part in s.split(","):
+         part = part.strip()
+         if part:
+             v = int(part)
+             if v >= 128:
+                 steps.append(v)
+     return sorted(set(steps))
+
+ def _init_save_timers(resume_wall_time: float | None, interval_sec: int) -> Tuple[float, float]:
+     now_wall = time.time()
+     now_mono = time.monotonic()
+     if resume_wall_time is None:
+         return now_wall, now_mono
+     # Credit wall-clock time already elapsed since the last save, capped at one
+     # interval, so a resume shortly before a due checkpoint still saves promptly.
+     elapsed_wall = max(0.0, now_wall - resume_wall_time)
+     elapsed_clamped = min(float(interval_sec), elapsed_wall)
+     return now_wall, now_mono - elapsed_clamped
+
+ def _count_enabled_params(*modules: Optional[nn.Module]) -> int:
+     total = 0
+     for m in modules:
+         if m is not None:
+             total += sum(p.numel() for p in m.parameters())
+     return total
+
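+ # Worked example with hypothetical numbers: ~50M enabled parameters gives
+ # target_tokens = 25 * 50e6 = 1.25B; at the default 576-token block that is
+ # about 2.17M steps (1.25e9 // 576).
+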
+ def train(args):
+     cfg = PRESETS[args.preset].copy()
+
+     # Previous topology probe (unless --fresh)
+     if not args.fresh:
+         src_probe = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
+         prev_cfg = infer_cfg_from_ckpt(src_probe)
+     else:
+         prev_cfg = None
+
+     if prev_cfg:
+         cfg["d"] = prev_cfg.get("d", cfg["d"])
+         if prev_cfg.get("heads"): cfg["heads"] = prev_cfg["heads"]
+         if args.rank is None and prev_cfg.get("rank"): cfg["rank"] = prev_cfg["rank"]
+         if prev_cfg.get("layers"): cfg["layers"] = prev_cfg["layers"]
+         if args.x2 and prev_cfg.get("layers"): cfg["layers"] = max(cfg["layers"], prev_cfg["layers"] * 2)
+     if args.rank: cfg["rank"] = args.rank
+     if args.x2 and not prev_cfg: cfg["layers"] *= 2
+
+     BLOCK = args.block or DEFAULT_BLOCK
+
+     core = Encoder(cfg).to(DEV)
+     ar_h = ARHead(cfg["d"]).to(DEV)
+
+     # Warm start unless --fresh
+     loaded = 0
+     if not args.fresh:
+         src = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
+         src = _resolve_ckpt(src)
+         if src:
+             loaded += _safe_load_any(src, core, key="core")
+             loaded += _safe_load_any(src, ar_h, key="ar")
+         if loaded:
+             print(f"Warm-start: loaded {loaded} matching tensors from {src}")
+
+     # Optimizer
+     opt = torch.optim.AdamW([
+         {"params": core.parameters(), "lr": LR_CORE},
+         {"params": ar_h.parameters(), "lr": LR_HEAD},
+     ])
+     scaler = GradScaler(enabled=((args.amp or args.fp8_only) and DEV.type == "cuda"))
+     ce_tok = nn.CrossEntropyLoss(label_smoothing=0.1)
+
+     # ---------- resume bookkeeping ----------
+     start_step, seen_tok = 0, 0
+     last_save_wall = None
+     if args.resume and not args.fresh:
+         start_step, seen_tok, last_save_wall = load_ckpt(pathlib.Path(args.resume), core, ar_h, opt, scaler)
+         print(f"✓ resumed from step {start_step:,}, seen_tokens={seen_tok:,}")
+     last_save_wall, last_save_mono = _init_save_timers(last_save_wall, args.save_every_sec)
+
+     # Chinchilla-style target tokens: ALL enabled params (core + AR head)
+     if args.target_tokens:
+         target_tokens = args.target_tokens
+     else:
+         enabled_param_count = _count_enabled_params(core, ar_h)
+         target_tokens = int(25 * enabled_param_count)
+
+     new_tokens_needed = target_tokens - seen_tok
+     if new_tokens_needed <= 0:
+         print("Target already reached – nothing to train.")
+         return
+     new_steps = new_tokens_needed // BLOCK
+     if args.steps:
+         new_steps = min(new_steps, args.steps)
+     new_tokens_needed = new_steps * BLOCK
+
+     total_tokens_needed = seen_tok + new_tokens_needed
+     print(f"[auto-steps] {new_steps:,} training steps (@ {BLOCK} tokens/step)")
+
+     # Progressive growth plan
+     grow_plan = _parse_grow_plan(args.grow_plan) if args.auto_grow else []
+     if args.auto_grow:
+         if BLOCK not in grow_plan:
+             grow_plan = sorted(set(grow_plan + [BLOCK]))
+         print(f"[auto-grow] plan: {grow_plan} every {args.grow_every_steps} steps")
+
+     # FP8 guard
+     if args.fp8_only and not _supports_fp8() and not args.fp8_fallback:
+         raise RuntimeError("FP8 not supported by your torch build/hardware. Use --fp8-fallback to continue with bf16.")
+
+     stream = token_stream(args.source, target_tokens, seed=42)
+     buf: list[int] = []
+     pbar = tqdm(total=total_tokens_needed, initial=seen_tok, unit="tok")
+     step = start_step
+     steps_since_last_grow = 0
+
+     while seen_tok < total_tokens_needed:
+         # ------- assemble one batch -------
+         try:
+             while len(buf) < BLOCK:
+                 buf.append(next(stream))
+         except StopIteration:
+             break
+         ids = torch.tensor(buf[:BLOCK], device=DEV).unsqueeze(0)  # (B=1, N)
+         buf = buf[BLOCK:]
+
+         tgt_ar = ids.clone()
+
+         try:
+             with amp(args.amp or args.fp8_only, prefer_fp8=args.fp8_only and (_supports_fp8() or args.fp8_fallback)):
+                 h_ar = core(ids, causal_mask(ids.size(1)))
+                 logits_ar = ar_h(h_ar)[:, :-1]
+                 loss = ce_tok(logits_ar.reshape(-1, VOCAB), tgt_ar[:, 1:].reshape(-1))
+
+             scaler.scale(loss).backward()
+             scaler.unscale_(opt)
+             nn.utils.clip_grad_norm_(core.parameters(), 1.0)  # note: only the core is clipped, not the AR head
+             scaler.step(opt)
+             scaler.update()
+             opt.zero_grad(set_to_none=True)
+
+         except RuntimeError as e:
+             msg = str(e).lower()
+             if "out of memory" in msg or "cuda error" in msg:
+                 new_block = max(128, BLOCK // 2)
+                 if new_block < BLOCK:
+                     print(f"\n[OOM] reducing block from {BLOCK} -> {new_block}")
+                     BLOCK = new_block
+                 if DEV.type == "cuda":
+                     torch.cuda.empty_cache()
+                 buf = ids[0].tolist() + buf  # requeue the failed tokens
+                 steps_since_last_grow = 0
+                 continue
+             raise
+
+         # progress
+         step += 1
+         seen_tok += BLOCK
+         pbar.update(BLOCK)
+         pbar.set_postfix(loss=f"{loss.item():.3f}", block=BLOCK)
+
+         # time-based checkpoint cadence only (monotonic)
+         if args.save_every_sec > 0:
+             now_mono = time.monotonic()
+             if now_mono - last_save_mono >= args.save_every_sec:
+                 ck_name = f"step{step:08d}.pt"
+                 save_ckpt(
+                     pathlib.Path(args.save_dir) / ck_name,
+                     core, ar_h, opt, scaler,
+                     meta={
+                         "cfg": cfg,
+                         "step": step,
+                         "seen_tok": seen_tok,
+                         "wall_time": time.time(),
+                         "py_state": random.getstate(),
+                         "torch_state": rng_state(),
+                         "fp8_only": args.fp8_only,
+                     },
+                 )
+                 last_save_mono = now_mono
+
+         # progressive growth
+         if args.auto_grow:
+             steps_since_last_grow += 1
+             if steps_since_last_grow >= args.grow_every_steps:
+                 steps_since_last_grow = 0
+                 try:
+                     idx = grow_plan.index(BLOCK)
+                     if idx + 1 < len(grow_plan):
+                         candidate = grow_plan[idx + 1]
+                         print(f"[auto-grow] attempting BLOCK {BLOCK} -> {candidate}")
+                         BLOCK = candidate
+                         if DEV.type == "cuda":
+                             torch.cuda.empty_cache()
+                     else:
+                         print("[auto-grow] at max planned block; no further growth.")
+                 except ValueError:
+                     # BLOCK was shrunk by OOM backoff and left the plan; re-add it, then step up.
+                     grow_plan = sorted(set(grow_plan + [BLOCK]))
+                     idx = grow_plan.index(BLOCK)
+                     if idx + 1 < len(grow_plan):
+                         candidate = grow_plan[idx + 1]
+                         print(f"[auto-grow] moving to planned BLOCK {candidate}")
+                         BLOCK = candidate
+                         if DEV.type == "cuda":
+                             torch.cuda.empty_cache()
+
+     pbar.close()
+
+     # final save
+     save_ckpt(
+         pathlib.Path(args.save_dir) / "final.pt",
+         core, ar_h, opt, scaler,
+         meta={
+             "cfg": cfg,
+             "step": step,
+             "seen_tok": seen_tok,
+             "wall_time": time.time(),
+             "py_state": random.getstate(),
+             "torch_state": rng_state(),
+             "fp8_only": args.fp8_only,
+         },
+     )
+     print("🎉 training complete")
+
+ # ───────────────────────── Sampling utils ─────────────────────────
+ def _apply_no_repeat_ngram(logits: torch.Tensor, ids: torch.Tensor, n: int):
+     if n <= 0 or ids.size(1) < n - 1:
+         return logits
+     prefix = ids[0, -(n - 1):].tolist()
+     banned = []
+     tokens = ids[0].tolist()
+     for i in range(len(tokens) - n + 1):
+         if tokens[i:i + n - 1] == prefix:
+             banned.append(tokens[i + n - 1])
+     if banned:
+         banned_idx = torch.tensor(banned, device=logits.device, dtype=torch.long)
+         logits[..., banned_idx] = float("-inf")
+     return logits
+
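+ # Worked example: n=3 with history [5, 7, 9, 5, 7]: the current bigram (5, 7)
+ # previously occurred followed by 9, so token 9 is banned for the next step.
+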
+ def _apply_rep_presence_frequency(
+     logits: torch.Tensor, ids: torch.Tensor, last_n: int,
+     repetition_penalty: float, presence_penalty: float, frequency_penalty: float
+ ):
+     if ids.numel() == 0:
+         return logits
+     hist = ids[0, -last_n:].to(torch.long) if last_n > 0 else ids[0].to(torch.long)
+     if hist.numel() == 0:
+         return logits
+     uniq, counts = torch.unique(hist, return_counts=True)
+     if presence_penalty != 0.0 or frequency_penalty != 0.0:
+         adjust = presence_penalty + frequency_penalty * counts.to(logits.dtype)
+         logits[..., uniq] = logits[..., uniq] - adjust
+     if repetition_penalty and abs(repetition_penalty - 1.0) > 1e-6:
+         sel = logits[..., uniq]
+         sel = torch.where(sel > 0, sel / repetition_penalty, sel * repetition_penalty)
+         logits[..., uniq] = sel
+     return logits
+
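+ # In OpenAI-style terms the first branch is logit[t] -= presence + frequency * count(t)
+ # over the penalized window; the second rescales like the common CTRL-style
+ # repetition penalty (divide positive logits, multiply negative ones).
+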
+ def _filter_top_k_top_p_min_p(
+     logits: torch.Tensor, top_k: int, top_p: float, min_p: float, temperature: float
+ ) -> torch.Tensor:
+     logits = logits / max(temperature, 1e-8)
+     if logits.dim() == 1:
+         logits = logits.unsqueeze(0)
+     probs = logits.softmax(-1)
+
+     V = probs.size(-1)
+     if top_k and top_k < V:
+         vals, idx = torch.topk(probs, top_k, dim=-1)
+         mask = torch.full_like(probs, 0.0)
+         mask.scatter_(1, idx, 1.0)
+         probs = probs * mask
+
+     if top_p < 1.0:
+         sorted_probs, sorted_idx = torch.sort(probs, descending=True, dim=-1)
+         cumsum = torch.cumsum(sorted_probs, dim=-1)
+         keep = cumsum <= top_p
+         keep[..., 0] = True  # always keep the most likely token
+         mask = torch.zeros_like(probs)
+         mask.scatter_(1, sorted_idx, keep.to(mask.dtype))
+         probs = probs * mask
+
+     if min_p > 0.0:
+         probs = torch.where(probs >= min_p, probs, torch.zeros_like(probs))
+
+     # If filtering removed every token, fall back to the argmax token. The scatter
+     # source preserves non-empty rows' existing values (writing a constant 0 there,
+     # as before, would zero out the argmax probability of non-empty rows).
+     sums = probs.sum(-1, keepdim=True)
+     empty = (sums == 0)
+     if empty.any():
+         fallback_idx = logits.argmax(-1, keepdim=True)
+         probs = torch.where(empty, torch.zeros_like(probs), probs)
+         probs.scatter_(-1, fallback_idx, torch.where(empty, torch.ones_like(sums), probs.gather(-1, fallback_idx)))
+
+     probs = probs / probs.sum(-1, keepdim=True)
+     return probs
+
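+ # Usage sketch (illustrative only, not called by the decoder loop):
+ def _sampling_demo():
+     logits = torch.tensor([2.0, 1.0, 0.1, -1.0])
+     probs = _filter_top_k_top_p_min_p(logits, top_k=2, top_p=1.0, min_p=0.0, temperature=1.0)
+     return probs.multinomial(1)  # samples only from the top-2 tokens
+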
+ # ───────────────────────── Inference helpers ─────────────────────────
+ def load_joint(ckpt: str, preset: str):
+     path = _resolve_ckpt(pathlib.Path(ckpt)) or pathlib.Path(ckpt)
+     sd = _try_load(path, map_location="cpu")
+     if sd is None:
+         raise FileNotFoundError(f"No valid checkpoint at {path}")
+     cfg = sd["cfg"] if "cfg" in sd and isinstance(sd["cfg"], dict) else (infer_cfg_from_ckpt(path) or PRESETS[preset])
+     core = Encoder(cfg).to(DEV)
+     ar_h = ARHead(cfg["d"]).to(DEV)
+     core.load_state_dict(sd["core"])
+     if "ar" in sd:
+         ar_h.load_state_dict(sd["ar"])
+     return core, ar_h
+
+ @torch.no_grad()
+ def ar_decode(core, ar_h, prompt: str, max_new: int, T: float,
+               greedy: bool, top_k: int, top_p: float, min_p: float,
+               repetition_penalty: float, presence_penalty: float,
+               frequency_penalty: float, penalty_last_n: int,
+               no_repeat_ngram_size: int,
+               use_fp8: bool, fp8_fallback: bool):
+     # Tokenize prompt and remember its length
+     prompt_ids = tok.encode(prompt)
+     if len(prompt_ids) == 0:
+         ids = torch.tensor([[EOS] if EOS is not None else [0]], device=DEV)
+         prompt_len = 0
+     else:
+         ids = torch.tensor([prompt_ids], device=DEV)
+         prompt_len = ids.size(1)
+
+     t0 = time.time()
+     with amp(use_fp8, prefer_fp8=use_fp8 and (_supports_fp8() or fp8_fallback)):
+         h_full, kvs = core(ids, causal_mask(ids.size(1)), use_cache=True)
+         for _ in range(max_new):
+             logits = ar_h(h_full)[:, -1]
+             logits = _apply_no_repeat_ngram(logits, ids, no_repeat_ngram_size)
+             logits = _apply_rep_presence_frequency(
+                 logits, ids, penalty_last_n, repetition_penalty, presence_penalty, frequency_penalty
+             )
+             if greedy:
+                 nxt = logits.argmax(-1, keepdim=True)
+             else:
+                 probs = _filter_top_k_top_p_min_p(logits.squeeze(0), top_k, top_p, min_p, T)
+                 nxt = probs.multinomial(1)
+             ids = torch.cat([ids, nxt.unsqueeze(0) if nxt.dim() == 1 else nxt], 1)
+             x = ids[:, -1:]
+             h_full, kvs = core(x, None, kv_caches=kvs, use_cache=True)
+
+     # Decode prompt vs generation separately
+     full_ids = ids[0].tolist()
+     prompt_text = tok.decode(full_ids[:prompt_len], skip_special_tokens=True)
+     gen_text = tok.decode(full_ids[prompt_len:], skip_special_tokens=True)
+
+     # Color the prompt in bright gray (90), leave generation default
+     if sys.stdout.isatty():
+         sys.stdout.write("\x1b[90m")  # bright gray
+         sys.stdout.write(prompt_text)
+         sys.stdout.write("\x1b[0m")  # reset
+         sys.stdout.write(gen_text + "\n")
+     else:
+         sys.stdout.write(prompt_text + gen_text + "\n")
+
+     print(f"[{len(full_ids) - prompt_len} tok in {time.time() - t0:.2f}s]")
+
+ # ───────────────────────── CLI ─────────────────────────
+ def main():
+     ap = argparse.ArgumentParser()
+     sub = ap.add_subparsers(dest="cmd", required=True)
+
+     tr = sub.add_parser("train")
+     tr.add_argument("--preset", choices=PRESETS, default="small")
+     tr.add_argument("--rank", type=int)
+     tr.add_argument("--block", type=int, default=DEFAULT_BLOCK)
+     tr.add_argument("--source", default="cerebras/SlimPajama-627B")
+     tr.add_argument("--target_tokens", type=int)
+     tr.add_argument("--steps", type=int)
+     tr.add_argument("--amp", action="store_true")
+     tr.add_argument("--save_every_sec", type=int, default=DEFAULT_SAVE_SEC)
+     tr.add_argument("--save_dir", default=str(CKDIR))
+     tr.add_argument("--resume", type=str)
+     tr.add_argument("--x2", action="store_true", help="~2x params by doubling layers")
+     tr.add_argument("--warmstart_from", type=str, default=None, help="Path to previous final.pt for shape-safe warm start")
+     tr.add_argument("--fresh", action="store_true", help="Start from scratch: do not probe or load any checkpoints")
+     # FP8 control
+     tr.add_argument("--fp8-only", action="store_true", dest="fp8_only", help="Attempt FP8 autocast (float8_e4m3fn) for compute")
+     tr.add_argument("--fp8-fallback", action="store_true", dest="fp8_fallback", help="If FP8 unsupported, fall back to bf16 instead of erroring")
+     # Progressive block growth
+     tr.add_argument("--auto_grow", action="store_true", help="Automatically grow block size over time")
+     tr.add_argument("--grow_plan", type=str, default="576,640,768,896,1024", help="Comma list of block sizes to try in order")
+     tr.add_argument("--grow_every_steps", type=int, default=50000, help="Steps between growth attempts")
+
+     inf = sub.add_parser("infer")
+     inf.add_argument("--mode", choices=["ar"], required=True)
+     inf.add_argument("--ckpt", required=True)
+     inf.add_argument("--preset", default="small")
+     inf.add_argument("--prompt", required=True)
+     inf.add_argument("--max_new", type=int, default=120)
+     inf.add_argument("--temperature", type=float, default=1.0)
+
+     # Decode controls
+     inf.add_argument("--greedy", action="store_true", help="Greedy decode (overrides sampling)")
+     inf.add_argument("--top_k", type=int, default=0)
+     inf.add_argument("--top_p", type=float, default=1.0)
+     inf.add_argument("--min_p", type=float, default=0.0)
+     inf.add_argument("--repetition_penalty", type=float, default=1.0)
+     inf.add_argument("--presence_penalty", type=float, default=0.0)
+     inf.add_argument("--frequency_penalty", type=float, default=0.0)
+     inf.add_argument("--penalty_last_n", type=int, default=64)
+     inf.add_argument("--no_repeat_ngram_size", type=int, default=0)
+
+     # Inference FP8
+     inf.add_argument("--fp8-only", action="store_true", dest="fp8_only", help="Attempt FP8 autocast during decode")
+     inf.add_argument("--fp8-fallback", action="store_true", default=False, dest="fp8_fallback", help=argparse.SUPPRESS)
+
+     args = ap.parse_args()
+     if args.cmd == "train":
+         if args.fp8_only:
+             print("[init] FP8-only requested. If FP8 kernels are missing, using --fp8-fallback will continue with bf16.")
+         train(args)
+     else:
+         core, ar_h = load_joint(args.ckpt, args.preset)
+         ar_decode(core, ar_h, args.prompt, args.max_new, args.temperature,
+                   args.greedy, args.top_k, args.top_p, args.min_p,
+                   args.repetition_penalty, args.presence_penalty,
+                   args.frequency_penalty, args.penalty_last_n,
+                   args.no_repeat_ngram_size,
+                   use_fp8=args.fp8_only, fp8_fallback=getattr(args, "fp8_fallback", False))
+
+ if __name__ == "__main__":
+     main()
step03297683 (1).pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd82283113455b9b0527ed93c11ec2783c7490ca1c3937708d9b4a9eb6627e96
+ size 2521265803