OpenTransformer committed on
Commit
83dd932
·
verified ·
1 Parent(s): 145d987

Upload 5p.py

Files changed (1)
5p.py +1003 -0
5p.py ADDED
@@ -0,0 +1,1003 @@
#!/usr/bin/env python3
# 5apg.py - AR-only trainer/decoder (DeepSeek tokenizer)
# Fresh-start safe, AMP dtype auto, OOM backoff, progressive block growth.
# Sampling: repetition/presence/frequency penalties, top-k/top-p/min-p, greedy, no-repeat-ngrams.
# Checkpoints: time-based and step-based (monotonic). Resume respects interval.
# FP8: --fp8-only [--fp8-fallback] attempts float8_e4m3fn autocast, otherwise bf16/FP16.
# Chinchilla-style target token calc uses ALL enabled params (core + AR head).
# Robust streaming: retries, dataset fallbacks, dataset:config, and local JSONL support.
# Chat SFT: --chat uses tokenizer.apply_chat_template on records with {role, content} lists.

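# Example invocations (a sketch assembled from the argparse flags defined in
# main() below; dataset availability, GPU memory, and local paths are assumptions):
#
#   python 5p.py train --preset base17 --amp --auto_grow --grow_plan 576,768,1024
#   python 5p.py train --preset base17 --amp --after_sft_steps 2000
#   python 5p.py infer --mode ar --ckpt ckpts_joint/final.pt --prompt "Hello" --decode_preset balanced
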
from __future__ import annotations
import argparse, json, math, pathlib, random, time, os, sys
from contextlib import nullcontext
from typing import Dict, Any, List, Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from datasets import load_dataset, DownloadConfig
from transformers import AutoTokenizer, logging as hf_log
from tqdm.auto import tqdm

# ───────────────────────── Globals ─────────────────────────
hf_log.set_verbosity_error()
DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cuda.matmul.allow_tf32 = True
try:
    torch.set_float32_matmul_precision("high")
except Exception:
    pass

# ───────────────────────── Determinism ─────────────────────────
def set_seed(seed: int | None):
    if seed is None:
        return
    random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    try:
        import numpy as _np
        _np.random.seed(seed)
    except Exception:
        pass

# Tokenizer (default DeepSeek V3.2 Exp)
TOKENIZER_ID = os.environ.get("TOKENIZER_ID", "deepseek-ai/DeepSeek-V3.2-Exp")
tok = AutoTokenizer.from_pretrained(TOKENIZER_ID, use_fast=True, trust_remote_code=True)
if tok.pad_token is None:
    tok.add_special_tokens({"pad_token": "[PAD]"})
VOCAB = max(tok.get_vocab().values()) + 1
BLANK = tok.pad_token_id
EOS = tok.eos_token_id if tok.eos_token_id is not None else tok.sep_token_id

PRESETS: Dict[str, Dict[str, int]] = {
    "small": dict(d=512, layers=8, heads=16, rank=64),
    "smallx2": dict(d=512, layers=16, heads=16, rank=64),
    "base": dict(d=768, layers=12, heads=24, rank=96),
    # requested: base version with 17 layers
    "base17": dict(d=768, layers=17, heads=24, rank=96),
}

DEFAULT_BLOCK = 576
LR_CORE, LR_HEAD = 5e-5, 2e-4
DEFAULT_SAVE_SEC = 24 * 3600
CKDIR = pathlib.Path("ckpts_joint")

# Defaults for automatic after-SFT if user only sets --after_sft_steps
DEFAULT_AFTER_SFT_SOURCES = "mlabonne/opc-sft-stage2-chat,HuggingFaceH4/ultrachat_200k"
DEFAULT_AFTER_SFT_BLOCK = 1120

# New: default pretrain sources (replaces SlimPajama/C4)
DEFAULT_PRETRAIN_SOURCES = "HuggingFaceFW/fineweb-edu,togethercomputer/RedPajama-Data-1T,oscar-corpus/OSCAR-2201:en"

# ───────────────────────── Utilities ─────────────────────────
def rng_state():
    if DEV.type == "cuda":
        try:
            return torch.cuda.get_rng_state(DEV)
        except TypeError:
            return torch.cuda.get_rng_state()
    return torch.get_rng_state()

def _is_probably_ckpt(path: pathlib.Path) -> bool:
    try:
        return path.is_file() and path.suffix == ".pt" and not path.name.endswith(".pt.tmp") and path.stat().st_size > (1 << 20)
    except Exception:
        return False

def _resolve_ckpt(path: pathlib.Path) -> pathlib.Path | None:
    try:
        if path.is_dir():
            cands = sorted([p for p in path.glob("*.pt") if _is_probably_ckpt(p)],
                           key=lambda p: p.stat().st_mtime, reverse=True)
            return cands[0] if cands else None
        if path.suffix == ".tmp":
            solid = path.with_suffix("")
            return solid if _is_probably_ckpt(solid) else _resolve_ckpt(path.parent)
        return path if _is_probably_ckpt(path) else _resolve_ckpt(path.parent)
    except Exception:
        return None

def _try_load(path: pathlib.Path, map_location="cpu"):
    try:
        # honor the caller-supplied map_location instead of hardcoding "cpu"
        return torch.load(path, map_location=map_location)
    except Exception as e:
        print(f"[ckpt-skip] {path} not usable: {e}")
        return None

# ───────────────────────── AMP helper ─────────────────────────
try:
    from torch.amp import autocast as _ac, GradScaler
except ImportError:
    from torch.cuda.amp import autocast as _ac, GradScaler

def _supports_fp8() -> bool:
    return hasattr(torch, "float8_e4m3fn")

def _auto_amp_dtype(prefer_fp8: bool = False):
    if DEV.type != "cuda":
        return torch.float32
    if prefer_fp8 and _supports_fp8():
        # best-effort: some torch builds reject float8 autocast, hence --fp8-fallback
        return torch.float8_e4m3fn
    try:
        if torch.cuda.is_bf16_supported():
            return torch.bfloat16
        return torch.float16
    except Exception:
        return torch.float16

def amp(enabled: bool, prefer_fp8: bool = False):
    if not (enabled and DEV.type == "cuda"):
        return nullcontext()
    return _ac(device_type="cuda", dtype=_auto_amp_dtype(prefer_fp8=prefer_fp8))

# ───────────────────────── Chat helpers ─────────────────────────
def _coerce_role(r: str) -> str:
    r = (r or "").lower()
    if r in {"user", "human", "customer", "questioner"}:
        return "user"
    if r in {"assistant", "gpt", "bot", "agent", "answerer"}:
        return "assistant"
    if r in {"system", "context", "instruction"}:
        return "system"
    return r or "user"

def _render_chat_text_from_ex(ex: dict, messages_key: str, add_generation_prompt: bool) -> Optional[str]:
    msgs = ex.get(messages_key)
    if msgs is None:
        for alt in ("conversations", "dialog", "turns"):
            if isinstance(ex.get(alt), list):
                msgs = ex[alt]
                break
    if isinstance(msgs, list) and msgs and isinstance(msgs[0], dict):
        try:
            norm = []
            for m in msgs:
                role = _coerce_role(m.get("role", "")); content = m.get("content", m.get("text", ""))
                if not isinstance(content, str):
                    continue
                norm.append({"role": role, "content": content})
            if not norm:
                return None
            return tok.apply_chat_template(norm, tokenize=False, add_generation_prompt=add_generation_prompt)
        except Exception:
            return None
    for a, b in (("prompt", "response"), ("instruction", "output"), ("question", "answer")):
        if isinstance(ex.get(a), str) and isinstance(ex.get(b), str):
            return f"User: {ex[a]}\nAssistant: {ex[b]}"
    return None

# ───────────────────────── Robust streaming data ─────────────────────────
def _open_stream_one(ds_name: str, seed: int):
    if ":" in ds_name:
        base, config = ds_name.split(":", 1)
    else:
        base, config = ds_name, None
    dc = DownloadConfig(max_retries=5, use_etag=True, resume_download=True)
    if base == "json":
        if not config:
            raise ValueError("Use 'json:/path/to/file.jsonl' or glob like 'json:/data/*.jsonl'")
        data_files = {"train": config}
        ds = load_dataset("json", data_files=data_files, split="train", streaming=True, download_config=dc)
    else:
        if config:
            ds = load_dataset(base, config, split="train", streaming=True, download_config=dc)
        else:
            ds = load_dataset(base, split="train", streaming=True, download_config=dc)
    ds = ds.shuffle(buffer_size=10_000, seed=seed)
    return iter(ds)

def token_stream(args, target: int, seed: int = 42, max_retries: int = 999, *,
                 source: Optional[str] = None, chat: Optional[bool] = None,
                 chat_messages_key: Optional[str] = None, sft_add_generation_prompt: Optional[bool] = None,
                 dataset_field_text: Optional[str] = None):
    ds_names = source if source is not None else args.source
    sources = [s.strip() for s in ds_names.split(",") if s.strip()]
    if not sources:
        # Default replaced: use the three stable sources by default
        sources = [s.strip() for s in DEFAULT_PRETRAIN_SOURCES.split(",") if s.strip()]
    use_chat = args.chat if chat is None else chat
    msg_key = args.chat_messages_key if chat_messages_key is None else chat_messages_key
    add_gen = args.sft_add_generation_prompt if sft_add_generation_prompt is None else sft_add_generation_prompt
    text_key = args.dataset_field_text if dataset_field_text is None else dataset_field_text

    src_idx = 0; emitted = 0; it = None; attempts = 0; backoff_base = 2.0
    while emitted < target:
        try:
            if it is None:
                it = _open_stream_one(sources[src_idx], seed)
            ex = next(it)
            text = None
            if isinstance(ex, dict):
                if use_chat:
                    text = _render_chat_text_from_ex(ex, msg_key, add_gen)
                if text is None:
                    if text_key and isinstance(ex.get(text_key), str):
                        text = ex[text_key]
                    elif isinstance(ex.get("text"), str):
                        text = ex["text"]
            if not isinstance(text, str):
                attempts = 0; continue
            enc = tok.encode(text)
            if EOS is not None and (len(enc) == 0 or enc[-1] != EOS):
                enc.append(EOS)
            for t in enc:
                yield t; emitted += 1
                if emitted >= target:
                    return
            attempts = 0
        except StopIteration:
            it = None; src_idx = (src_idx + 1) % len(sources)
        except Exception as e:
            attempts += 1
            sleep_s = min(60.0, backoff_base ** min(attempts, 6))
            print(f"[stream-retry] source={sources[src_idx]} attempts={attempts} sleep={sleep_s:.1f}s reason={type(e).__name__}", flush=True)
            time.sleep(sleep_s); it = None
            if attempts % 5 == 0 and len(sources) > 1:
                src_idx = (src_idx + 1) % len(sources)
            if attempts > max_retries:
                raise

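# A minimal sketch of driving token_stream directly. The JSONL path is
# hypothetical, and the namespace only stubs the fields token_stream reads:
def _demo_token_stream():  # illustrative only; not called by the trainer
    import types
    ns = types.SimpleNamespace(source="json:/tmp/sample.jsonl", chat=False,
                               chat_messages_key="messages",
                               sft_add_generation_prompt=False,
                               dataset_field_text="text")
    first = list(token_stream(ns, 64))  # first 64 token ids, EOS-terminated docs
    print(len(first), first[:8])
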
# ───────────────────────── Relative positional bias (ALiBi) ─────────────────────────
def _alibi_slopes(n_heads: int):
    import math
    def pow2slopes(n):
        start = 2 ** (-2 ** -(math.log2(n) - 3))
        ratio = start
        return [start * (ratio ** i) for i in range(n)]
    if math.log2(n_heads).is_integer():
        vals = pow2slopes(n_heads)
    else:
        closest = 2 ** math.floor(math.log2(n_heads))
        vals = pow2slopes(closest)
        extra = pow2slopes(2 * closest)
        vals += extra[0::2][: n_heads - closest]
    return torch.tensor(vals, device=DEV).view(1, n_heads, 1, 1)

def alibi_bias(n_heads: int, n_tokens: int):
    i = torch.arange(n_tokens, device=DEV).view(1, 1, n_tokens, 1)
    j = torch.arange(n_tokens, device=DEV).view(1, 1, 1, n_tokens)
    # distance from query i back to key j; (j - i).clamp_min(0) would zero the
    # bias on every position the causal mask allows, making ALiBi a no-op
    dist = (i - j).clamp_min(0)
    slopes = _alibi_slopes(n_heads)
    return -slopes * dist

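# A small shape/sign sanity sketch for the helpers above (illustrative only;
# never called by the trainer):
def _demo_alibi():
    b = alibi_bias(n_heads=4, n_tokens=5)  # (1, heads, query, key)
    assert b.shape == (1, 4, 5, 5)
    # entry (i, j) is -slope * (i - j) for past keys and 0 on/after the diagonal
    print(b[0, 0])
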
# ───────────────────────── Model components ─────────────────────────
class LowRankMHA(nn.Module):
    def __init__(self, d: int, h: int, r: int, use_relpos: bool = True):
        super().__init__()
        assert d % h == 0, "d must be divisible by number of heads"
        self.h, self.dk = h, d // h
        self.use_relpos = use_relpos
        self.q = nn.Linear(d, d, bias=False)
        self.k = nn.Linear(d, d, bias=False)
        self.v = nn.Linear(d, d, bias=False)
        self.U = nn.Parameter(torch.randn(self.dk, r))
        nn.init.orthogonal_(self.U)
        self.proj = nn.Linear(h * r, d, bias=False)
        self.drop = nn.Dropout(0.1)

    def _proj(self, x):
        B, N, _ = x.shape
        return (x.view(B, N, self.h, self.dk).transpose(1, 2) @ self.U)

    def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None,
                rel_bias_tokens: Optional[int] = None,
                kv_cache: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
                use_cache: bool = False):
        q = self._proj(self.q(x))
        k_new = self._proj(self.k(x))
        v_new = self._proj(self.v(x))

        if kv_cache is None:
            k, v = k_new, v_new
        else:
            k, v = kv_cache
            if use_cache:
                k = torch.cat([k, k_new], dim=2)
                v = torch.cat([v, v_new], dim=2)

        att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk)

        if q.size(2) == k.size(2):
            if self.use_relpos and rel_bias_tokens is not None:
                att = att + alibi_bias(self.h, rel_bias_tokens)
            if mask is not None:
                att = att + mask

        z = (att.softmax(-1) @ v).transpose(1, 2)
        z = z.reshape(x.size(0), x.size(1), -1)
        out = self.drop(self.proj(z))
        return (out, (k, v)) if use_cache else out

class Block(nn.Module):
    def __init__(self, d: int, h: int, r: int):
        super().__init__()
        self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
        self.mha = LowRankMHA(d, h, r, use_relpos=True)
        self.ff = nn.Sequential(nn.Linear(d, 4 * d), nn.ReLU(), nn.Linear(4 * d, d))

    def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor],
                kv: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
                use_cache: bool = False):
        n = x.size(1)
        if use_cache:
            y, new_kv = self.mha(self.ln1(x), mask, rel_bias_tokens=n if mask is not None else None, kv_cache=kv, use_cache=True)
            x = x + y
            x = x + self.ff(self.ln2(x))
            return x, new_kv
        else:
            x = x + self.mha(self.ln1(x), mask, rel_bias_tokens=n)
            return x + self.ff(self.ln2(x))

class Encoder(nn.Module):
    def __init__(self, cfg: Dict[str, int]):
        super().__init__()
        d, l, h, r = cfg["d"], cfg["layers"], cfg["heads"], cfg["rank"]
        self.emb = nn.Embedding(VOCAB, d)
        self.blocks = nn.ModuleList([Block(d, h, r) for _ in range(l)])
        self.ln = nn.LayerNorm(d)

    def forward(self, ids: torch.Tensor, mask: Optional[torch.Tensor],
                kv_caches: Optional[List[Optional[Tuple[torch.Tensor, torch.Tensor]]]] = None,
                use_cache: bool = False):
        x = self.emb(ids)
        if not use_cache:
            for blk in self.blocks:
                x = blk(x, mask)
            return self.ln(x)
        new_kvs: List[Tuple[torch.Tensor, torch.Tensor]] = []
        for i, blk in enumerate(self.blocks):
            kv = kv_caches[i] if (kv_caches is not None) else None
            x, kv_out = blk(x, mask, kv, use_cache=True)
            new_kvs.append(kv_out)
        return self.ln(x), new_kvs

class ARHead(nn.Module):
    def __init__(self, d):
        super().__init__()
        self.proj = nn.Linear(d, VOCAB)
    def forward(self, h): return self.proj(h)

# ───────────────────────── Masks ─────────────────────────
def causal_mask(n):
    m = torch.full((1, 1, n, n), float("-inf"), device=DEV)
    return torch.triu(m, 1)

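# Quick illustration of the additive mask (a sketch; not called anywhere):
def _demo_causal_mask():
    m = causal_mask(3)[0, 0]
    # row i may attend to columns j <= i; future columns carry -inf:
    #   [[0., -inf, -inf],
    #    [0.,   0., -inf],
    #    [0.,   0.,   0.]]
    print(m)
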
# ───────────────────────── Checkpoint helpers ─────────────────────────
def save_ckpt(path: pathlib.Path, core: nn.Module, ar_h: nn.Module,
              opt: torch.optim.Optimizer, scaler: GradScaler, meta: Dict[str, Any]):
    path.parent.mkdir(exist_ok=True, parents=True)
    tmp = path.with_suffix(path.suffix + ".tmp")
    state = {
        "core": core.state_dict(),
        "ar": ar_h.state_dict(),
        "opt": opt.state_dict(),
        "scaler": scaler.state_dict(),
        "cfg": meta.get("cfg"),
        "tokenizer_id": TOKENIZER_ID,
        **{k: v for k, v in meta.items() if k not in {"cfg"}},
    }
    torch.save(state, tmp, _use_new_zipfile_serialization=False)
    tmp.replace(path)
    (path.parent / "latest.json").write_text(json.dumps({"path": str(path), "step": meta["step"]}))
    print(f"\n✓ saved checkpoint {path.name}")

def load_ckpt(path: pathlib.Path, core: nn.Module, ar_h: nn.Module,
              opt: torch.optim.Optimizer, scaler: GradScaler):
    p = _resolve_ckpt(path) or path
    ck = _try_load(p, map_location="cpu")
    if ck is None:
        raise FileNotFoundError(f"No valid checkpoint at {p}")
    core.load_state_dict(ck["core"])
    if "ar" in ck:
        ar_h.load_state_dict(ck["ar"])
    opt.load_state_dict(ck["opt"])
    scaler.load_state_dict(ck["scaler"])
    return ck.get("step", 0), ck.get("seen_tok", 0), ck.get("wall_time", time.time())

def _safe_load_any(path: pathlib.Path, tgt: nn.Module, key: str | None = None, rename: str | None = None):
    p = _resolve_ckpt(path) or path
    if not p or not p.exists(): return 0
    ck = _try_load(p, map_location="cpu")
    if ck is None: return 0
    sd = ck.get(key, ck) if key else ck
    if isinstance(sd, dict) and "state_dict" in sd:
        sd = sd["state_dict"]
    if rename:
        sd = {k.replace(rename, "proj."): v for k, v in sd.items() if rename in k}
    tgt_sd = tgt.state_dict()
    filt = {k: v for k, v in sd.items() if k in tgt_sd and v.shape == tgt_sd[k].shape}
    if filt:
        tgt.load_state_dict(filt, strict=False)
    return len(filt)

def infer_cfg_from_ckpt(path: pathlib.Path):
    p = _resolve_ckpt(path) or path
    if not p.exists(): return None
    sd = _try_load(p, map_location="cpu")
    if sd is None: return None
    if isinstance(sd, dict) and "cfg" in sd and isinstance(sd["cfg"], dict):
        return dict(sd["cfg"])
    core = sd.get("core")
    if core is None: return None
    emb_w = core.get("emb.weight")
    if emb_w is None: return None
    d = emb_w.shape[1]
    layer_ids = []
    for k in core.keys():
        if k.startswith("blocks."):
            parts = k.split(".")
            if len(parts) > 2 and parts[1].isdigit():
                layer_ids.append(int(parts[1]))
    layers = (max(layer_ids) + 1) if layer_ids else None
    U = core.get("blocks.0.mha.U")
    heads = rank = None
    if U is not None:
        dk, r = U.shape
        rank = r
        heads = d // dk if dk > 0 else None
    out = {"d": d}
    if layers is not None: out["layers"] = layers
    if heads is not None: out["heads"] = heads
    if rank is not None: out["rank"] = rank
    return out

# ───────────────────────── Train loop helpers ─────────────────────────
def _parse_grow_plan(s: str) -> List[int]:
    steps = []
    for part in s.split(","):
        part = part.strip()
        if part:
            v = int(part)
            if v >= 128:
                steps.append(v)
    return sorted(set(steps))

def _init_save_timers(resume_wall_time: float | None, interval_sec: int) -> Tuple[float, float]:
    now_wall = time.time()
    now_mono = time.monotonic()
    if resume_wall_time is None:
        return now_wall, now_mono
    elapsed_wall = max(0.0, now_wall - resume_wall_time)
    elapsed_clamped = min(float(interval_sec), elapsed_wall)
    return now_wall, now_mono - elapsed_clamped

def _count_enabled_params(*modules: Optional[nn.Module]) -> int:
    total = 0
    for m in modules:
        if m is not None:
            total += sum(p.numel() for p in m.parameters())
    return total

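# Worked example of the Chinchilla-style budget used in _train_phase (25 tokens
# per enabled parameter). A sketch only: the count depends on the tokenizer's
# vocab size, so the printed numbers are machine-specific.
def _demo_target_tokens():  # illustrative only; not called by the trainer
    cfg = PRESETS["small"]
    n = _count_enabled_params(Encoder(cfg), ARHead(cfg["d"]))
    print(f"params={n:,} -> target_tokens={25 * n:,}")
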
def _make_optimizer(core, ar_h, lr_core: float, lr_head: float):
    return torch.optim.AdamW([
        {"params": [p for p in core.parameters() if p.requires_grad], "lr": lr_core},
        {"params": ar_h.parameters(), "lr": lr_head},
    ])

def _phase_freeze(core: nn.Module, *, freeze_core: bool, unfreeze_ln: bool, train_emb: bool):
    for p in core.parameters():
        p.requires_grad = not freeze_core
    if freeze_core:
        if unfreeze_ln:
            for blk in core.blocks:
                for p in blk.ln1.parameters(): p.requires_grad = True
                for p in blk.ln2.parameters(): p.requires_grad = True
            for p in core.ln.parameters(): p.requires_grad = True
        if train_emb:
            for p in core.emb.parameters(): p.requires_grad = True

def _train_phase(
    args,
    *,
    core: nn.Module,
    ar_h: nn.Module,
    opt: torch.optim.Optimizer,
    scaler: GradScaler,
    start_step: int,
    seen_tok: int,
    resume_wall_time: Optional[float],
    ce_tok,
    cfg: Dict[str, int],
    source: str,
    steps: Optional[int],
    block: int,
    save_dir: str,
    save_every_sec: int,
    save_every_steps: int,
    auto_grow: bool,
    grow_plan_s: str,
    grow_every_steps: int,
    chat: bool,
    chat_messages_key: str,
    dataset_field_text: str,
    sft_add_generation_prompt: bool,
    amp_flag: bool,
    fp8_only_flag: bool,
    fp8_fallback_flag: bool,
    target_tokens_override: Optional[int] = None,
    phase_name: str = "phase",
):
    BLOCK = block
    pbar = None

    if target_tokens_override is not None:
        target_tokens = target_tokens_override
    else:
        enabled_param_count = _count_enabled_params(core, ar_h)
        target_tokens = int(25 * enabled_param_count)

    new_tokens_needed = target_tokens - seen_tok
    if steps:
        new_tokens_needed = steps * BLOCK

    total_tokens_needed = seen_tok + max(0, new_tokens_needed)
    if new_tokens_needed <= 0:
        print(f"[{phase_name}] target already reached – skipping.")
        return start_step, seen_tok, resume_wall_time

    print(f"[{phase_name}] [auto-steps] {new_tokens_needed // BLOCK:,} steps (@ {BLOCK} tokens/step)")
    grow_plan = _parse_grow_plan(grow_plan_s) if auto_grow else []

    stream = token_stream(args, target_tokens, seed=42,
                          source=source, chat=chat, chat_messages_key=chat_messages_key,
                          sft_add_generation_prompt=sft_add_generation_prompt, dataset_field_text=dataset_field_text)
    buf: list[int] = []
    if pbar is None:
        pbar = tqdm(total=total_tokens_needed, initial=seen_tok, unit="tok")

    last_save_wall, last_save_mono = _init_save_timers(resume_wall_time, save_every_sec)
    step = start_step; steps_since_last_grow = 0

    while seen_tok < total_tokens_needed:
        try:
            while len(buf) < BLOCK:
                buf.append(next(stream))
        except StopIteration:
            break
        ids = torch.tensor(buf[:BLOCK], device=DEV).unsqueeze(0)
        buf = buf[BLOCK:]
        tgt_ar = ids.clone()

        try:
            # forward under autocast; backward/step outside the context as usual
            with amp(amp_flag or fp8_only_flag, prefer_fp8=fp8_only_flag and (_supports_fp8() or fp8_fallback_flag)):
                h_ar = core(ids, causal_mask(ids.size(1)))
                logits_ar = ar_h(h_ar)[:, :-1]
                loss = ce_tok(logits_ar.reshape(-1, VOCAB), tgt_ar[:, 1:].reshape(-1))
            scaler.scale(loss).backward()
            scaler.unscale_(opt)
            nn.utils.clip_grad_norm_(core.parameters(), 1.0)
            scaler.step(opt); scaler.update()
            opt.zero_grad(set_to_none=True)
        except RuntimeError as e:
            msg = str(e).lower()
            if "out of memory" in msg or "cuda error" in msg:
                new_block = max(128, BLOCK // 2)
                if new_block < BLOCK:
                    print(f"\n[{phase_name}][OOM] reducing block from {BLOCK} -> {new_block}")
                    BLOCK = new_block
                if DEV.type == "cuda":
                    torch.cuda.empty_cache()
                buf = ids[0].tolist() + buf  # requeue the failed tokens
                steps_since_last_grow = 0
                continue
            raise

        step += 1; seen_tok += BLOCK
        pbar.update(BLOCK)
        pbar.set_postfix_str(f"{phase_name} loss={loss.item():.3f} block={BLOCK}")

        if save_every_sec > 0:
            now_mono = time.monotonic()
            if now_mono - last_save_mono >= save_every_sec:
                ck_name = f"{phase_name}_step{step:08d}.pt"
                save_ckpt(pathlib.Path(save_dir) / ck_name, core, ar_h, opt, scaler,
                          meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time(),
                                "py_state": random.getstate(), "torch_state": rng_state(), "fp8_only": fp8_only_flag})
                last_save_mono = now_mono

        if save_every_steps > 0 and step > 0 and (step % save_every_steps == 0):
            ck_name = f"{phase_name}_step{step:08d}.pt"
            save_ckpt(pathlib.Path(save_dir) / ck_name, core, ar_h, opt, scaler,
                      meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time(),
                            "py_state": random.getstate(), "torch_state": rng_state(), "fp8_only": fp8_only_flag})

        if auto_grow:
            steps_since_last_grow += 1
            if steps_since_last_grow >= grow_every_steps:
                steps_since_last_grow = 0
                try:
                    idx = grow_plan.index(BLOCK)
                    if idx + 1 < len(grow_plan):
                        candidate = grow_plan[idx + 1]
                        print(f"[{phase_name}][auto-grow] {BLOCK} -> {candidate}")
                        BLOCK = candidate
                        if DEV.type == "cuda":
                            torch.cuda.empty_cache()
                    else:
                        print(f"[{phase_name}][auto-grow] at max planned block.")
                except ValueError:
                    grow_plan = sorted(set(grow_plan + [BLOCK]))
                    idx = grow_plan.index(BLOCK)
                    if idx + 1 < len(grow_plan):
                        candidate = grow_plan[idx + 1]
                        print(f"[{phase_name}][auto-grow] moving to planned BLOCK {candidate}")
                        BLOCK = candidate
                        if DEV.type == "cuda":
                            torch.cuda.empty_cache()

    if pbar is not None:
        pbar.close()

    # use the per-phase flag here, not args.fp8_only, to match the periodic saves
    save_ckpt(pathlib.Path(save_dir) / f"{phase_name}_final.pt", core, ar_h, opt, scaler,
              meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time(),
                    "py_state": random.getstate(), "torch_state": rng_state(), "fp8_only": fp8_only_flag})
    print(f"🎉 {phase_name} complete")
    return step, seen_tok, time.time()

# ───────────────────────── Top-level Train orchestrator ─────────────────────────
def train(args):
    cfg = PRESETS[args.preset].copy()

    # probe unless --fresh
    if not args.fresh:
        src_probe = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
        prev_cfg = infer_cfg_from_ckpt(src_probe)
    else:
        prev_cfg = None

    if prev_cfg and not args.fresh:
        cfg["d"] = prev_cfg.get("d", cfg["d"])
        if prev_cfg.get("heads"): cfg["heads"] = prev_cfg["heads"]
        if args.rank is None and prev_cfg.get("rank"): cfg["rank"] = prev_cfg["rank"]
        if prev_cfg.get("layers"): cfg["layers"] = prev_cfg["layers"]
        if args.x2 and prev_cfg.get("layers"): cfg["layers"] = max(cfg["layers"], prev_cfg["layers"] * 2)
    if args.rank: cfg["rank"] = args.rank
    if args.x2 and not prev_cfg: cfg["layers"] *= 2

    BLOCK = args.block or DEFAULT_BLOCK

    core = Encoder(cfg).to(DEV)
    ar_h = ARHead(cfg["d"]).to(DEV)

    # shape-safe warm-start even in --fresh
    loaded = 0; src = None
    if args.warmstart_from:
        src = _resolve_ckpt(pathlib.Path(args.warmstart_from)) or pathlib.Path(args.warmstart_from)
    else:
        maybe = _resolve_ckpt(pathlib.Path(args.save_dir) / "final.pt")
        if maybe and not args.fresh:
            src = maybe
    if src:
        loaded += _safe_load_any(src, core, key="core")
        loaded += _safe_load_any(src, ar_h, key="ar")
    if loaded:
        print(f"Warm-start: loaded {loaded} matching tensors from {src}")

    _phase_freeze(core, freeze_core=args.freeze_core, unfreeze_ln=args.unfreeze_ln, train_emb=args.train_emb)
    opt = _make_optimizer(core, ar_h, args.lr_core, args.lr_head)
    scaler = GradScaler(enabled=((args.amp or args.fp8_only) and DEV.type == "cuda"))
    ce_tok = nn.CrossEntropyLoss(label_smoothing=0.1)

    start_step, seen_tok = 0, 0
    last_save_wall = None
    if args.resume and not args.fresh:
        start_step, seen_tok, last_save_wall = load_ckpt(pathlib.Path(args.resume), core, ar_h, opt, scaler)
        print(f"✓ resumed from step {start_step:,}, seen_tokens={seen_tok:,}")

    # Phase A: pretrain
    step, seen_tok, last_save_wall = _train_phase(
        args,
        core=core, ar_h=ar_h, opt=opt, scaler=scaler,
        start_step=start_step, seen_tok=seen_tok, resume_wall_time=last_save_wall,
        ce_tok=ce_tok, cfg=cfg,
        source=args.source, steps=args.steps, block=BLOCK,
        save_dir=args.save_dir, save_every_sec=args.save_every_sec, save_every_steps=args.save_every_steps,
        auto_grow=args.auto_grow, grow_plan_s=args.grow_plan, grow_every_steps=args.grow_every_steps,
        chat=args.chat, chat_messages_key=args.chat_messages_key, dataset_field_text=args.dataset_field_text,
        sft_add_generation_prompt=args.sft_add_generation_prompt,
        amp_flag=args.amp, fp8_only_flag=args.fp8_only, fp8_fallback_flag=args.fp8_fallback,
        target_tokens_override=(args.target_tokens if args.target_tokens else None),
        phase_name="pretrain",
    )

    # Auto-wire Phase B defaults if steps provided but no source
    if (not args.after_sft_source) and (args.after_sft_steps and args.after_sft_steps > 0):
        args.after_sft_source = DEFAULT_AFTER_SFT_SOURCES
        args.after_sft_chat = True
        if args.after_sft_add_generation_prompt is None:
            args.after_sft_add_generation_prompt = True
        if not args.after_sft_block or args.after_sft_block <= 0:
            args.after_sft_block = DEFAULT_AFTER_SFT_BLOCK

    if args.after_sft_source and args.after_sft_steps and args.after_sft_steps > 0:
        print("\n[after-sft] starting automatic post-pretraining chat SFT phase")
        _phase_freeze(core,
                      freeze_core=args.after_sft_freeze_core,
                      unfreeze_ln=args.after_sft_unfreeze_ln,
                      train_emb=args.after_sft_train_emb)
        opt = _make_optimizer(core, ar_h,
                              args.after_sft_lr_core or args.lr_core,
                              args.after_sft_lr_head or args.lr_head)

        step, seen_tok, last_save_wall = _train_phase(
            args,
            core=core, ar_h=ar_h, opt=opt, scaler=scaler,
            start_step=step, seen_tok=seen_tok, resume_wall_time=last_save_wall,
            ce_tok=ce_tok, cfg=cfg,
            source=args.after_sft_source, steps=args.after_sft_steps,
            block=args.after_sft_block or DEFAULT_AFTER_SFT_BLOCK,
            save_dir=args.save_dir, save_every_sec=args.save_every_sec, save_every_steps=args.save_every_steps,
            auto_grow=args.after_sft_auto_grow, grow_plan_s=(args.after_sft_grow_plan or args.grow_plan),
            grow_every_steps=(args.after_sft_grow_every_steps or args.grow_every_steps),
            chat=args.after_sft_chat, chat_messages_key=args.after_sft_chat_messages_key,
            dataset_field_text=args.after_sft_dataset_field_text,
            sft_add_generation_prompt=(args.after_sft_add_generation_prompt
                                       if args.after_sft_add_generation_prompt is not None
                                       else args.sft_add_generation_prompt),
            amp_flag=args.amp, fp8_only_flag=args.fp8_only, fp8_fallback_flag=args.fp8_fallback,
            target_tokens_override=None,
            phase_name="sft",
        )

    save_ckpt(pathlib.Path(args.save_dir) / "final.pt", core, ar_h, opt, scaler,
              meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time(),
                    "py_state": random.getstate(), "torch_state": rng_state(), "fp8_only": args.fp8_only})
    print("🎉 training complete")

# ───────────────────────── Sampling utils ─────────────────────────
def _apply_no_repeat_ngram(logits: torch.Tensor, ids: torch.Tensor, n: int):
    if n <= 0 or ids.size(1) < n - 1:
        return logits
    prefix = ids[0, -(n - 1):].tolist()
    banned = []
    tokens = ids[0].tolist()
    for i in range(len(tokens) - n + 1):
        if tokens[i:i + n - 1] == prefix:
            banned.append(tokens[i + n - 1])
    if banned:
        banned_idx = torch.tensor(banned, device=logits.device, dtype=torch.long)
        logits[..., banned_idx] = float("-inf")
    return logits

def _apply_rep_presence_frequency(
    logits: torch.Tensor, ids: torch.Tensor, last_n: int,
    repetition_penalty: float, presence_penalty: float, frequency_penalty: float
):
    if ids.numel() == 0:
        return logits
    hist = ids[0, -last_n:].to(torch.long) if last_n > 0 else ids[0].to(torch.long)
    if hist.numel() == 0:
        return logits
    uniq, counts = torch.unique(hist, return_counts=True)
    if presence_penalty != 0.0 or frequency_penalty != 0.0:
        adjust = presence_penalty + frequency_penalty * counts.to(logits.dtype)
        logits[..., uniq] = logits[..., uniq] - adjust
    if repetition_penalty and abs(repetition_penalty - 1.0) > 1e-6:
        sel = logits[..., uniq]
        sel = torch.where(sel > 0, sel / repetition_penalty, sel * repetition_penalty)
        logits[..., uniq] = sel
    return logits

def _filter_top_k_top_p_min_p(
    logits: torch.Tensor, top_k: int, top_p: float, min_p: float, temperature: float
) -> torch.Tensor:
    logits = logits / max(temperature, 1e-8)
    if logits.dim() == 1:
        logits = logits.unsqueeze(0)
    probs = logits.softmax(-1)
    if torch.isnan(probs).any() or torch.isinf(probs).any():
        probs = torch.nan_to_num(probs, nan=0.0, posinf=0.0, neginf=0.0)
    V = probs.size(-1)
    if top_k and top_k < V:
        vals, idx = torch.topk(probs, top_k, dim=-1)
        mask = torch.full_like(probs, 0.0)
        mask.scatter_((1 if probs.dim() == 2 else -1), idx, 1.0)
        probs = probs * mask
    if top_p < 1.0:
        sorted_probs, sorted_idx = torch.sort(probs, descending=True, dim=-1)
        cumsum = torch.cumsum(sorted_probs, dim=-1)
        keep = cumsum <= top_p
        keep[..., 0] = True
        mask = torch.zeros_like(probs)
        mask.scatter_(1, sorted_idx, keep.to(mask.dtype))
        probs = probs * mask
    if min_p > 0.0:
        probs = torch.where(probs >= min_p, probs, torch.zeros_like(probs))
    sums = probs.sum(-1, keepdim=True)
    empty = (sums == 0)
    if empty.any():
        # fall back to the argmax token on rows whose mass was filtered to
        # zero, leaving rows that still have mass untouched
        fallback_idx = logits.argmax(-1, keepdim=True)
        probs = torch.where(empty, torch.zeros_like(probs), probs)
        probs.scatter_(-1, fallback_idx,
                       torch.where(empty, torch.ones_like(sums), probs.gather(-1, fallback_idx)))
    probs = probs / probs.sum(-1, keepdim=True)
    return probs

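# Toy check of the filtering pipeline above (illustrative values; not called
# by the decoder):
def _demo_sampling_filter():
    logits = torch.tensor([2.0, 1.0, 0.5, -1.0])
    probs = _filter_top_k_top_p_min_p(logits, top_k=2, top_p=1.0, min_p=0.0, temperature=1.0)
    print(probs)          # shape (1, 4); mass only on the two largest logits
    print(probs.sum(-1))  # tensor([1.])
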
# ───────────────────────── Inference helpers ─────────────────────────
def load_joint(ckpt: str, preset: str):
    path = _resolve_ckpt(pathlib.Path(ckpt)) or pathlib.Path(ckpt)
    sd = _try_load(path, map_location="cpu")
    if sd is None:
        raise FileNotFoundError(f"No valid checkpoint at {path}")
    cfg = sd["cfg"] if "cfg" in sd and isinstance(sd["cfg"], dict) else (infer_cfg_from_ckpt(path) or PRESETS[preset])
    core = Encoder(cfg).to(DEV)
    ar_h = ARHead(cfg["d"]).to(DEV)
    core.load_state_dict(sd["core"])
    if "ar" in sd:
        ar_h.load_state_dict(sd["ar"])
    return core, ar_h

def _warn_tokenizer_mismatch(sd_tokenizer_id: str | None):
    if not sd_tokenizer_id:
        return
    if sd_tokenizer_id != TOKENIZER_ID:
        print(f"[warn] tokenizer mismatch: ckpt used '{sd_tokenizer_id}', runtime is '{TOKENIZER_ID}'. Expect degraded outputs.", file=sys.stderr)

DECODE_PRESETS = {
    "det": dict(greedy=True, temperature=1.0, top_k=0, top_p=1.0, min_p=0.0,
                repetition_penalty=1.05, presence_penalty=0.0, frequency_penalty=0.0,
                penalty_last_n=128, no_repeat_ngram_size=3),
    "balanced": dict(greedy=False, temperature=0.7, top_k=40, top_p=0.9, min_p=0.0,
                     repetition_penalty=1.1, presence_penalty=0.3, frequency_penalty=0.3,
                     penalty_last_n=256, no_repeat_ngram_size=3),
    "creative": dict(greedy=False, temperature=0.85, top_k=80, top_p=0.95, min_p=0.0,
                     repetition_penalty=1.05, presence_penalty=0.2, frequency_penalty=0.2,
                     penalty_last_n=256, no_repeat_ngram_size=3),
}

@torch.no_grad()
def ar_decode(core, ar_h, prompt: str, max_new: int, T: float,
              greedy: bool, top_k: int, top_p: float, min_p: float,
              repetition_penalty: float, presence_penalty: float,
              frequency_penalty: float, penalty_last_n: int,
              no_repeat_ngram_size: int,
              use_fp8: bool, fp8_fallback: bool):
    prompt_ids = tok.encode(prompt)
    if len(prompt_ids) == 0:
        ids = torch.tensor([[EOS] if EOS is not None else [0]], device=DEV); prompt_len = 0
    else:
        ids = torch.tensor([prompt_ids], device=DEV); prompt_len = ids.size(1)

    t0 = time.time()
    with amp(use_fp8, prefer_fp8=use_fp8 and (_supports_fp8() or fp8_fallback)):
        h_full, kvs = core(ids, causal_mask(ids.size(1)), use_cache=True)
        for _ in range(max_new):
            logits = ar_h(h_full)[:, -1]
            logits = _apply_no_repeat_ngram(logits, ids, no_repeat_ngram_size)
            logits = _apply_rep_presence_frequency(logits, ids, penalty_last_n,
                                                   repetition_penalty, presence_penalty, frequency_penalty)
            if greedy:
                nxt = logits.argmax(-1, keepdim=True)
            else:
                probs = _filter_top_k_top_p_min_p(logits.squeeze(0), top_k, top_p, min_p, T)
                nxt = probs.multinomial(1)
            ids = torch.cat([ids, nxt.unsqueeze(0) if nxt.dim() == 1 else nxt], 1)
            x = ids[:, -1:]; h_full, kvs = core(x, None, kv_caches=kvs, use_cache=True)

    full_ids = ids[0].tolist()
    prompt_text = tok.decode(full_ids[:prompt_len], skip_special_tokens=True)
    gen_text = tok.decode(full_ids[prompt_len:], skip_special_tokens=True)

    if sys.stdout.isatty():
        sys.stdout.write("\x1b[90m"); sys.stdout.write(prompt_text); sys.stdout.write("\x1b[0m"); sys.stdout.write(gen_text + "\n")
    else:
        sys.stdout.write(prompt_text + gen_text + "\n")

    print(f"[{len(full_ids) - prompt_len} tok in {time.time() - t0:.2f}s]")

# ───────────────────────── CLI ─────────────────────────
def main():
    ap = argparse.ArgumentParser()
    sub = ap.add_subparsers(dest="cmd", required=True)

    tr = sub.add_parser("train")
    tr.add_argument("--preset", choices=PRESETS, default="base17")
    tr.add_argument("--rank", type=int)
    tr.add_argument("--block", type=int, default=DEFAULT_BLOCK)
    tr.add_argument("--source", default=DEFAULT_PRETRAIN_SOURCES,
                    help="Comma-separated datasets (optionally dataset:config), or json:/path.jsonl")
    tr.add_argument("--target_tokens", type=int)
    tr.add_argument("--steps", type=int)
    tr.add_argument("--amp", action="store_true")
    tr.add_argument("--save_every_sec", type=int, default=DEFAULT_SAVE_SEC)
    tr.add_argument("--save_every_steps", type=int, default=0)
    tr.add_argument("--save_dir", default=str(CKDIR))
    tr.add_argument("--resume", type=str)
    tr.add_argument("--x2", action="store_true")
    tr.add_argument("--warmstart_from", type=str, default=None)
    tr.add_argument("--fresh", action="store_true")

    # FP8 control
    tr.add_argument("--fp8-only", action="store_true", dest="fp8_only")
    tr.add_argument("--fp8-fallback", action="store_true", dest="fp8_fallback")

    # Progressive block growth
    tr.add_argument("--auto_grow", action="store_true")
    tr.add_argument("--grow_plan", type=str, default="576,768,1024")
    tr.add_argument("--grow_every_steps", type=int, default=50000)

    # Chat / dataset fields
    tr.add_argument("--chat", action="store_true")
    tr.add_argument("--chat_messages_key", type=str, default="messages")
    tr.add_argument("--dataset_field_text", type=str, default="text")
    tr.add_argument("--sft_add_generation_prompt", action="store_true")

    # Phase A freezing / LRs
    tr.add_argument("--freeze_core", action="store_true")
    tr.add_argument("--unfreeze_ln", action="store_true")
    tr.add_argument("--train_emb", action="store_true")
    tr.add_argument("--lr_core", type=float, default=LR_CORE)
    tr.add_argument("--lr_head", type=float, default=LR_HEAD)

    # Phase B: automatic SFT
    tr.add_argument("--after_sft_source", type=str, default="")
    tr.add_argument("--after_sft_steps", type=int, default=0)
    tr.add_argument("--after_sft_chat", action="store_true")
    tr.add_argument("--after_sft_chat_messages_key", type=str, default="messages")
    tr.add_argument("--after_sft_dataset_field_text", type=str, default="text")
    tr.add_argument("--after_sft_add_generation_prompt", type=lambda x: str(x).lower() in {"1", "true", "yes"}, default=None)
    tr.add_argument("--after_sft_block", type=int, default=0)
    tr.add_argument("--after_sft_auto_grow", action="store_true")
    tr.add_argument("--after_sft_grow_plan", type=str, default="")
    tr.add_argument("--after_sft_grow_every_steps", type=int, default=0)
    tr.add_argument("--after_sft_freeze_core", action="store_true")
    tr.add_argument("--after_sft_unfreeze_ln", action="store_true")
    tr.add_argument("--after_sft_train_emb", action="store_true")
    tr.add_argument("--after_sft_lr_core", type=float, default=0.0)
    tr.add_argument("--after_sft_lr_head", type=float, default=0.0)

    inf = sub.add_parser("infer")
    inf.add_argument("--mode", choices=["ar"], required=True)
    inf.add_argument("--ckpt", required=True)
    inf.add_argument("--preset", default="base17")
    inf.add_argument("--prompt", required=True)
    inf.add_argument("--max_new", type=int, default=256)
    inf.add_argument("--seed", type=int, default=1234)
    inf.add_argument("--greedy", action="store_true")
    inf.add_argument("--temperature", type=float, default=0.7)
    inf.add_argument("--top_k", type=int, default=40)
    inf.add_argument("--top_p", type=float, default=0.9)
    inf.add_argument("--min_p", type=float, default=0.0)
    inf.add_argument("--repetition_penalty", type=float, default=1.1)
    inf.add_argument("--presence_penalty", type=float, default=0.3)
    inf.add_argument("--frequency_penalty", type=float, default=0.3)
    inf.add_argument("--penalty_last_n", type=int, default=256)
    inf.add_argument("--no_repeat_ngram_size", type=int, default=3)
    inf.add_argument("--fp8-only", action="store_true", dest="fp8_only")
    inf.add_argument("--fp8-fallback", action="store_true", default=False, dest="fp8_fallback")
    inf.add_argument("--decode_preset", choices=["det", "balanced", "creative"], default="balanced")

    args = ap.parse_args()
    if args.cmd == "train":
        if args.fp8_only:
            print("[init] FP8-only requested. If FP8 kernels are missing, use --fp8-fallback to continue with bf16.")
        train(args)
    else:
        core, ar_h = load_joint(args.ckpt, args.preset)
        try:
            p = _resolve_ckpt(pathlib.Path(args.ckpt)) or pathlib.Path(args.ckpt)
            _sd = _try_load(p, map_location="cpu")
            _warn_tokenizer_mismatch(_sd.get("tokenizer_id") if isinstance(_sd, dict) else None)
        except Exception:
            pass
        set_seed(args.seed)
        # note: values from the chosen decode preset take precedence over the
        # individual sampling flags above
        dp = DECODE_PRESETS.get(args.decode_preset, {})
        g = dp.get("greedy", args.greedy)
        T = dp.get("temperature", args.temperature)
        k = dp.get("top_k", args.top_k)
        p_ = dp.get("top_p", args.top_p)
        mp = dp.get("min_p", args.min_p)
        rp = dp.get("repetition_penalty", args.repetition_penalty)
        pp = dp.get("presence_penalty", args.presence_penalty)
        fp = dp.get("frequency_penalty", args.frequency_penalty)
        ln = dp.get("penalty_last_n", args.penalty_last_n)
        ng = dp.get("no_repeat_ngram_size", args.no_repeat_ngram_size)

        ar_decode(core, ar_h, args.prompt, args.max_new, T,
                  g, k, p_, mp, rp, pp, fp, ln, ng,
                  use_fp8=args.fp8_only, fp8_fallback=args.fp8_fallback if hasattr(args, "fp8_fallback") else False)

if __name__ == "__main__":
    main()