OpenTransformer committed
Commit c5ca747 · verified · 1 Parent(s): c111ff8

Upload 2 files

Files changed (2)
  1. C43.py +863 -0
  2. pretrain_step00902666.pt +3 -0
C43.py ADDED
@@ -0,0 +1,863 @@
+ #!/usr/bin/env python3
+
+ # C43.py — Joint AR+SAT Trainer with SFT Phase
+ # Merges 5L.py (Joint Model + Adaptive OOM) with 5apg.py (Robust Stream + SFT Phases)
+ # Features:
+ # - Joint AR + SAT training objective
+ # - Phase 1: Pretrain -> Phase 2: SFT (Chat/Instruction Tuning)
+ # - Adaptive OOM: Reduces Batch Size, then Block Size
+ # - Robust Data: Retries, JSONL, Chat Templates, Source Mixing
+ # - Chinchilla Scaling, Checkpoint Pruning, FP8/AMP support
+ # - Colored inference output (prompt vs generation)
+
+ from __future__ import annotations
+ import argparse, json, math, pathlib, random, time, os, sys
+ from contextlib import nullcontext
+ from typing import Dict, Any, List, Optional, Tuple
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from datasets import load_dataset, DownloadConfig
+ from transformers import AutoTokenizer, logging as hf_log
+ from tqdm.auto import tqdm
+
+ # ───────────────────────── ANSI Colors ─────────────────────────
+ class Colors:
+     RESET = "\033[0m"
+     BOLD = "\033[1m"
+     # Prompt color
+     PROMPT = "\033[36m"  # Cyan
+     # Generation color
+     GEN = "\033[33m"  # Yellow
+     # Info color
+     INFO = "\033[90m"  # Gray
+     # Success
+     OK = "\033[32m"  # Green
+
+ # ───────────────────────── Globals ─────────────────────────
+ hf_log.set_verbosity_error()
+ DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ torch.backends.cuda.matmul.allow_tf32 = True
+ try:
+     torch.set_float32_matmul_precision("high")
+ except Exception:
+     pass
+
+ # Tokenizer
+ TOKENIZER_ID = os.environ.get("TOKENIZER_ID", "deepseek-ai/DeepSeek-V3.2-Exp")
+ tok = AutoTokenizer.from_pretrained(TOKENIZER_ID, use_fast=True, trust_remote_code=True)
+ if tok.pad_token is None:
+     tok.add_special_tokens({"pad_token": "<|pad|>"})
+
+ VOCAB, EOS = (
+     max(tok.get_vocab().values()) + 1,
+     tok.eos_token_id if tok.eos_token_id is not None else tok.sep_token_id
+ )
+
+ PRESETS: Dict[str, Dict[str, int]] = {
+     "small": dict(d=512, layers=8, heads=16, rank=64),
+     "smallx2": dict(d=512, layers=16, heads=16, rank=64),
+     "base": dict(d=768, layers=12, heads=24, rank=96),
+     "base18": dict(d=768, layers=18, heads=24, rank=96),
+     "large": dict(d=1024, layers=24, heads=16, rank=128),
+ }
+
+ # Configuration
+ DEFAULT_BLOCK = 1122
+ DEFAULT_BATCH = 4
+ SAT_BLOCK = 2
+ LR_CORE, LR_HEAD = 5e-5, 2e-4
+ EMIT_LAMBDA = 0.1
+ DEFAULT_SAVE_SEC = 24 * 3600
+ CKDIR = pathlib.Path("ckpts_joint")
+
+ # Defaults for SFT
+ DEFAULT_PRETRAIN_SOURCES = "cerebras/SlimPajama-627B"
+ DEFAULT_AFTER_SFT_SOURCES = "mlabonne/opc-sft-stage2-chat,HuggingFaceH4/ultrachat_200k"
+ DEFAULT_AFTER_SFT_BLOCK = 1122
+
+ # ───────────────────────── Utilities ─────────────────────────
+ def rng_state():
+     if DEV.type == "cuda":
+         try:
+             return torch.cuda.get_rng_state(DEV)
+         except TypeError:
+             return torch.cuda.get_rng_state()
+     return torch.get_rng_state()
+
+ def _is_probably_ckpt(path: pathlib.Path) -> bool:
+     try:
+         return path.is_file() and path.suffix == ".pt" and not path.name.endswith(".pt.tmp") and path.stat().st_size > (1 << 20)
+     except Exception:
+         return False
+
+ def _resolve_ckpt(path: pathlib.Path) -> pathlib.Path | None:
+     try:
+         if path.is_dir():
+             cands = sorted([p for p in path.glob("*.pt") if _is_probably_ckpt(p)],
+                            key=lambda p: p.stat().st_mtime, reverse=True)
+             return cands[0] if cands else None
+         if path.suffix == ".tmp":
+             solid = path.with_suffix("")
+             return solid if _is_probably_ckpt(solid) else _resolve_ckpt(path.parent)
+         return path if _is_probably_ckpt(path) else _resolve_ckpt(path.parent)
+     except Exception:
+         return None
+
+ def _try_load(path: pathlib.Path, map_location="cpu"):
+     try:
+         # Honor the map_location argument (was hard-coded to "cpu")
+         return torch.load(path, map_location=map_location)
+     except Exception as e:
+         print(f"[ckpt-skip] {path} not usable: {e}")
+         return None
+
+ def _prune_checkpoints(save_dir: pathlib.Path, phase_name: str, max_ckpts: int):
+     """Prune old checkpoints for a specific phase."""
+     if max_ckpts is None or max_ckpts <= 0:
+         return
+     try:
+         pattern = f"{phase_name}_step*.pt"
+         ckpts = sorted(
+             [p for p in save_dir.glob(pattern) if _is_probably_ckpt(p)],
+             key=lambda p: p.stat().st_mtime
+         )
+         excess = len(ckpts) - max_ckpts
+         if excess > 0:
+             for p in ckpts[:excess]:
+                 try:
+                     p.unlink()
+                     print(f"  [prune] deleted old {p.name}")
+                 except Exception:
+                     pass
+     except Exception as e:
+         print(f"[ckpt-prune] error: {e}")
+
+ # ───────────────────────── AMP helper ─────────────────────────
+ try:
+     from torch.amp import autocast as _ac, GradScaler
+ except ImportError:
+     from torch.cuda.amp import autocast as _ac, GradScaler
+
+ def _auto_amp_dtype():
+     if DEV.type == "cuda":
+         try:
+             if torch.cuda.is_bf16_supported(): return torch.bfloat16
+             return torch.float16
+         except Exception: return torch.float16
+     return torch.float32
+
+ def amp(enabled: bool):
+     return nullcontext() if not (enabled and DEV.type == "cuda") else _ac(device_type="cuda", dtype=_auto_amp_dtype())
+
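+ # Usage sketch for the helper above: `with amp(True): out = model(x)` runs the
+ # forward pass in bf16 where supported (fp16 otherwise) and is a no-op on CPU
+ # or when AMP is disabled.
+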
+ # ───────────────────────── Chat & Data Stream ─────────────────────────
+ def _coerce_role(r: str) -> str:
+     r = (r or "").lower()
+     if r in {"user", "human", "customer"}: return "user"
+     if r in {"assistant", "gpt", "bot"}: return "assistant"
+     if r in {"system", "context"}: return "system"
+     return r or "user"
+
+ def _render_chat_text_from_ex(ex: dict, messages_key: str, add_generation_prompt: bool) -> Optional[str]:
+     msgs = ex.get(messages_key)
+     if msgs is None:
+         for alt in ("conversations", "dialog", "turns"):
+             if isinstance(ex.get(alt), list):
+                 msgs = ex[alt]; break
+     if isinstance(msgs, list) and msgs and isinstance(msgs[0], dict):
+         try:
+             norm = []
+             for m in msgs:
+                 role = _coerce_role(m.get("role", "")); content = m.get("content", m.get("text", ""))
+                 if not isinstance(content, str): continue
+                 norm.append({"role": role, "content": content})
+             if not norm: return None
+             return tok.apply_chat_template(norm, tokenize=False, add_generation_prompt=add_generation_prompt)
+         except Exception: return None
+     # Fallback for prompt/response pairs
+     for a, b in (("prompt", "response"), ("instruction", "output"), ("question", "answer")):
+         if isinstance(ex.get(a), str) and isinstance(ex.get(b), str):
+             return f"User: {ex[a]}\nAssistant: {ex[b]}"
+     return None
+
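+ # Illustrative example (hypothetical records): an input like
+ #   {"messages": [{"role": "human", "content": "hi"}, {"role": "gpt", "content": "hello"}]}
+ # is normalized to user/assistant roles and rendered via tok.apply_chat_template,
+ # while {"prompt": "2+2?", "response": "4"} falls back to "User: 2+2?\nAssistant: 4".
+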
+ def _open_stream_one(ds_name: str, seed: int):
+     dc = DownloadConfig(max_retries=5, use_etag=True, resume_download=True)
+     if ":" in ds_name: base, config = ds_name.split(":", 1)
+     else: base, config = ds_name, None
+
+     if base == "json":
+         data_files = {"train": config}
+         ds = load_dataset("json", data_files=data_files, split="train", streaming=True, download_config=dc)
+     else:
+         ds = load_dataset(base, config, split="train", streaming=True, download_config=dc) if config else \
+              load_dataset(base, split="train", streaming=True, download_config=dc)
+     return iter(ds.shuffle(buffer_size=10_000, seed=seed))
+
+ def token_stream(ds_names: str, target: int, seed: int = 42,
+                  chat: bool = False, chat_messages_key: str = "messages",
+                  sft_add_generation_prompt: bool = False, dataset_field_text: str = "text"):
+     sources = [s.strip() for s in ds_names.split(",") if s.strip()]
+     if not sources: return
+
+     src_idx = 0; emitted = 0; it = None; attempts = 0; backoff_base = 2.0
+
+     while emitted < target:
+         try:
+             if it is None: it = _open_stream_one(sources[src_idx], seed)
+             ex = next(it)
+             text = None
+             if isinstance(ex, dict):
+                 if chat:
+                     text = _render_chat_text_from_ex(ex, chat_messages_key, sft_add_generation_prompt)
+                 if text is None:
+                     if dataset_field_text and isinstance(ex.get(dataset_field_text), str):
+                         text = ex[dataset_field_text]
+                     elif isinstance(ex.get("text"), str):
+                         text = ex["text"]
+
+             if not isinstance(text, str):
+                 attempts = 0; continue
+
+             enc = tok.encode(text)
+             if EOS is not None and (len(enc) == 0 or enc[-1] != EOS):
+                 enc = enc + [EOS]
+
+             for t in enc:
+                 yield t
+                 emitted += 1
+                 if emitted >= target: return
+             attempts = 0
+         except StopIteration:
+             it = None; src_idx = (src_idx + 1) % len(sources)
+         except Exception as e:
+             attempts += 1
+             sleep_s = min(60.0, backoff_base ** min(attempts, 6))
+             print(f"[stream-retry] {sources[src_idx]} error: {type(e).__name__}, sleeping {sleep_s:.1f}s")
+             time.sleep(sleep_s); it = None
+             if attempts % 5 == 0 and len(sources) > 1:
+                 src_idx = (src_idx + 1) % len(sources)
+
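+ # Usage sketch (assumes Hub access; dataset name as in DEFAULT_PRETRAIN_SOURCES):
+ #   stream = token_stream("cerebras/SlimPajama-627B", target=10_000)
+ #   first = [next(stream) for _ in range(512)]  # raw token ids, docs EOS-terminated
+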
+ # ───────────────────────── Relative positional bias (ALiBi) ─────────────────────────
+ def _alibi_slopes(n_heads: int):
+     import math
+     def pow2slopes(n):
+         start = 2 ** (-2 ** -(math.log2(n) - 3))
+         ratio = start
+         return [start * (ratio ** i) for i in range(n)]
+     if math.log2(n_heads).is_integer(): vals = pow2slopes(n_heads)
+     else:
+         closest = 2 ** math.floor(math.log2(n_heads))
+         vals = pow2slopes(closest)
+         extra = pow2slopes(2 * closest)
+         vals += extra[0::2][: n_heads - closest]
+     return torch.tensor(vals, device=DEV).view(1, n_heads, 1, 1)
+
+ def alibi_bias(n_heads: int, n_tokens: int):
+     i = torch.arange(n_tokens, device=DEV).view(1, 1, n_tokens, 1)
+     j = torch.arange(n_tokens, device=DEV).view(1, 1, 1, n_tokens)
+     dist = (j - i).clamp_min(0)
+     return -_alibi_slopes(n_heads) * dist
+
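+ # Worked example: for 8 heads, start = 2**(-2**-(log2(8)-3)) = 0.5, so the slopes
+ # are the geometric series 1/2, 1/4, ..., 1/256 (the standard ALiBi schedule).
+ # Note that (j - i).clamp_min(0) biases only positions ahead of the query; past
+ # tokens get zero bias, so the penalty matters for SAT's within-block lookahead.
+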
+ # ───────────────────────── Model components ─────────────────────────
+ class LowRankMHA(nn.Module):
+     def __init__(self, d: int, h: int, r: int, use_relpos: bool = True):
+         super().__init__()
+         assert d % h == 0
+         self.h, self.dk = h, d // h
+         self.use_relpos = use_relpos
+         self.q = nn.Linear(d, d, bias=False)
+         self.k = nn.Linear(d, d, bias=False)
+         self.v = nn.Linear(d, d, bias=False)
+         self.U = nn.Parameter(torch.randn(self.dk, r))
+         nn.init.orthogonal_(self.U)
+         self.proj = nn.Linear(h * r, d, bias=False)
+         self.drop = nn.Dropout(0.1)
+
+     def _proj(self, x):
+         B, N, _ = x.shape
+         return (x.view(B, N, self.h, self.dk).transpose(1, 2) @ self.U)
+
+     def forward(self, x, mask=None, rel_bias_tokens=None, kv_cache=None, use_cache=False):
+         q = self._proj(self.q(x))
+         k_new = self._proj(self.k(x))
+         v_new = self._proj(self.v(x))
+
+         if kv_cache is None: k, v = k_new, v_new
+         else:
+             k, v = kv_cache
+             if use_cache:
+                 k, v = torch.cat([k, k_new], dim=2), torch.cat([v, v_new], dim=2)
+
+         att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk)
+         if q.size(2) == k.size(2):
+             if self.use_relpos and rel_bias_tokens is not None:
+                 att = att + alibi_bias(self.h, rel_bias_tokens)
+             if mask is not None: att = att + mask
+
+         z = (att.softmax(-1) @ v).transpose(1, 2).reshape(x.size(0), x.size(1), -1)
+         out = self.drop(self.proj(z))
+         return (out, (k, v)) if use_cache else out
+
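+ # Shape sketch for LowRankMHA: _proj maps [B, N, d] -> [B, h, N, r] by splitting
+ # heads ([B, N, h, dk]) and multiplying by the shared rank-r basis U (dk x r);
+ # attention then runs in the rank-r subspace, and proj maps the concatenated
+ # h*r features back to d. KV caches therefore hold rank-r tensors, not full dk.
+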
+ class Block(nn.Module):
+     def __init__(self, d: int, h: int, r: int):
+         super().__init__()
+         self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
+         self.mha = LowRankMHA(d, h, r)
+         self.ff = nn.Sequential(nn.Linear(d, 4 * d), nn.ReLU(), nn.Linear(4 * d, d))
+
+     def forward(self, x, mask, kv=None, use_cache=False):
+         n = x.size(1)
+         if use_cache:
+             y, new_kv = self.mha(self.ln1(x), mask, rel_bias_tokens=n if mask is not None else None, kv_cache=kv, use_cache=True)
+             x = x + y + self.ff(self.ln2(x + y))
+             return x, new_kv
+         else:
+             x = x + self.mha(self.ln1(x), mask, rel_bias_tokens=n)
+             return x + self.ff(self.ln2(x))
+
+ class Encoder(nn.Module):
+     def __init__(self, cfg):
+         super().__init__()
+         d, l, h, r = cfg["d"], cfg["layers"], cfg["heads"], cfg["rank"]
+         self.emb = nn.Embedding(VOCAB, d)
+         self.blocks = nn.ModuleList([Block(d, h, r) for _ in range(l)])
+         self.ln = nn.LayerNorm(d)
+
+     def forward(self, ids, mask, kv_caches=None, use_cache=False):
+         x = self.emb(ids)
+         if not use_cache:
+             for blk in self.blocks: x = blk(x, mask)
+             return self.ln(x)
+         new_kvs = []
+         for i, blk in enumerate(self.blocks):
+             kv = kv_caches[i] if kv_caches else None
+             x, kv_out = blk(x, mask, kv, use_cache=True)
+             new_kvs.append(kv_out)
+         return self.ln(x), new_kvs
+
+ class ARHead(nn.Module):
+     def __init__(self, d):
+         super().__init__()
+         self.proj = nn.Linear(d, VOCAB)
+     def forward(self, h): return self.proj(h)
+
+ class SATHead(nn.Module):
+     def __init__(self, d, mode="var"):
+         super().__init__()
+         self.proj = nn.Linear(d, VOCAB)
+         self.gate = nn.Linear(d, 2) if mode == "var" else None
+     def forward(self, h_last):
+         return self.proj(h_last), (self.gate(h_last[:, 0]) if self.gate else None)
+
+ # ───────────────────────── Masks ─────────────────────────
+ def causal_mask(n):
+     return torch.triu(torch.full((1, 1, n, n), float("-inf"), device=DEV), 1)
+
+ def sat_mask(n, block=SAT_BLOCK):
+     idx = torch.arange(n, device=DEV)
+     grp = idx.unsqueeze(0) // block
+     allow = (grp.T == grp) | (grp.T > grp)
+     return torch.where(allow, 0.0, float("-inf")).unsqueeze(0).unsqueeze(0)
+
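+ # Illustrative example: sat_mask(4) with block=2 groups tokens as [0, 0, 1, 1];
+ # rows 0-1 attend only within group 0, rows 2-3 attend everywhere (0 = attend),
+ # i.e. attention is causal at block granularity but bidirectional inside a block:
+ #   [[0, 0, -inf, -inf],
+ #    [0, 0, -inf, -inf],
+ #    [0, 0, 0,    0   ],
+ #    [0, 0, 0,    0   ]]
+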
+ # ───────────────────────── Checkpoint helpers ─────────────────────────
+ def save_ckpt(path: pathlib.Path, core, ar_h, sat_h, opt, scaler, meta):
+     path.parent.mkdir(exist_ok=True, parents=True)
+     tmp = path.with_suffix(path.suffix + ".tmp")
+     state = {
+         "core": core.state_dict(), "ar": ar_h.state_dict(), "sat": sat_h.state_dict(),
+         "opt": opt.state_dict(), "scaler": scaler.state_dict(),
+         "cfg": meta.get("cfg"), "tokenizer_id": TOKENIZER_ID,
+         **{k: v for k, v in meta.items() if k != "cfg"}
+     }
+     torch.save(state, tmp, _use_new_zipfile_serialization=False)
+     tmp.replace(path)
+     (path.parent / "latest.json").write_text(json.dumps({"path": str(path), "step": meta["step"]}))
+     print(f"\n✓ saved checkpoint {path.name}")
+
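+ # Note: checkpoints are written to "<name>.pt.tmp" and atomically renamed, so an
+ # interrupted save never clobbers an existing file; latest.json records the newest
+ # path and step (this is also why _is_probably_ckpt ignores *.pt.tmp files).
+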
+ def load_ckpt(path, core, ar_h, sat_h, opt, scaler):
+     p = _resolve_ckpt(path) or path
+     ck = _try_load(p, map_location="cpu")
+     if ck is None: raise FileNotFoundError(f"No valid checkpoint at {p}")
+     core.load_state_dict(ck["core"])
+     ar_h.load_state_dict(ck["ar"])
+     sat_h.load_state_dict(ck["sat"])
+     opt.load_state_dict(ck["opt"])
+     scaler.load_state_dict(ck["scaler"])
+     return ck.get("step", 0), ck.get("seen_tok", 0), ck.get("wall_time", time.time())
+
+ def _safe_load_any(path: pathlib.Path, tgt: nn.Module, key: str | None = None):
+     p = _resolve_ckpt(path) or path
+     if not p.exists(): return 0
+     ck = _try_load(p, map_location="cpu")
+     if ck is None: return 0
+     sd = ck.get(key, ck) if key else ck
+     if isinstance(sd, dict) and "state_dict" in sd: sd = sd["state_dict"]
+     tgt_sd = tgt.state_dict()
+     filt = {k: v for k, v in sd.items() if k in tgt_sd and v.shape == tgt_sd[k].shape}
+     if filt: tgt.load_state_dict(filt, strict=False)
+     return len(filt)
+
+ def infer_cfg_from_ckpt(path: pathlib.Path):
+     p = _resolve_ckpt(path) or path
+     if not p.exists(): return None
+     sd = _try_load(p, map_location="cpu")
+     if sd is None: return None
+     if "cfg" in sd: return dict(sd["cfg"])
+     return None
+
+ # ───────────────────────── Training Logic ─────────────────────────
+ def _parse_grow_plan(s: str) -> List[int]:
+     return sorted(set([int(x.strip()) for x in s.split(",") if x.strip() and int(x.strip()) >= 128]))
+
+ def _count_enabled_params(*modules) -> int:
+     return sum(sum(p.numel() for p in m.parameters()) for m in modules if m is not None)
+
+ def _phase_freeze(core: nn.Module, *, freeze_core: bool, unfreeze_ln: bool, train_emb: bool):
+     for p in core.parameters(): p.requires_grad = not freeze_core
+     if freeze_core:
+         if unfreeze_ln:
+             for blk in core.blocks:
+                 for p in blk.ln1.parameters(): p.requires_grad = True
+                 for p in blk.ln2.parameters(): p.requires_grad = True
+             for p in core.ln.parameters(): p.requires_grad = True
+         if train_emb:
+             for p in core.emb.parameters(): p.requires_grad = True
+
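+ # Usage sketch: _phase_freeze(core, freeze_core=True, unfreeze_ln=True,
+ # train_emb=False) trains only the LayerNorms inside a frozen trunk; the AR/SAT
+ # heads stay trainable regardless, via their own optimizer param groups below.
+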
+ def _train_phase(
+     args, phase_name: str,
+     core, ar_h, sat_h, opt, scaler,
+     start_step, seen_tok, resume_wall_time,
+     cfg, source, steps, block_size, batch_size,
+     chat_cfg: dict,
+     max_ckpts: int,
+     target_tokens_override: Optional[int] = None
+ ):
+     BLOCK = block_size
+     BATCH = batch_size
+
+     # Calculate Targets
+     if target_tokens_override is not None:
+         target_tokens = target_tokens_override
+     else:
+         # Chinchilla-ish: 25 tokens per param (or 51.2 if double)
+         ratio = 51.2 if args.chilla_max_double else 25
+         param_count = _count_enabled_params(core, ar_h, sat_h)
+         target_tokens = int(ratio * param_count)
+
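+     # Illustrative arithmetic (round numbers, an assumption for clarity): a model
+     # with 40M parameters targets 25 * 40e6 = 1.0B tokens, or ~2.05B tokens with
+     # --chilla_max_double (51.2 tokens per param).
+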
+     # If steps are provided, they override the param-based token target for this phase
+     if steps:
+         phase_target_tokens = steps * BLOCK * BATCH
+         # The phase goal is relative to where we started this phase
+         total_tokens_needed = seen_tok + phase_target_tokens
+     else:
+         total_tokens_needed = target_tokens
+     if total_tokens_needed <= seen_tok:
+         print(f"[{phase_name}] target {total_tokens_needed} already reached.")
+         return start_step, seen_tok, resume_wall_time
+
+     # Setup Data Stream
+     stream = token_stream(
+         source, total_tokens_needed, seed=42,
+         chat=chat_cfg.get("chat", False),
+         chat_messages_key=chat_cfg.get("key", "messages"),
+         sft_add_generation_prompt=chat_cfg.get("gen_prompt", False),
+         dataset_field_text=chat_cfg.get("text_field", "text")
+     )
+
+     # Losses
+     ce_tok = nn.CrossEntropyLoss(label_smoothing=0.1)
+     ce_gate = nn.CrossEntropyLoss()
+
+     # Progress Bar
+     pbar = tqdm(total=total_tokens_needed, initial=seen_tok, unit="tok")
+
+     # Growth Plan
+     grow_plan = _parse_grow_plan(args.grow_plan) if args.auto_grow else []
+
+     # State
+     buf: list[int] = []
+     batch_accum: list[list[int]] = []
+     step = start_step
+     steps_since_last_grow = 0
+
+     # Timer setup
+     now_wall = time.time()
+     last_save_mono = time.monotonic() - (now_wall - (resume_wall_time or now_wall))
+
+     print(f"[{phase_name}] Starting. Goal: {total_tokens_needed:,} tokens. Batch={BATCH}, Block={BLOCK}")
+
+     while seen_tok < total_tokens_needed:
+         # Fill Batch
+         try:
+             while len(buf) < BLOCK:
+                 buf.append(next(stream))
+         except StopIteration:
+             break  # Stream exhausted
+
+         seq = buf[:BLOCK]
+         buf = buf[BLOCK:]
+         batch_accum.append(seq)
+
+         if len(batch_accum) < BATCH:
+             continue
+
+         ids = torch.tensor(batch_accum, device=DEV)  # [B, L]
+         batch_accum = []
+
+         tgt_ar = ids.clone()
+
+         try:
+             with amp(args.amp):
+                 # AR Forward
+                 h_ar = core(ids, causal_mask(ids.size(1)))
+                 logits_ar = ar_h(h_ar)[:, :-1]
+                 loss_ar = ce_tok(logits_ar.reshape(-1, VOCAB), tgt_ar[:, 1:].reshape(-1))
+
+                 # SAT Forward
+                 h_sat = core(ids, sat_mask(ids.size(1)))
+                 logits_sat, gate = sat_h(h_sat[:, -SAT_BLOCK:])
+                 tgt_sat = ids[:, 1:SAT_BLOCK+1]
+                 loss_sat = ce_tok(logits_sat.reshape(-1, VOCAB), tgt_sat.reshape(-1))
+                 if gate is not None:
+                     loss_sat += EMIT_LAMBDA * ce_gate(gate, torch.ones(ids.size(0), device=DEV, dtype=torch.long))
+
+                 loss = loss_ar + loss_sat
+
+             scaler.scale(loss).backward()
+             scaler.unscale_(opt)
+             nn.utils.clip_grad_norm_(core.parameters(), 1.0)
+             scaler.step(opt)
+             scaler.update()
+             opt.zero_grad(set_to_none=True)
+
+         except RuntimeError as e:
+             msg = str(e).lower()
+             if "out of memory" in msg or "cuda error" in msg:
+                 # ADAPTIVE OOM STRATEGY: Reduce Batch, then Block
+                 if BATCH > 1:
+                     print(f"\n[{phase_name} OOM] Reducing Batch: {BATCH} -> {BATCH - 1}")
+                     BATCH -= 1
+                 else:
+                     new_block = max(128, BLOCK // 2)
+                     print(f"\n[{phase_name} OOM] Reducing Block: {BLOCK} -> {new_block}")
+                     BLOCK = new_block
+
+                 batch_accum = []  # Drop failed batch
+                 if DEV.type == "cuda": torch.cuda.empty_cache()
+                 steps_since_last_grow = 0
+                 continue
+             raise
+
+         step += 1
+         toks_processed = BLOCK * BATCH
+         seen_tok += toks_processed
+         pbar.update(toks_processed)
+         pbar.set_postfix(loss=f"{loss.item():.3f}", B=BATCH, L=BLOCK)
+
+         # Saving - DELETE FIRST, THEN DUMP
+         if args.save_every_sec > 0:
+             now_mono = time.monotonic()
+             if now_mono - last_save_mono >= args.save_every_sec:
+                 ck_name = f"{phase_name}_step{step:08d}.pt"
+                 # 1. PRUNE OLD CHECKPOINTS FIRST
+                 _prune_checkpoints(pathlib.Path(args.save_dir), phase_name, max_ckpts)
+                 # 2. THEN SAVE NEW CHECKPOINT
+                 save_ckpt(pathlib.Path(args.save_dir) / ck_name, core, ar_h, sat_h, opt, scaler,
+                           meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time()})
+                 last_save_mono = now_mono
+
+         # Auto Grow
+         if args.auto_grow:
+             steps_since_last_grow += 1
+             if steps_since_last_grow >= args.grow_every_steps:
+                 steps_since_last_grow = 0
+                 try:
+                     idx = grow_plan.index(BLOCK)
+                     if idx + 1 < len(grow_plan):
+                         BLOCK = grow_plan[idx + 1]
+                         print(f"[{phase_name} Grow] Block -> {BLOCK}")
+                         if DEV.type == "cuda": torch.cuda.empty_cache()
+                 except ValueError:
+                     grow_plan = sorted(set(grow_plan + [BLOCK]))
+
+     pbar.close()
+
+     # Final Phase Save
+     save_ckpt(pathlib.Path(args.save_dir) / f"{phase_name}_final.pt", core, ar_h, sat_h, opt, scaler,
+               meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time()})
+
+     return step, seen_tok, time.time()
+
+ # ───────────────────────── Main Orchestrator ─────────────────────────
+ def train(args):
+     cfg = PRESETS[args.preset].copy()
+
+     # 1. Warmstart / Config Inference
+     if not args.fresh:
+         src_probe = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
+         prev_cfg = infer_cfg_from_ckpt(src_probe)
+     else: prev_cfg = None
+
+     if prev_cfg:
+         cfg.update({k: v for k, v in prev_cfg.items() if k in cfg})
+         if args.x2 and prev_cfg.get("layers"): cfg["layers"] = max(cfg["layers"], prev_cfg["layers"] * 2)
+
+     if args.rank: cfg["rank"] = args.rank
+     if args.x2 and not prev_cfg: cfg["layers"] *= 2
+
+     print(f"Config: {cfg}")
+
+     # 2. Model Init
+     core = Encoder(cfg).to(DEV)
+     ar_h = ARHead(cfg["d"]).to(DEV)
+     sat_h = SATHead(cfg["d"], mode="var").to(DEV)
+
+     # 3. Load Weights (Safe Warmstart)
+     if not args.fresh:
+         src = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
+         src = _resolve_ckpt(src)
+         if src:
+             loaded = _safe_load_any(src, core, key="core")
+             _safe_load_any(src, ar_h, key="ar")
+             _safe_load_any(src, sat_h, key="sat")
+             if loaded: print(f"Warm-start loaded from {src}")
+
+     # 4. Phase 1: Pretrain Setup
+     _phase_freeze(core, freeze_core=args.freeze_core, unfreeze_ln=args.unfreeze_ln, train_emb=args.train_emb)
+
+     opt = torch.optim.AdamW([
+         {"params": [p for p in core.parameters() if p.requires_grad], "lr": args.lr_core},
+         {"params": ar_h.parameters(), "lr": args.lr_head},
+         {"params": sat_h.parameters(), "lr": args.lr_head},
+     ])
+     scaler = GradScaler(enabled=(args.amp and DEV.type == "cuda"))
+
+     start_step, seen_tok, last_wall = 0, 0, None
+     if args.resume and not args.fresh:
+         start_step, seen_tok, last_wall = load_ckpt(pathlib.Path(args.resume), core, ar_h, sat_h, opt, scaler)
+         print(f"Resumed from step {start_step}")
+
+     # 5. Run Phase 1
+     step, seen_tok, last_wall = _train_phase(
+         args, "pretrain", core, ar_h, sat_h, opt, scaler,
+         start_step, seen_tok, last_wall, cfg,
+         args.source, args.steps,
+         args.block or DEFAULT_BLOCK,
+         args.batch_size or DEFAULT_BATCH,
+         chat_cfg={"chat": args.chat, "key": args.chat_messages_key, "gen_prompt": args.sft_add_generation_prompt, "text_field": args.dataset_field_text},
+         max_ckpts=args.max_ckpts,
+         target_tokens_override=args.target_tokens
+     )
+
+     # 6. Phase 2: Automatic SFT (If requested)
+     # Auto-wire SFT defaults if steps provided but no source
+     if (not args.after_sft_source) and (args.after_sft_steps and args.after_sft_steps > 0):
+         args.after_sft_source = DEFAULT_AFTER_SFT_SOURCES
+         args.after_sft_chat = True
+         if args.after_sft_add_generation_prompt is None: args.after_sft_add_generation_prompt = True
+         if not args.after_sft_block: args.after_sft_block = DEFAULT_AFTER_SFT_BLOCK
+
+     if args.after_sft_source and args.after_sft_steps and args.after_sft_steps > 0:
+         print("\n[Orchestrator] Starting Post-Pretraining SFT Phase...")
+
+         # Re-configure Freezing for SFT
+         _phase_freeze(core,
+                       freeze_core=args.after_sft_freeze_core,
+                       unfreeze_ln=args.after_sft_unfreeze_ln,
+                       train_emb=args.after_sft_train_emb)
+
+         # Re-init Optimizer (Core might be frozen, but Heads must train)
+         opt = torch.optim.AdamW([
+             {"params": [p for p in core.parameters() if p.requires_grad], "lr": args.after_sft_lr_core or args.lr_core},
+             {"params": ar_h.parameters(), "lr": args.after_sft_lr_head or args.lr_head},
+             {"params": sat_h.parameters(), "lr": args.after_sft_lr_head or args.lr_head},
+         ])
+
+         step, seen_tok, last_wall = _train_phase(
+             args, "sft", core, ar_h, sat_h, opt, scaler,
+             step, seen_tok, last_wall, cfg,
+             args.after_sft_source, args.after_sft_steps,
+             args.after_sft_block or DEFAULT_AFTER_SFT_BLOCK,
+             args.batch_size or DEFAULT_BATCH,
+             chat_cfg={
+                 "chat": args.after_sft_chat,
+                 "key": args.after_sft_chat_messages_key,
+                 "gen_prompt": args.after_sft_add_generation_prompt if args.after_sft_add_generation_prompt is not None else args.sft_add_generation_prompt,
+                 "text_field": args.after_sft_dataset_field_text
+             },
+             max_ckpts=args.max_ckpts,
+             target_tokens_override=None
+         )
+
+     # Final Save
+     save_ckpt(pathlib.Path(args.save_dir) / "final.pt", core, ar_h, sat_h, opt, scaler,
+               meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time()})
+     print("🎉 All Training Complete")
+
+ # ───────────────────────── Sampling ─────────────────────────
+ def _apply_penalties(logits, ids, n, rep_p, pres_p, freq_p):
+     if ids.numel() == 0: return logits
+     hist = ids[0, -n:].long() if n > 0 else ids[0].long()
+     uniq, counts = torch.unique(hist, return_counts=True)
+     if pres_p or freq_p:
+         logits[..., uniq] -= (pres_p + freq_p * counts.float())
+     if rep_p != 1.0:
+         sel = logits[..., uniq]
+         logits[..., uniq] = torch.where(sel > 0, sel / rep_p, sel * rep_p)
+     return logits
+
+ def _sample(logits, T, top_k, top_p, min_p, greedy):
+     if greedy: return logits.argmax(-1, keepdim=True)
+     probs = (logits / max(T, 1e-8)).softmax(-1)
+     if top_k:
+         v, i = torch.topk(probs, min(top_k, probs.size(-1)))
+         probs = torch.zeros_like(probs).scatter_(-1, i, v)
+     if top_p < 1.0:
+         s_probs, s_idx = torch.sort(probs, descending=True, dim=-1)
+         probs = torch.zeros_like(probs).scatter_(-1, s_idx, s_probs * (torch.cumsum(s_probs, -1) <= top_p).float())
+     if min_p > 0: probs[probs < min_p] = 0
+     if probs.sum() == 0: return logits.argmax(-1, keepdim=True)
+     return probs.div_(probs.sum()).multinomial(1)
+
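+ # Filtering order in _sample above: temperature -> top-k -> top-p (nucleus) ->
+ # min-p floor; if the filters zero out every candidate, it falls back to argmax.
+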
+ @torch.no_grad()
+ def infer(args):
+     path = _resolve_ckpt(pathlib.Path(args.ckpt)) or pathlib.Path(args.ckpt)
+     sd = torch.load(path, map_location="cpu")
+     cfg = sd["cfg"]
+
+     core = Encoder(cfg).to(DEV)
+     ar_h = ARHead(cfg["d"]).to(DEV)
+     sat_h = SATHead(cfg["d"]).to(DEV)
+
+     core.load_state_dict(sd["core"])
+     ar_h.load_state_dict(sd["ar"])
+     sat_h.load_state_dict(sd["sat"])
+
+     # Encode prompt and track length
+     prompt_tokens = tok.encode(args.prompt)
+     prompt_len = len(prompt_tokens)
+     ids = torch.tensor([prompt_tokens], device=DEV)
+     if ids.size(1) == 0:
+         ids = torch.tensor([[EOS]], device=DEV)
+         prompt_len = 1
+
+     print(f"{Colors.INFO}Generating ({args.mode})...{Colors.RESET}")
+     start = time.time()
+
+     if args.mode == "ar":
+         h, kvs = core(ids, causal_mask(ids.size(1)), use_cache=True)
+         for _ in range(args.max_new):
+             logits = ar_h(h)[:, -1]
+             logits = _apply_penalties(logits, ids, args.penalty_last_n, args.repetition_penalty, args.presence_penalty, args.frequency_penalty)
+             nxt = _sample(logits, args.temperature, args.top_k, args.top_p, args.min_p, args.greedy)
+             ids = torch.cat([ids, nxt], 1)
+             h, kvs = core(ids[:, -1:], None, kv_caches=kvs, use_cache=True)
+             # Stop on EOS
+             if EOS is not None and nxt.item() == EOS:
+                 break
+     else:
+         added = 0
+         while added < args.max_new:
+             h = core(ids, sat_mask(ids.size(1)))
+             logits_all, gate = sat_h(h[:, -SAT_BLOCK:])
+             stride = 2 if (not args.var or gate is None) else (gate.softmax(-1).multinomial(1).item() + 1)
+
+             for i in range(int(stride)):
+                 logits = logits_all[:, i]
+                 logits = _apply_penalties(logits, ids, args.penalty_last_n, args.repetition_penalty, args.presence_penalty, args.frequency_penalty)
+                 nxt = _sample(logits, args.temperature, args.top_k, args.top_p, args.min_p, args.greedy)
+                 ids = torch.cat([ids, nxt], 1)
+                 added += 1
+                 if added >= args.max_new: break
+                 # Stop on EOS
+                 if EOS is not None and nxt.item() == EOS:
+                     added = args.max_new
+                     break
+
+     # Decode separately for coloring
+     all_tokens = ids[0].tolist()
+     prompt_text = tok.decode(all_tokens[:prompt_len], skip_special_tokens=True)
+     gen_text = tok.decode(all_tokens[prompt_len:], skip_special_tokens=True)
+
+     # Print with colors
+     print(f"\n{Colors.BOLD}─── Output ───{Colors.RESET}")
+     print(f"{Colors.PROMPT}{prompt_text}{Colors.RESET}{Colors.GEN}{gen_text}{Colors.RESET}")
+     print(f"{Colors.BOLD}──────────────{Colors.RESET}")
+     print(f"{Colors.INFO}[{time.time()-start:.2f}s | {len(all_tokens)-prompt_len} tokens generated]{Colors.RESET}")
+
+ # ───────────────────────── CLI ─────────────────────────
+ def main():
+     ap = argparse.ArgumentParser()
+     sub = ap.add_subparsers(dest="cmd", required=True)
+
+     tr = sub.add_parser("train")
+     tr.add_argument("--preset", choices=PRESETS, default="small")
+     tr.add_argument("--rank", type=int)
+     tr.add_argument("--block", type=int, default=DEFAULT_BLOCK)
+     tr.add_argument("--batch_size", type=int, default=DEFAULT_BATCH)
+     tr.add_argument("--source", default=DEFAULT_PRETRAIN_SOURCES)
+     tr.add_argument("--target_tokens", type=int)
+     tr.add_argument("--steps", type=int)
+     tr.add_argument("--amp", action="store_true")
+     tr.add_argument("--save_every_sec", type=int, default=DEFAULT_SAVE_SEC)
+     tr.add_argument("--save_dir", default=str(CKDIR))
+     tr.add_argument("--resume", type=str)
+     tr.add_argument("--x2", action="store_true")
+     tr.add_argument("--warmstart_from", type=str)
+     tr.add_argument("--fresh", action="store_true")
+     tr.add_argument("--max_ckpts", type=int, default=None)
+     tr.add_argument("--chilla_max_double", action="store_true")
+
+     # Phase 1 freeze options
+     tr.add_argument("--freeze_core", action="store_true")
+     tr.add_argument("--unfreeze_ln", action="store_true")
+     tr.add_argument("--train_emb", action="store_true")
+     tr.add_argument("--lr_core", type=float, default=LR_CORE)
+     tr.add_argument("--lr_head", type=float, default=LR_HEAD)
+
+     # Chat / Data
+     tr.add_argument("--chat", action="store_true")
+     tr.add_argument("--chat_messages_key", default="messages")
+     tr.add_argument("--dataset_field_text", default="text")
+     tr.add_argument("--sft_add_generation_prompt", action="store_true")
+
+     # Auto Grow
+     tr.add_argument("--auto_grow", action="store_true")
+     tr.add_argument("--grow_plan", default="576,640,768,896,1024,1122")
+     tr.add_argument("--grow_every_steps", type=int, default=50000)
+
+     # Phase 2: SFT
+     tr.add_argument("--after_sft_source", default="")
+     tr.add_argument("--after_sft_steps", type=int, default=0)
+     tr.add_argument("--after_sft_chat", action="store_true")
+     tr.add_argument("--after_sft_chat_messages_key", default="messages")
+     tr.add_argument("--after_sft_dataset_field_text", default="text")
+     # argparse's type=bool treats any non-empty string as True; parse explicitly
+     tr.add_argument("--after_sft_add_generation_prompt",
+                     type=lambda s: s.lower() in {"1", "true", "yes"}, default=None)
+     tr.add_argument("--after_sft_block", type=int, default=0)
+     tr.add_argument("--after_sft_freeze_core", action="store_true")
+     tr.add_argument("--after_sft_unfreeze_ln", action="store_true")
+     tr.add_argument("--after_sft_train_emb", action="store_true")
+     tr.add_argument("--after_sft_lr_core", type=float, default=0.0)
+     tr.add_argument("--after_sft_lr_head", type=float, default=0.0)
+
+     inf = sub.add_parser("infer")
+     inf.add_argument("--mode", choices=["ar", "sat"], required=True)
+     inf.add_argument("--ckpt", required=True)
+     inf.add_argument("--prompt", required=True)
+     inf.add_argument("--max_new", type=int, default=120)
+     inf.add_argument("--temperature", type=float, default=1.0)
+     inf.add_argument("--greedy", action="store_true")
+     inf.add_argument("--top_k", type=int, default=0)
+     inf.add_argument("--top_p", type=float, default=1.0)
+     inf.add_argument("--min_p", type=float, default=0.0)
+     inf.add_argument("--repetition_penalty", type=float, default=1.0)
+     inf.add_argument("--presence_penalty", type=float, default=0.0)
+     inf.add_argument("--frequency_penalty", type=float, default=0.0)
+     inf.add_argument("--penalty_last_n", type=int, default=64)
+     inf.add_argument("--var", action="store_true")
+
+     args = ap.parse_args()
+     if args.cmd == "train": train(args)
+     else: infer(args)
+
+ if __name__ == "__main__":
+     main()
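+
+ # Example invocations (illustrative; flags as defined in main() above):
+ #   python C43.py train --preset small --amp --steps 1000
+ #   python C43.py train --preset base --amp --after_sft_steps 2000   # auto-wires SFT defaults
+ #   python C43.py infer --mode ar --ckpt ckpts_joint/final.pt --prompt "Hello" --max_new 64
+ #   python C43.py infer --mode sat --var --ckpt ckpts_joint/final.pt --prompt "Hello"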
pretrain_step00902666.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0266ca20874996613b2ebd13b3e37b7430743c24f2c84e7fce9e640cd91d4eb8
+ size 5350537875