OpenTransformer committed on
Commit
9429964
·
verified ·
1 Parent(s): 78f9c71

Backup script n.py

Files changed (1)
  1. scripts/n.py +1100 -0
scripts/n.py ADDED
@@ -0,0 +1,1100 @@
+ #!/usr/bin/env python3
+
+ # n.py — Joint AR+SAT Trainer with Expansion Ratio Testing
+ # Enhanced inference: checkpoint name, tok/s, UK time
+
+ from __future__ import annotations
+ import argparse, json, math, pathlib, random, time, os, sys
+ from contextlib import nullcontext
+ from typing import Dict, Any, List, Optional, Tuple
+ from datetime import datetime, timedelta, timezone
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from datasets import load_dataset, DownloadConfig
+ from stream_loader import ScraperStreamDataset
+ from transformers import AutoTokenizer, logging as hf_log
+ from tqdm.auto import tqdm
+
+ # Auto-rotating log to prevent context-window suicide
+ try:
+     from rotating_log import install_rotating_log
+     install_rotating_log()
+ except ImportError:
+     pass  # Running without rotation
+ # ───────────────────────── HF Auto-Upload ─────────────────────────
+ def _hf_upload_checkpoint(ckpt_path: str):
+     """Upload checkpoint to HuggingFace in background thread"""
+     import subprocess
+     import threading
+     def _upload():
+         try:
+             import os
+             token = os.environ.get("HF_TOKEN", "")
+             if not token:
+                 print("[HF] No HF_TOKEN, skipping upload")
+                 return
+             result = subprocess.run(
+                 ["python", "/workspace/hf_upload.py", str(ckpt_path)],
+                 capture_output=True, text=True, timeout=600
+             )
+             if result.returncode == 0:
+                 print(f"[HF] Uploaded {ckpt_path}")
+             else:
+                 print(f"[HF] Failed: {result.stderr[:100]}")
+         except Exception as e:
+             print(f"[HF] Error: {e}")
+     threading.Thread(target=_upload, daemon=True).start()
+
+ # ───────────────────────── Hot-Reload Config ─────────────────────────
+ HOT_CONFIG_PATH = pathlib.Path("/workspace/hot_config.json")
+ _hot_config_cache = {}
+ _hot_config_mtime = 0
+
+ def _load_hot_config():
+     """Load config overrides from hot_config.json if modified"""
+     global _hot_config_cache, _hot_config_mtime
+     if not HOT_CONFIG_PATH.exists():
+         return _hot_config_cache
+     try:
+         mtime = HOT_CONFIG_PATH.stat().st_mtime
+         if mtime > _hot_config_mtime:
+             with open(HOT_CONFIG_PATH) as f:
+                 _hot_config_cache = json.load(f)
+             _hot_config_mtime = mtime
+             print(f"[hot_config] Reloaded: {list(_hot_config_cache.keys())}")
+     except Exception as e:
+         print(f"[hot_config] Error: {e}")
+     return _hot_config_cache
+
+ def hot_get(key: str, default):
+     """Get config value, hot-reloadable. Checked on every call."""
+     cfg = _load_hot_config()
+     return cfg.get(key, default)
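+ # Usage sketch (keys mirror the hot_get call sites later in this file):
+ # writing, e.g., {"pause_training": true, "save_every_sec": 3600} to
+ # /workspace/hot_config.json pauses the training loop and retunes the
+ # checkpoint cadence without restarting the process.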
+
+
+
+
+ # ───────────────────────── ANSI Colors ─────────────────────────
+ class Colors:
+     RESET = "\033[0m"
+     BOLD = "\033[1m"
+     PROMPT = "\033[36m"
+     GEN = "\033[0m"
+     INFO = "\033[90m"
+     WARN = "\033[93m"
+
+ # ───────────────────────── Globals ─────────────────────────
+ hf_log.set_verbosity_error()
+ DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ torch.backends.cuda.matmul.allow_tf32 = True
+ try:
+     torch.set_float32_matmul_precision("high")
+ except Exception:
+     pass
+
+ TOKENIZER_ID = os.environ.get("TOKENIZER_ID", "deepseek-ai/DeepSeek-V3.2")
+ tok = AutoTokenizer.from_pretrained(TOKENIZER_ID, use_fast=True, trust_remote_code=True)
+ if tok.pad_token is None:
+     tok.add_special_tokens({"pad_token": "<|pad|>"})
+
+ VOCAB, EOS = (
+     max(tok.get_vocab().values()) + 1,
+     tok.eos_token_id if tok.eos_token_id is not None else tok.sep_token_id
+ )
+
+ # ───────────────────────── PRESETS ─────────────────────────
+ PRESETS: Dict[str, Dict[str, int]] = {
+     "femto_1x": dict(d=16, layers=1, heads=1, rank=16),
+     "femto_12x": dict(d=16, layers=1, heads=1, rank=192),
+     "femto_24x": dict(d=16, layers=1, heads=1, rank=384),
+     "pico_1x": dict(d=32, layers=1, heads=2, rank=16),
+     "pico_3x": dict(d=32, layers=1, heads=2, rank=48),
+     "pico_6x": dict(d=32, layers=1, heads=2, rank=96),
+     "pico_12x": dict(d=32, layers=1, heads=2, rank=192),
+     "pico_24x": dict(d=32, layers=1, heads=2, rank=384),
+     "pico_48x": dict(d=32, layers=1, heads=2, rank=768),
+     "nano_1x": dict(d=64, layers=2, heads=4, rank=16),
+     "nano_3x": dict(d=64, layers=2, heads=4, rank=48),
+     "nano_6x": dict(d=64, layers=2, heads=4, rank=96),
+     "nano_12x": dict(d=64, layers=2, heads=4, rank=192),
+     "nano_24x": dict(d=64, layers=2, heads=4, rank=384),
+     "nano_48x": dict(d=64, layers=2, heads=4, rank=768),
+     "nano_96x": dict(d=64, layers=2, heads=4, rank=1536),
+     "micro_3x": dict(d=128, layers=4, heads=8, rank=48),
+     "micro_6x": dict(d=128, layers=4, heads=8, rank=96),
+     "micro_12x": dict(d=128, layers=4, heads=8, rank=192),
+     "micro_24x": dict(d=128, layers=4, heads=8, rank=384),
+     "small": dict(d=512, layers=8, heads=16, rank=64),
+     "smallx2": dict(d=512, layers=16, heads=16, rank=64),
+     "base": dict(d=768, layers=12, heads=24, rank=96),
+     "base18": dict(d=768, layers=18, heads=24, rank=96),
+     "large": dict(d=1024, layers=24, heads=16, rank=128),
+ }
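+ # Naming convention, worked example: for "nano_3x", d=64 with 4 heads gives
+ # d_k = 64 / 4 = 16, and rank=48 yields an expansion ratio of 48 / 16 = 3x.
+ # A ratio below 1 is compression, exactly 1 identity; see print_expansion_info.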
+
+ DEFAULT_BLOCK = 1122
+ DEFAULT_BATCH = 4
+ SAT_BLOCK = 2
+ LR_CORE, LR_HEAD = 5e-5, 2e-4
+ EMIT_LAMBDA = 0.1
+ DEFAULT_SAVE_SEC = 24 * 3600
+ CKDIR = pathlib.Path("ckpts_expansion")
+
+ DEFAULT_PRETRAIN_SOURCES = "cerebras/SlimPajama-627B"
+ DEFAULT_AFTER_SFT_SOURCES = "mlabonne/opc-sft-stage2-chat,HuggingFaceH4/ultrachat_200k"
+ DEFAULT_AFTER_SFT_BLOCK = 1122
+
+ # ───────────────────────── UK Time Helper ─────────────────────────
+ def get_uk_time() -> str:
+     utc_now = datetime.now(timezone.utc)
+     year = utc_now.year
+     march_last = datetime(year, 3, 31, 1, 0, tzinfo=timezone.utc)
+     while march_last.weekday() != 6:
+         march_last = march_last.replace(day=march_last.day - 1)
+     oct_last = datetime(year, 10, 31, 1, 0, tzinfo=timezone.utc)
+     while oct_last.weekday() != 6:
+         oct_last = oct_last.replace(day=oct_last.day - 1)
+     if march_last <= utc_now < oct_last:
+         uk_offset = 1
+         tz_name = "BST"
+     else:
+         uk_offset = 0
+         tz_name = "GMT"
+     uk_time = utc_now + timedelta(hours=uk_offset)
+     return uk_time.strftime(f'%Y-%m-%d %H:%M:%S {tz_name}')
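+ # The DST arithmetic above hand-rolls the Europe/London rules (BST starts and
+ # ends at 01:00 UTC on the last Sundays of March and October). On Python 3.9+
+ # an equivalent would be datetime.now(zoneinfo.ZoneInfo("Europe/London")).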
+
+ # ───────────────────────── Utilities ─────────────────────────
+ def rng_state():
+     if DEV.type == "cuda":
+         try:
+             return torch.cuda.get_rng_state(DEV)
+         except TypeError:
+             return torch.cuda.get_rng_state()
+     return torch.get_rng_state()
+
+ def _is_probably_ckpt(path: pathlib.Path) -> bool:
+     try:
+         return path.is_file() and path.suffix == ".pt" and not path.name.endswith(".pt.tmp") and path.stat().st_size > (1 << 20)
+     except Exception:
+         return False
+
+ def _resolve_ckpt(path: pathlib.Path) -> pathlib.Path | None:
+     try:
+         if path.is_dir():
+             cands = sorted([p for p in path.glob("*.pt") if _is_probably_ckpt(p)],
+                            key=lambda p: p.stat().st_mtime, reverse=True)
+             return cands[0] if cands else None
+         if path.suffix == ".tmp":
+             solid = path.with_suffix("")
+             return solid if _is_probably_ckpt(solid) else _resolve_ckpt(path.parent)
+         return path if _is_probably_ckpt(path) else _resolve_ckpt(path.parent)
+     except Exception:
+         return None
+
+ def _try_load(path: pathlib.Path, map_location="cpu"):
+     try:
+         # honor the caller's map_location (previously hard-coded to "cpu")
+         return torch.load(path, map_location=map_location)
+     except Exception as e:
+         print(f"[ckpt-skip] {path} not usable: {e}")
+         return None
+
+ def _prune_checkpoints(save_dir: pathlib.Path, phase_name: str, max_ckpts: int):
+     if max_ckpts is None or max_ckpts <= 0:
+         return
+     try:
+         pattern = f"{phase_name}_step*.pt"
+         ckpts = sorted(
+             [p for p in save_dir.glob(pattern) if _is_probably_ckpt(p)],
+             key=lambda p: p.stat().st_mtime
+         )
+         excess = len(ckpts) - max_ckpts
+         if excess > 0:
+             for p in ckpts[:excess]:
+                 try:
+                     p.unlink()
+                     print(f"  [prune] deleted old {p.name}")
+                 except Exception:
+                     pass
+     except Exception as e:
+         print(f"[ckpt-prune] error: {e}")
+
+ def print_expansion_info(cfg: dict, tie_weights: bool = False):
+     d_k = cfg["d"] // cfg["heads"]
+     rank = cfg["rank"]
+     ratio = rank / d_k
+     regime = "COMPRESSION" if ratio < 1 else ("IDENTITY" if ratio == 1 else "EXPANSION")
+     tie_str = "YES" if tie_weights else "NO"
+     print(f"┌──────────────────────────────────────────┐")
+     print(f"│ TUNEABLE ATTENTION CONFIG │")
+     print(f"├──────────────────────────────────────────")
+     print(f"│ d_model: {cfg['d']:4d} heads: {cfg['heads']:2d} d_k: {d_k:3d} │")
+     print(f"│ layers: {cfg['layers']:4d} tie_weights: {tie_str:3s} │")
+     print(f"│ rank: {rank:4d} ratio: {ratio:.1f}x [{regime:11s}] │")
+     print(f"└──────────────────────────────────────────┘")
+
+ # ───────────────────────── AMP helper ─────────────────────────
+ try:
+     from torch.amp import autocast as _ac, GradScaler
+ except ImportError:
+     from torch.cuda.amp import autocast as _ac, GradScaler
+
+ def _auto_amp_dtype():
+     if DEV.type == "cuda":
+         try:
+             if torch.cuda.is_bf16_supported(): return torch.bfloat16
+             return torch.float16
+         except Exception: return torch.float16
+     return torch.float32
+
+ def amp(enabled: bool):
+     return nullcontext() if not (enabled and DEV.type == "cuda") else _ac(device_type="cuda", dtype=_auto_amp_dtype())
+
+ # ───────────────────────── Chat & Data Stream ─────────────────────────
+ def _coerce_role(r: str) -> str:
+     r = (r or "").lower()
+     if r in {"user", "human", "customer"}: return "user"
+     if r in {"assistant", "gpt", "bot"}: return "assistant"
+     if r in {"system", "context"}: return "system"
+     return r or "user"
+
+ def _render_chat_text_from_ex(ex: dict, messages_key: str, add_generation_prompt: bool) -> Optional[str]:
+     msgs = ex.get(messages_key)
+     if msgs is None:
+         for alt in ("conversations", "dialog", "turns"):
+             if isinstance(ex.get(alt), list):
+                 msgs = ex[alt]; break
+     if isinstance(msgs, list) and msgs and isinstance(msgs[0], dict):
+         try:
+             norm = []
+             for m in msgs:
+                 role = _coerce_role(m.get("role", "")); content = m.get("content", m.get("text", ""))
+                 if not isinstance(content, str): continue
+                 norm.append({"role": role, "content": content})
+             if not norm: return None
+             return tok.apply_chat_template(norm, tokenize=False, add_generation_prompt=add_generation_prompt)
+         except Exception: return None
+     for a, b in (("prompt", "response"), ("instruction", "output"), ("question", "answer")):
+         if isinstance(ex.get(a), str) and isinstance(ex.get(b), str):
+             return f"User: {ex[a]}\nAssistant: {ex[b]}"
+     return None
+
+ def _open_stream_one(ds_name: str, seed: int, streaming: bool = True):
+     # Custom scraper streaming support
+     if ds_name == "scraper" or ds_name.startswith("http://"):
+         url = ds_name if ds_name.startswith("http://") else "http://localhost:8888"
+         print(f"[stream] Using scraper: {url}")
+         return iter(ScraperStreamDataset(server_url=url, batch_size=100))
+     dc = DownloadConfig(max_retries=5, use_etag=True, resume_download=True)
+     if ":" in ds_name: base, config = ds_name.split(":", 1)
+     else: base, config = ds_name, None
+     if not streaming:
+         print(f"[download] Downloading {ds_name} (non-streaming)...")
+     if base == "json":
+         data_files = {"train": config}
+         ds = load_dataset("json", data_files=data_files, split="train", streaming=streaming, download_config=dc)
+     else:
+         ds = load_dataset(base, config, split="train", streaming=streaming, download_config=dc) if config else \
+              load_dataset(base, split="train", streaming=streaming, download_config=dc)
+     if streaming:
+         return iter(ds.shuffle(buffer_size=10_000, seed=seed))
+     else:
+         print(f"[download] Got {len(ds):,} examples. Shuffling...")
+         ds = ds.shuffle(seed=seed)
+         return iter(ds)
+
+ def token_stream(ds_names: str, target: int, seed: int = 42,
+                  chat: bool = False, chat_messages_key: str = "messages",
+                  sft_add_generation_prompt: bool = False, dataset_field_text: str = "text",
+                  streaming: bool = True):
+     sources = [s.strip() for s in ds_names.split(",") if s.strip()]
+     if not sources: return
+     src_idx = 0; emitted = 0; it = None; attempts = 0; backoff_base = 2.0
+     while emitted < target:
+         try:
+             if it is None: it = _open_stream_one(sources[src_idx], seed, streaming=streaming)
+             ex = next(it)
+             text = None
+             if isinstance(ex, dict):
+                 if chat:
+                     text = _render_chat_text_from_ex(ex, chat_messages_key, sft_add_generation_prompt)
+                 if text is None:
+                     if dataset_field_text and isinstance(ex.get(dataset_field_text), str):
+                         text = ex[dataset_field_text]
+                     elif isinstance(ex.get("text"), str):
+                         text = ex["text"]
+             if not isinstance(text, str):
+                 attempts = 0; continue
+             enc = tok.encode(text)
+             if EOS is not None and (len(enc) == 0 or enc[-1] != EOS):
+                 enc = enc + [EOS]
+             for t in enc:
+                 yield t
+                 emitted += 1
+                 if emitted >= target: return
+             attempts = 0
+         except StopIteration:
+             it = None; src_idx = (src_idx + 1) % len(sources)
+         except Exception as e:
+             attempts += 1
+             sleep_s = min(60.0, backoff_base ** min(attempts, 6))
+             print(f"[stream-retry] {sources[src_idx]} error: {type(e).__name__}, sleeping {sleep_s:.1f}s")
+             time.sleep(sleep_s); it = None
+             if attempts % 5 == 0 and len(sources) > 1:
+                 src_idx = (src_idx + 1) % len(sources)
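+ # Usage sketch: token_stream is a flat token-id generator over one or more
+ # comma-separated HF datasets (or the custom scraper endpoint), e.g.
+ #   stream = token_stream("cerebras/SlimPajama-627B", target=10_000)
+ # It yields up to `target` ids, appends EOS per document, and rotates
+ # sources with exponential backoff on transient errors.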
+
+ # ───────────────────────── ALiBi ─────────────────────────
+ def _alibi_slopes(n_heads: int):
+     def pow2slopes(n):
+         start = 2 ** (-2 ** -(math.log2(n) - 3))
+         ratio = start
+         return [start * (ratio ** i) for i in range(n)]
+     if math.log2(n_heads).is_integer(): vals = pow2slopes(n_heads)
+     else:
+         closest = 2 ** math.floor(math.log2(n_heads))
+         vals = pow2slopes(closest)
+         extra = pow2slopes(2 * closest)
+         vals += extra[0::2][: n_heads - closest]
+     return torch.tensor(vals, device=DEV).view(1, n_heads, 1, 1)
+
+ def alibi_bias(n_heads: int, n_tokens: int):
+     i = torch.arange(n_tokens, device=DEV).view(1, 1, n_tokens, 1)
+     j = torch.arange(n_tokens, device=DEV).view(1, 1, 1, n_tokens)
+     dist = (j - i).clamp_min(0)
+     return -_alibi_slopes(n_heads) * dist
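+ # Example slopes (from the closed form above): for n_heads = 4, start =
+ # 2 ** (-2 ** -(log2(4) - 3)) = 2 ** -2 and ratio = start, giving per-head
+ # slopes [2**-2, 2**-4, 2**-6, 2**-8]. Note the distance here is (j - i)
+ # clamped at 0, so as written the bias is zero toward past keys and only
+ # penalizes lookahead (j > i), which the SAT block mask permits within a block.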
+
+ # ───────────────────────── Model components ─────────────────────────
+ class TuneableAttentionMHA(nn.Module):
+     def __init__(self, d: int, h: int, r: int, use_relpos: bool = True):
+         super().__init__()
+         assert d % h == 0
+         self.h, self.dk, self.r = h, d // h, r
+         self.use_relpos = use_relpos
+         self.q = nn.Linear(d, d, bias=False)
+         self.k = nn.Linear(d, d, bias=False)
+         self.v = nn.Linear(d, d, bias=False)
+         self.U = nn.Parameter(torch.randn(self.dk, r))
+         nn.init.orthogonal_(self.U)
+         self.proj = nn.Linear(h * self.dk, d, bias=False)
+         self.drop = nn.Dropout(0.1)
+
+     def _proj_qk(self, x):
+         B, N, _ = x.shape
+         return (x.view(B, N, self.h, self.dk).transpose(1, 2) @ self.U)
+
+     def _reshape_v(self, x):
+         B, N, _ = x.shape
+         return x.view(B, N, self.h, self.dk).transpose(1, 2)
+
+     def forward(self, x, mask=None, rel_bias_tokens=None, kv_cache=None, use_cache=False):
+         q = self._proj_qk(self.q(x))
+         k_new = self._proj_qk(self.k(x))
+         v_new = self._reshape_v(self.v(x))
+         if kv_cache is None:
+             k, v = k_new, v_new
+         else:
+             k_cached, v_cached = kv_cache
+             if use_cache:
+                 k = torch.cat([k_cached, k_new], dim=2)
+                 v = torch.cat([v_cached, v_new], dim=2)
+             else:
+                 k, v = k_new, v_new
+         att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk)
+         if self.use_relpos and rel_bias_tokens is not None:
+             att = att + alibi_bias(self.h, rel_bias_tokens)[:, :, -q.size(2):, :]
+         if mask is not None:
+             att = att + mask
+         z = (att.softmax(-1) @ v).transpose(1, 2).reshape(x.size(0), x.size(1), -1)
+         out = self.drop(self.proj(z))
+         return (out, (k, v)) if use_cache else out
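+ # Shape walkthrough (descriptive, matching the code above): for input x of
+ # shape (B, N, d), _proj_qk reshapes to (B, h, N, d_k) and multiplies by the
+ # shared basis U of shape (d_k, r), so queries/keys live in a rank-r space;
+ # this r/d_k ratio is the "expansion" that print_expansion_info reports.
+ # Values stay at (B, h, N, d_k), and scores are still scaled by sqrt(d_k)
+ # even though q/k have width r.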
+
+
+ class Block(nn.Module):
+     def __init__(self, d: int, h: int, r: int):
+         super().__init__()
+         self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
+         self.mha = TuneableAttentionMHA(d, h, r)
+         self.ff = nn.Sequential(nn.Linear(d, 4 * d), nn.ReLU(), nn.Linear(4 * d, d))
+
+     def forward(self, x, mask, kv=None, use_cache=False, total_seq_len=None):
+         if use_cache:
+             y, new_kv = self.mha(self.ln1(x), mask, rel_bias_tokens=total_seq_len, kv_cache=kv, use_cache=True)
+             x = x + y + self.ff(self.ln2(x + y))
+             return x, new_kv
+         else:
+             n = x.size(1)
+             x = x + self.mha(self.ln1(x), mask, rel_bias_tokens=n)
+             return x + self.ff(self.ln2(x))
+
+
+ class Encoder(nn.Module):
+     def __init__(self, cfg, tie_weights: bool = False):
+         super().__init__()
+         d, l, h, r = cfg["d"], cfg["layers"], cfg["heads"], cfg["rank"]
+         self.emb = nn.Embedding(VOCAB, d)
+         self.blocks = nn.ModuleList([Block(d, h, r) for _ in range(l)])
+         self.ln = nn.LayerNorm(d)
+         self.tie_weights = tie_weights
+
+     def forward(self, ids, mask, kv_caches=None, use_cache=False, total_seq_len=None):
+         x = self.emb(ids)
+         if not use_cache:
+             for blk in self.blocks:
+                 x = blk(x, mask)
+             return self.ln(x)
+         new_kvs = []
+         for i, blk in enumerate(self.blocks):
+             kv = kv_caches[i] if kv_caches else None
+             x, kv_out = blk(x, mask, kv, use_cache=True, total_seq_len=total_seq_len)
+             new_kvs.append(kv_out)
+         return self.ln(x), new_kvs
+
+
+ class ARHead(nn.Module):
+     def __init__(self, d, tie_weights: bool = False, embedding_weight: nn.Parameter = None):
+         super().__init__()
+         self.tie_weights = tie_weights
+         if tie_weights and embedding_weight is not None:
+             self.proj = nn.Linear(d, VOCAB, bias=False)
+             self.proj.weight = embedding_weight
+         else:
+             self.proj = nn.Linear(d, VOCAB)
+
+     def forward(self, h):
+         return self.proj(h)
+
+
+ class SATHead(nn.Module):
+     def __init__(self, d, mode="var"):
+         super().__init__()
+         self.proj = nn.Linear(d, VOCAB)
+         self.gate = nn.Linear(d, 2) if mode == "var" else None
+     def forward(self, h_last):
+         return self.proj(h_last), (self.gate(h_last[:, 0]) if self.gate else None)
+
+
+ # ───────────────────────── Masks ─────────────────────────
+ def causal_mask(n):
+     return torch.triu(torch.full((1, 1, n, n), float("-inf"), device=DEV), 1)
+
+ def sat_mask(n, block=SAT_BLOCK):
+     idx = torch.arange(n, device=DEV)
+     grp = idx.unsqueeze(0) // block
+     allow = (grp.T == grp) | (grp.T > grp)
+     return torch.where(allow, 0.0, float("-inf")).unsqueeze(0).unsqueeze(0)
+
+ def sat_mask_cached(new_len: int, cached_len: int, block=SAT_BLOCK):
+     total_len = cached_len + new_len
+     mask = torch.zeros((1, 1, new_len, total_len), device=DEV)
+     return mask
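+ # Mask semantics, sketched for n=4, SAT_BLOCK=2: groups = [0, 0, 1, 1], and
+ # position i may attend to j iff group(i) >= group(j), so attention is
+ # bidirectional *within* a 2-token block and causal *across* blocks. Note
+ # that sat_mask_cached, as written, returns an all-zero bias, i.e. cached
+ # decoding applies no masking at all to the new or cached positions.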
+
+
+ # ───────────────────────── Checkpoint helpers ─────────────────────────
+ def save_ckpt(path: pathlib.Path, core, ar_h, sat_h, opt, scaler, meta):
+     path.parent.mkdir(exist_ok=True, parents=True)
+     tmp = path.with_suffix(path.suffix + ".tmp")
+     state = {
+         "core": core.state_dict(), "ar": ar_h.state_dict(), "sat": sat_h.state_dict(),
+         "opt": opt.state_dict(), "scaler": scaler.state_dict(),
+         "cfg": meta.get("cfg"), "tokenizer_id": TOKENIZER_ID,
+         "tie_weights": meta.get("tie_weights", False),
+         **{k: v for k, v in meta.items() if k not in ("cfg", "tie_weights")}
+     }
+     torch.save(state, tmp, _use_new_zipfile_serialization=False)
+     tmp.replace(path)
+     (path.parent / "latest.json").write_text(json.dumps({"path": str(path), "step": meta["step"]}))
+     print(f"\n✓ saved checkpoint {path.name}")
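+ # Write-then-rename keeps checkpoints crash-safe: the state dict lands in a
+ # "*.pt.tmp" sibling first, and tmp.replace(path) promotes it atomically, so
+ # _is_probably_ckpt / _resolve_ckpt never pick up a half-written file.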
+
+ def load_ckpt(path, core, ar_h, sat_h, opt, scaler):
+     p = _resolve_ckpt(path) or path
+     ck = _try_load(p, map_location="cpu")
+     if ck is None: raise FileNotFoundError(f"No valid checkpoint at {p}")
+     core.load_state_dict(ck["core"])
+     ar_h.load_state_dict(ck["ar"])
+     sat_h.load_state_dict(ck["sat"])
+     opt.load_state_dict(ck["opt"])
+     scaler.load_state_dict(ck["scaler"])
+     return ck.get("step", 0), ck.get("seen_tok", 0), ck.get("wall_time", time.time())
+
+ def _safe_load_any(path: pathlib.Path, tgt: nn.Module, key: str | None = None):
+     p = _resolve_ckpt(path) or path
+     if not p.exists(): return 0
+     ck = _try_load(p, map_location="cpu")
+     if ck is None: return 0
+     sd = ck.get(key, ck) if key else ck
+     if isinstance(sd, dict) and "state_dict" in sd: sd = sd["state_dict"]
+     tgt_sd = tgt.state_dict()
+     filt = {k: v for k, v in sd.items() if k in tgt_sd and v.shape == tgt_sd[k].shape}
+     if filt: tgt.load_state_dict(filt, strict=False)
+     return len(filt)
+
+ def infer_cfg_from_ckpt(path: pathlib.Path):
+     p = _resolve_ckpt(path) or path
+     if not p.exists(): return None
+     sd = _try_load(p, map_location="cpu")
+     if sd is None: return None
+     if "cfg" in sd: return dict(sd["cfg"])
+     return None
+
+
+ # ───────────────────────── Training Logic ─────────────────────────
+ def _parse_grow_plan(s: str) -> List[int]:
+     return sorted(set([int(x.strip()) for x in s.split(",") if x.strip() and int(x.strip()) >= 128]))
+
+ def _count_enabled_params(*modules) -> int:
+     seen_data_ptrs = set()
+     total = 0
+     for m in modules:
+         if m is None:
+             continue
+         for p in m.parameters():
+             if p.data_ptr() not in seen_data_ptrs:
+                 seen_data_ptrs.add(p.data_ptr())
+                 total += p.numel()
+     return total
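+ # Counting by data_ptr() de-duplicates shared storage, so with --tie_weights
+ # the embedding/LM-head matrix is counted once; this matters because the
+ # Chinchilla-style token budget in _train_phase below (25x or 51.2x tokens
+ # per parameter) is derived from this count.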
+
+ def _phase_freeze(core: nn.Module, *, freeze_core: bool, unfreeze_ln: bool, train_emb: bool):
+     for p in core.parameters(): p.requires_grad = not freeze_core
+     if freeze_core:
+         if unfreeze_ln:
+             for blk in core.blocks:
+                 for p in blk.ln1.parameters(): p.requires_grad = True
+                 for p in blk.ln2.parameters(): p.requires_grad = True
+             for p in core.ln.parameters(): p.requires_grad = True
+         if train_emb:
+             for p in core.emb.parameters(): p.requires_grad = True
+
+ def _train_phase(
+     args, phase_name: str,
+     core, ar_h, sat_h, opt, scaler,
+     start_step, seen_tok, resume_wall_time,
+     cfg, source, steps, block_size, batch_size,
+     chat_cfg: dict,
+     max_ckpts: int,
+     target_tokens_override: Optional[int] = None,
+     tie_weights: bool = False,
+     streaming: bool = True
+ ):
+     BLOCK = block_size
+     BATCH = batch_size
+     if target_tokens_override is not None:
+         target_tokens = target_tokens_override
+     else:
+         ratio = 51.2 if args.chilla_max_double else 25
+         param_count = _count_enabled_params(core, ar_h, sat_h)
+         target_tokens = int(ratio * param_count)
+     if steps:
+         phase_target_tokens = steps * BLOCK * BATCH
+         total_tokens_needed = seen_tok + phase_target_tokens
+     else:
+         total_tokens_needed = target_tokens
+     if total_tokens_needed <= seen_tok:
+         print(f"[{phase_name}] target {total_tokens_needed} already reached.")
+         return start_step, seen_tok, resume_wall_time
+     stream = token_stream(
+         source, total_tokens_needed, seed=42,
+         chat=chat_cfg.get("chat", False),
+         chat_messages_key=chat_cfg.get("key", "messages"),
+         sft_add_generation_prompt=chat_cfg.get("gen_prompt", False),
+         dataset_field_text=chat_cfg.get("text_field", "text"),
+         streaming=streaming
+     )
+     ce_tok = nn.CrossEntropyLoss(label_smoothing=0.1)
+     ce_gate = nn.CrossEntropyLoss()
+     pbar = tqdm(total=total_tokens_needed, initial=seen_tok, unit="tok")
+     grow_plan = _parse_grow_plan(args.grow_plan) if args.auto_grow else []
+     buf: list[int] = []
+     batch_accum: list[list[int]] = []
+     step = start_step
+     steps_since_last_grow = 0
+     oom_retries = 0
+     MAX_OOM_RETRIES = 2
+     now_wall = time.time()
+     last_save_mono = time.monotonic() - (now_wall - (resume_wall_time or now_wall))
+     # Status tracking for external monitoring
+     status_path = pathlib.Path(args.save_dir) / "status.json"
+     loss_history: list[float] = []
+     tokens_history: list[tuple[float, int]] = []  # (timestamp, seen_tok)
+     LOSS_WINDOW = 100
+     last_status_time = time.time()
+     STATUS_INTERVAL = 30  # write status every 30 seconds
+     print(f"[{phase_name}] Starting. Goal: {total_tokens_needed:,} tokens. Batch={BATCH}, Block={BLOCK}")
+     print(f"[{phase_name}] AR_ONLY={args.ar_only}, TIE_WEIGHTS={tie_weights}, STREAMING={streaming}")
+     while seen_tok < total_tokens_needed:
+         try:
+             while len(buf) < BLOCK:
+                 buf.append(next(stream))
+         except StopIteration:
+             break
+         seq = buf[:BLOCK]
+         buf = buf[BLOCK:]
+         batch_accum.append(seq)
+         if len(batch_accum) < BATCH:
+             continue
+         ids = torch.tensor(batch_accum, device=DEV)
+         batch_accum = []
+         tgt_ar = ids.clone()
+         try:
+             with amp(args.amp):
+                 h_ar = core(ids, causal_mask(ids.size(1)))
+                 logits_ar = ar_h(h_ar)[:, :-1]
+                 loss_ar = ce_tok(logits_ar.reshape(-1, VOCAB), tgt_ar[:, 1:].reshape(-1))
+                 if args.ar_only:
+                     loss = loss_ar
+                 else:
+                     h_sat = core(ids, sat_mask(ids.size(1)))
+                     logits_sat, gate = sat_h(h_sat[:, -SAT_BLOCK:])
+                     tgt_sat = ids[:, 1:SAT_BLOCK + 1]
+                     loss_sat = ce_tok(logits_sat.reshape(-1, VOCAB), tgt_sat.reshape(-1))
+                     if gate is not None:
+                         loss_sat += EMIT_LAMBDA * ce_gate(gate, torch.ones(ids.size(0), device=DEV, dtype=torch.long))
+                     loss = loss_ar + loss_sat
+             scaler.scale(loss).backward()
+             scaler.unscale_(opt)
+             nn.utils.clip_grad_norm_(core.parameters(), 1.0)
+             scaler.step(opt)
+             scaler.update()
+             opt.zero_grad(set_to_none=True)
+         except RuntimeError as e:
+             msg = str(e).lower()
+             if "out of memory" in msg or "cuda error" in msg:
+                 batch_accum = []
+                 opt.zero_grad(set_to_none=True)
+                 if DEV.type == "cuda":
+                     torch.cuda.empty_cache()
+                     torch.cuda.synchronize()
+                 oom_retries += 1
+                 if oom_retries <= MAX_OOM_RETRIES:
+                     print(f"\n[{phase_name} OOM] Retry {oom_retries}/{MAX_OOM_RETRIES} at Batch={BATCH}, clearing VRAM...")
+                     time.sleep(2)
+                     continue
+                 oom_retries = 0
+                 if BATCH > 1:
+                     print(f"\n[{phase_name} OOM] Reducing Batch: {BATCH} -> {BATCH - 1} (after {MAX_OOM_RETRIES} retries)")
+                     BATCH -= 1
+                     time.sleep(2)
+                 else:
+                     new_block = max(128, BLOCK // 2)
+                     print(f"\n[{phase_name} OOM] Reducing Block: {BLOCK} -> {new_block}")
+                     BLOCK = new_block
+                     time.sleep(2)
+                 steps_since_last_grow = 0
+                 continue
+             raise
+         step += 1
+         oom_retries = 0
+         toks_processed = BLOCK * BATCH
+         seen_tok += toks_processed
+         pbar.update(toks_processed)
+         pbar.set_postfix(loss=f"{loss.item():.3f}", B=BATCH, L=BLOCK)
+         # Track loss and tokens for status
+         loss_history.append(loss.item())
+         if len(loss_history) > LOSS_WINDOW:
+             loss_history.pop(0)
+         tokens_history.append((time.time(), seen_tok))
+         if len(tokens_history) > 60:
+             tokens_history.pop(0)
+         # Write status.json periodically
+         now_status = time.time()
+         if now_status - last_status_time >= STATUS_INTERVAL:
+             # Calculate tokens/sec from history
+             if len(tokens_history) >= 2:
+                 dt = tokens_history[-1][0] - tokens_history[0][0]
+                 dtok = tokens_history[-1][1] - tokens_history[0][1]
+                 tok_per_sec = dtok / dt if dt > 0 else 0
+             else:
+                 tok_per_sec = 0
+             # ETA calculation
+             remaining = total_tokens_needed - seen_tok
+             eta_sec = remaining / tok_per_sec if tok_per_sec > 0 else float('inf')
+             eta_hours = eta_sec / 3600
+             status = {
+                 "phase": phase_name,
+                 "step": step,
+                 "seen_tok": seen_tok,
+                 "target_tok": total_tokens_needed,
+                 "progress_pct": round(100 * seen_tok / total_tokens_needed, 2),
+                 "loss": round(loss.item(), 4),
+                 "loss_avg": round(sum(loss_history) / len(loss_history), 4) if loss_history else 0,
+                 "tok_per_sec": round(tok_per_sec, 1),
+                 "eta_hours": round(eta_hours, 2) if eta_hours != float('inf') else -1,
+                 "batch": BATCH,
+                 "block": BLOCK,
+                 "timestamp": now_status,
+                 "updated": get_uk_time()
+             }
+             try:
+                 status_path.write_text(json.dumps(status, indent=2))
+             except Exception:
+                 pass
+             last_status_time = now_status
+         # Hot-reloadable config check
+         if hot_get("pause_training", False):
+             print("[hot_config] Training PAUSED. Set pause_training=false to resume.")
+             while hot_get("pause_training", False):
+                 time.sleep(5)
+             print("[hot_config] Resumed.")
+         save_interval = hot_get("save_every_sec", args.save_every_sec)
+         if save_interval > 0:
+             now_mono = time.monotonic()
+             if now_mono - last_save_mono >= save_interval:
+                 ck_name = f"{phase_name}_step{step:08d}.pt"
+                 _prune_checkpoints(pathlib.Path(args.save_dir), phase_name, max_ckpts)
+                 save_ckpt(pathlib.Path(args.save_dir) / ck_name, core, ar_h, sat_h, opt, scaler,
+                           meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time(), "tie_weights": tie_weights})
+                 _hf_upload_checkpoint(pathlib.Path(args.save_dir) / ck_name)
+                 last_save_mono = now_mono
+         if args.auto_grow:
+             steps_since_last_grow += 1
+             if steps_since_last_grow >= args.grow_every_steps:
+                 steps_since_last_grow = 0
+                 try:
+                     idx = grow_plan.index(BLOCK)
+                     if idx + 1 < len(grow_plan):
+                         BLOCK = grow_plan[idx + 1]
+                         print(f"[{phase_name} Grow] Block -> {BLOCK}")
+                         if DEV.type == "cuda": torch.cuda.empty_cache()
+                 except ValueError:
+                     grow_plan = sorted(set(grow_plan + [BLOCK]))
+     pbar.close()
+     save_ckpt(pathlib.Path(args.save_dir) / f"{phase_name}_final.pt", core, ar_h, sat_h, opt, scaler,
+               meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time(), "tie_weights": tie_weights})
+     return step, seen_tok, time.time()
+
+
+ # ───────────────────────── Main Orchestrator ─────────────────────────
+ def train(args):
+     cfg = PRESETS[args.preset].copy()
+     tie_weights = args.tie_weights
+     print_expansion_info(cfg, tie_weights)
+     if not args.fresh:
+         src_probe = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
+         prev_cfg = infer_cfg_from_ckpt(src_probe)
+     else: prev_cfg = None
+     if prev_cfg:
+         cfg.update({k: v for k, v in prev_cfg.items() if k in cfg})
+         if args.x2 and prev_cfg.get("layers"): cfg["layers"] = max(cfg["layers"], prev_cfg["layers"] * 2)
+     if args.rank: cfg["rank"] = args.rank
+     if args.x2 and not prev_cfg: cfg["layers"] *= 2
+     print(f"Config: {cfg}")
+     core = Encoder(cfg, tie_weights=tie_weights).to(DEV)
+     ar_h = ARHead(cfg["d"], tie_weights=tie_weights, embedding_weight=core.emb.weight if tie_weights else None).to(DEV)
+     sat_h = SATHead(cfg["d"], mode="var").to(DEV)
+     total_params = _count_enabled_params(core, ar_h, sat_h)
+     print(f"Total parameters: {total_params:,}")
+     if tie_weights:
+         print(f"{Colors.WARN}[weight-tying] Embedding and LM head share weights{Colors.RESET}")
+     if not args.fresh:
+         src = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
+         src = _resolve_ckpt(src)
+         if src:
+             loaded = _safe_load_any(src, core, key="core")
+             _safe_load_any(src, ar_h, key="ar")
+             _safe_load_any(src, sat_h, key="sat")
+             if loaded: print(f"Warm-start loaded from {src}")
+     _phase_freeze(core, freeze_core=args.freeze_core, unfreeze_ln=args.unfreeze_ln, train_emb=args.train_emb)
+     opt = torch.optim.AdamW([
+         {"params": [p for p in core.parameters() if p.requires_grad], "lr": args.lr_core},
+         {"params": ar_h.parameters(), "lr": args.lr_head},
+         {"params": sat_h.parameters(), "lr": args.lr_head},
+     ])
+     scaler = GradScaler(enabled=(args.amp and DEV.type == "cuda"))
+     start_step, seen_tok, last_wall = 0, 0, None
+     if args.resume and not args.fresh:
+         start_step, seen_tok, last_wall = load_ckpt(pathlib.Path(args.resume), core, ar_h, sat_h, opt, scaler)
+         print(f"Resumed from step {start_step}")
+     step, seen_tok, last_wall = _train_phase(
+         args, "pretrain", core, ar_h, sat_h, opt, scaler,
+         start_step, seen_tok, last_wall, cfg,
+         args.source, args.steps,
+         args.block or DEFAULT_BLOCK,
+         args.batch_size or DEFAULT_BATCH,
+         chat_cfg={"chat": args.chat, "key": args.chat_messages_key, "gen_prompt": args.sft_add_generation_prompt, "text_field": args.dataset_field_text},
+         max_ckpts=args.max_ckpts,
+         target_tokens_override=args.target_tokens,
+         tie_weights=tie_weights
+     )
+     if (not args.after_sft_source) and (args.after_sft_steps and args.after_sft_steps > 0):
+         args.after_sft_source = DEFAULT_AFTER_SFT_SOURCES
+         args.after_sft_chat = True
+         if args.after_sft_add_generation_prompt is None: args.after_sft_add_generation_prompt = True
+         if not args.after_sft_block: args.after_sft_block = DEFAULT_AFTER_SFT_BLOCK
+     if args.after_sft_source and args.after_sft_steps and args.after_sft_steps > 0:
+         print("\n[Orchestrator] Starting Post-Pretraining SFT Phase...")
+         _phase_freeze(core,
+                       freeze_core=args.after_sft_freeze_core,
+                       unfreeze_ln=args.after_sft_unfreeze_ln,
+                       train_emb=args.after_sft_train_emb)
+         opt = torch.optim.AdamW([
+             {"params": [p for p in core.parameters() if p.requires_grad], "lr": args.after_sft_lr_core or args.lr_core},
+             {"params": ar_h.parameters(), "lr": args.after_sft_lr_head or args.lr_head},
+             {"params": sat_h.parameters(), "lr": args.after_sft_lr_head or args.lr_head},
+         ])
+         step, seen_tok, last_wall = _train_phase(
+             args, "sft", core, ar_h, sat_h, opt, scaler,
+             step, seen_tok, last_wall, cfg,
+             args.after_sft_source, args.after_sft_steps,
+             args.after_sft_block or DEFAULT_AFTER_SFT_BLOCK,
+             args.batch_size or DEFAULT_BATCH,
+             chat_cfg={
+                 "chat": args.after_sft_chat,
+                 "key": args.after_sft_chat_messages_key,
+                 "gen_prompt": args.after_sft_add_generation_prompt if args.after_sft_add_generation_prompt is not None else args.sft_add_generation_prompt,
+                 "text_field": args.after_sft_dataset_field_text
+             },
+             max_ckpts=args.max_ckpts,
+             target_tokens_override=None,
+             tie_weights=tie_weights,
+             streaming=False
+         )
+     save_ckpt(pathlib.Path(args.save_dir) / "final.pt", core, ar_h, sat_h, opt, scaler,
+               meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time(), "tie_weights": tie_weights})
+     print("🎉 All Training Complete")
+
+
+ # ───────────────────────── Sampling ─────────────────────────
+ def _apply_penalties(logits, ids, n, rep_p, pres_p, freq_p):
+     if ids.numel() == 0: return logits
+     hist = ids[0, -n:].long() if n > 0 else ids[0].long()
+     uniq, counts = torch.unique(hist, return_counts=True)
+     if pres_p or freq_p:
+         logits[..., uniq] -= (pres_p + freq_p * counts.float())
+     if rep_p != 1.0:
+         sel = logits[..., uniq]
+         logits[..., uniq] = torch.where(sel > 0, sel / rep_p, sel * rep_p)
+     return logits
+
+ def _sample(logits, T, top_k, top_p, min_p, greedy):
+     if greedy: return logits.argmax(-1, keepdim=True)
+     probs = (logits / max(T, 1e-8)).softmax(-1)
+     if top_k:
+         v, i = torch.topk(probs, min(top_k, probs.size(-1)))
+         probs = torch.zeros_like(probs).scatter_(-1, i, v)
+     if top_p < 1.0:
+         s_probs, s_idx = torch.sort(probs, descending=True, dim=-1)
+         probs = torch.zeros_like(probs).scatter_(-1, s_idx, s_probs * (torch.cumsum(s_probs, -1) <= top_p).float())
+     if min_p > 0: probs[probs < min_p] = 0
+     if probs.sum() == 0: return logits.argmax(-1, keepdim=True)
+     return probs.div_(probs.sum()).multinomial(1)
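+ # Sampling pipeline sketch: given last-position logits, _apply_penalties
+ # adjusts scores for tokens seen in the last `n` generated ids, then _sample
+ # applies temperature, top-k / top-p / min-p filtering, renormalizes, and
+ # draws one id, e.g.
+ #   nxt = _sample(logits, T=0.7, top_k=0, top_p=0.9, min_p=0.0, greedy=False)
+ # If the filters zero out every token, it falls back to greedy argmax.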
+
+ @torch.no_grad()
+ def infer(args):
+     if args.mode == "ar":
+         if args.temperature is None: args.temperature = 0.7
+         if args.top_k is None: args.top_k = 0
+         if args.repetition_penalty is None: args.repetition_penalty = 1.3
+         if args.presence_penalty is None: args.presence_penalty = 0.0
+         if args.frequency_penalty is None: args.frequency_penalty = 0.3
+         if args.penalty_last_n is None: args.penalty_last_n = 128
+         if args.var is None: args.var = False
+     else:
+         if args.temperature is None: args.temperature = 0.5
+         if args.top_k is None: args.top_k = 30
+         if args.repetition_penalty is None: args.repetition_penalty = 2.0
+         if args.presence_penalty is None: args.presence_penalty = 0.6
+         if args.frequency_penalty is None: args.frequency_penalty = 1.0
+         if args.penalty_last_n is None: args.penalty_last_n = 200
+         if args.var is None: args.var = True
+     path = _resolve_ckpt(pathlib.Path(args.ckpt)) or pathlib.Path(args.ckpt)
+     sd = torch.load(path, map_location="cpu")
+     cfg = sd["cfg"]
+     tie_weights = sd.get("tie_weights", False)
+     uk_time = get_uk_time()
+     ckpt_name = path.name
+     print(f"┌──────────────────────────────────────────────────┐")
+     print(f"│ INFERENCE @ {uk_time:<35s} │")
+     print(f"├──────────────────────────────────────────────────")
+     print(f"│ Checkpoint: {ckpt_name:<35s} │")
+     print(f"└──────────────────────────────────────────────────┘")
+     print_expansion_info(cfg, tie_weights)
+     core = Encoder(cfg, tie_weights=tie_weights).to(DEV)
+     ar_h = ARHead(cfg["d"], tie_weights=tie_weights, embedding_weight=core.emb.weight if tie_weights else None).to(DEV)
+     sat_h = SATHead(cfg["d"]).to(DEV)
+     core.load_state_dict(sd["core"])
+     ar_h.load_state_dict(sd["ar"])
+     sat_h.load_state_dict(sd["sat"])
+     core.eval()
+     ar_h.eval()
+     sat_h.eval()
+     total_params = _count_enabled_params(core, ar_h, sat_h)
+     if total_params >= 1_000_000_000:
+         param_str = f"{total_params / 1_000_000_000:.2f}B"
+     elif total_params >= 1_000_000:
+         param_str = f"{total_params / 1_000_000:.2f}M"
+     elif total_params >= 1_000:
+         param_str = f"{total_params / 1_000:.2f}K"
+     else:
+         param_str = f"{total_params}"
+     print(f"Model size: {param_str} parameters ({total_params:,})")
+     prompt_tokens = tok.encode(args.prompt)
+     prompt_len = len(prompt_tokens)
+     ids = torch.tensor([prompt_tokens], device=DEV)
+     if ids.size(1) == 0:
+         ids = torch.tensor([[EOS]], device=DEV)
+         prompt_len = 1
+     mode_str = args.mode
+     if args.mode == "sat":
+         mode_str = f"sat-{'var' if args.var else 'fixed'}"
+     print(f"{Colors.INFO}Generating ({mode_str})...{Colors.RESET}")
+     start = time.time()
+     if args.mode == "ar":
+         h, kvs = core(ids, causal_mask(ids.size(1)), use_cache=True, total_seq_len=ids.size(1))
+         for _ in range(args.max_new):
+             logits = ar_h(h)[:, -1]
+             logits = _apply_penalties(logits, ids, args.penalty_last_n, args.repetition_penalty, args.presence_penalty, args.frequency_penalty)
+             nxt = _sample(logits, args.temperature, args.top_k, args.top_p, args.min_p, args.greedy)
+             ids = torch.cat([ids, nxt], 1)
+             h, kvs = core(ids[:, -1:], None, kv_caches=kvs, use_cache=True, total_seq_len=ids.size(1))
+     else:
+         cached_len = ids.size(1)
+         h, kvs = core(ids, sat_mask(ids.size(1)), use_cache=True, total_seq_len=cached_len)
+         added = 0
+         while added < args.max_new:
+             logits_all, gate = sat_h(h[:, -SAT_BLOCK:])
+             stride = SAT_BLOCK if (not args.var or gate is None) else (gate.softmax(-1).multinomial(1).item() + 1)
+             new_tokens = []
+             for i in range(int(stride)):
+                 logits = logits_all[:, i]
+                 logits = _apply_penalties(logits, ids, args.penalty_last_n, args.repetition_penalty, args.presence_penalty, args.frequency_penalty)
+                 nxt = _sample(logits, args.temperature, args.top_k, args.top_p, args.min_p, args.greedy)
+                 new_tokens.append(nxt)
+                 ids = torch.cat([ids, nxt], 1)
+                 added += 1
+                 if added >= args.max_new: break
+             if added >= args.max_new: break
+             new_ids = torch.cat(new_tokens, dim=1)
+             mask = sat_mask_cached(new_ids.size(1), cached_len)
+             h, kvs = core(new_ids, mask, kv_caches=kvs, use_cache=True, total_seq_len=ids.size(1))
+             cached_len = ids.size(1)
+     elapsed = time.time() - start
+     gen_tokens = len(ids[0]) - prompt_len
+     tok_per_sec = gen_tokens / elapsed if elapsed > 0 else 0
+     all_tokens = ids[0].tolist()
+     prompt_text = tok.decode(all_tokens[:prompt_len], skip_special_tokens=True)
+     gen_text = tok.decode(all_tokens[prompt_len:], skip_special_tokens=True)
+     print(f"{Colors.PROMPT}{prompt_text}{Colors.RESET}{gen_text}")
+     print(f"{Colors.INFO}[{elapsed:.2f}s | {gen_tokens} tokens | {tok_per_sec:.1f} tok/s]{Colors.RESET}")
+
+
+ # ───────────────────────── CLI ─────────────────────────
+ # ───────────────────────── Status Check ─────────────────────────
+ def status(args):
+     """Display current training status from status.json"""
+     status_path = pathlib.Path(args.save_dir) / "status.json"
+     if not status_path.exists():
+         print(f"No status file found at {status_path}")
+         print("Training may not be running or hasn't written status yet.")
+         return
+     try:
+         data = json.loads(status_path.read_text())
+     except Exception as e:
+         print(f"Error reading status: {e}")
+         return
+
+     # Calculate time since last update
+     age_sec = time.time() - data.get("timestamp", 0)
+     if age_sec > 120:
+         age_str = f"{Colors.WARN}(stale: {age_sec/60:.1f}m ago){Colors.RESET}"
+     else:
+         age_str = f"({age_sec:.0f}s ago)"
+
+     # Format numbers nicely
+     seen = data.get("seen_tok", 0)
+     target = data.get("target_tok", 1)
+     progress = data.get("progress_pct", 0)
+     eta_h = data.get("eta_hours", -1)
+     eta_str = f"{eta_h:.1f}h" if eta_h >= 0 else "∞"
+
+     # Progress bar
+     bar_width = 30
+     filled = int(bar_width * progress / 100)
+     bar = "█" * filled + "░" * (bar_width - filled)
+
+     print(f"┌────────────────────────────────────────────────────┐")
+     print(f"│ AGILLM-3 TRAINING STATUS │")
+     print(f"├────────────────────────────────────────────────────")
+     print(f"│ Phase: {data.get('phase', '?'):<20s} Step: {data.get('step', 0):>12,d} │")
+     print(f"│ [{bar}] {progress:5.1f}% │")
+     print(f"├────────────────────────────────────────────────────")
+     print(f"│ Tokens: {seen:>14,d} / {target:<14,d} │")
+     print(f"│ Speed: {data.get('tok_per_sec', 0):>8.1f} tok/s ETA: {eta_str:>10s} │")
+     print(f"│ Loss: {data.get('loss', 0):>8.4f} (avg: {data.get('loss_avg', 0):.4f}) │")
+     print(f"│ Batch: {data.get('batch', 0):>4d} Block: {data.get('block', 0):>4d} │")
+     print(f"├────────────────────────────────────────────────────")
+     print(f"│ Updated: {data.get('updated', '?'):<28s} {age_str:>8s} │")
+     print(f"└────────────────────────────────────────────────────┘")
+
+
+ def main():
+     ap = argparse.ArgumentParser(description="AGILLM Expansion Ratio Testing")
+     sub = ap.add_subparsers(dest="cmd", required=True)
+     tr = sub.add_parser("train")
+     tr.add_argument("--preset", choices=PRESETS.keys(), default="nano_3x")
+     tr.add_argument("--rank", type=int)
+     tr.add_argument("--block", type=int, default=DEFAULT_BLOCK)
+     tr.add_argument("--batch_size", type=int, default=DEFAULT_BATCH)
+     tr.add_argument("--source", default=DEFAULT_PRETRAIN_SOURCES)
+     tr.add_argument("--target_tokens", type=int)
+     tr.add_argument("--steps", type=int)
+     tr.add_argument("--amp", action="store_true")
+     tr.add_argument("--save_every_sec", type=int, default=DEFAULT_SAVE_SEC)
+     tr.add_argument("--save_dir", default=str(CKDIR))
+     tr.add_argument("--resume", type=str)
+     tr.add_argument("--x2", action="store_true")
+     tr.add_argument("--warmstart_from", type=str)
+     tr.add_argument("--fresh", action="store_true")
+     tr.add_argument("--max_ckpts", type=int, default=3)
+     tr.add_argument("--chilla_max_double", action="store_true", default=True)
+     tr.add_argument("--tie_weights", action="store_true")
+     tr.add_argument("--ar_only", action="store_true")
+     tr.add_argument("--freeze_core", action="store_true")
+     tr.add_argument("--unfreeze_ln", action="store_true")
+     tr.add_argument("--train_emb", action="store_true")
+     tr.add_argument("--lr_core", type=float, default=LR_CORE)
+     tr.add_argument("--lr_head", type=float, default=LR_HEAD)
+     tr.add_argument("--chat", action="store_true")
+     tr.add_argument("--chat_messages_key", default="messages")
+     tr.add_argument("--dataset_field_text", default="text")
+     tr.add_argument("--sft_add_generation_prompt", action="store_true")
+     tr.add_argument("--auto_grow", action="store_true")
+     tr.add_argument("--grow_plan", default="576,640,768,896,1024,1122")
+     tr.add_argument("--grow_every_steps", type=int, default=50000)
+     tr.add_argument("--after_sft_source", default=DEFAULT_AFTER_SFT_SOURCES)
+     tr.add_argument("--after_sft_steps", type=int, default=80000)
+     tr.add_argument("--after_sft_chat", action="store_true", default=True)
+     tr.add_argument("--after_sft_chat_messages_key", default="messages")
+     tr.add_argument("--after_sft_dataset_field_text", default="text")
+     # type=bool would treat any non-empty string (even "False") as True;
+     # parse the flag value explicitly instead.
+     tr.add_argument("--after_sft_add_generation_prompt",
+                     type=lambda s: s.strip().lower() in ("1", "true", "yes"), default=True)
+     tr.add_argument("--after_sft_block", type=int, default=0)
+     tr.add_argument("--after_sft_freeze_core", action="store_true")
+     tr.add_argument("--after_sft_unfreeze_ln", action="store_true")
+     tr.add_argument("--after_sft_train_emb", action="store_true")
+     tr.add_argument("--after_sft_lr_core", type=float, default=0.0)
+     tr.add_argument("--after_sft_lr_head", type=float, default=0.0)
+     inf = sub.add_parser("infer")
+     inf.add_argument("--mode", choices=["ar", "sat"], required=True)
+     inf.add_argument("--ckpt", required=True)
+     inf.add_argument("--prompt", required=True)
+     inf.add_argument("--max_new", type=int, default=120)
+     inf.add_argument("--temperature", type=float, default=None)
+     inf.add_argument("--greedy", action="store_true")
+     inf.add_argument("--top_k", type=int, default=None)
+     inf.add_argument("--top_p", type=float, default=0.9)
+     inf.add_argument("--min_p", type=float, default=0.0)
+     inf.add_argument("--repetition_penalty", type=float, default=None)
+     inf.add_argument("--presence_penalty", type=float, default=None)
+     inf.add_argument("--frequency_penalty", type=float, default=None)
+     inf.add_argument("--penalty_last_n", type=int, default=None)
+     inf.add_argument("--var", action="store_true", default=None)
+     inf.add_argument("--no-var", dest="var", action="store_false")
+     # Status subcommand
+     st = sub.add_parser("status")
+     st.add_argument("--save_dir", default=str(CKDIR), help="Directory containing status.json")
+     args = ap.parse_args()
+     if args.cmd == "train": train(args)
+     elif args.cmd == "status": status(args)
+     else: infer(args)
+
+
+ if __name__ == "__main__":
+     main()