Remove n.py — was pushed here by mistake, belongs in AGILLM-3-large
n.py
DELETED
@@ -1,1107 +0,0 @@
#!/usr/bin/env python3

# n.py — Joint AR+SAT Trainer with Expansion Ratio Testing
# Enhanced inference: checkpoint name, tok/s, UK time

from __future__ import annotations
import argparse, json, math, pathlib, random, time, os, sys, threading, hashlib
from pathlib import Path
from contextlib import nullcontext
from typing import Dict, Any, List, Optional, Tuple
from datetime import datetime, timezone
import torch

# SafeProgress - Claude-safe progress (discrete lines, not single growing line)
class SafeProgress:
    def __init__(self, total, initial=0, unit="tok", print_every=1_000_000):
        self.total, self.n, self.unit = total, initial, unit
        self.last_print, self.postfix = initial, {}
        self.print_every = print_every  # emit a status line roughly every N units (~1M tokens by default)
        self.start_time = time.time()
    def update(self, n=1):
        self.n += n
        if self.n - self.last_print >= self.print_every:
            self._print(); self.last_print = self.n
    def set_postfix(self, **kwargs): self.postfix = kwargs
    def _print(self):
        elapsed = time.time() - self.start_time
        rate = self.n / elapsed if elapsed > 0 else 0
        pct = 100 * self.n / self.total if self.total > 0 else 0
        pf = ' '.join(f"{k}={v}" for k, v in self.postfix.items())
        print(f"[{pct:.1f}%] {self.n:,}/{self.total:,} {self.unit} | {rate:.0f} tok/s | {pf}")
    def close(self): self._print(); print("Done.")

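# Usage sketch (illustrative values only, not part of the training flow):
#     pbar = SafeProgress(total=2_000_000, unit="tok", print_every=500_000)
#     pbar.set_postfix(loss="3.142", B=4)
#     pbar.update(500_000)   # emits one discrete "[25.0%] ..." line
#     pbar.close()
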
import torch.nn as nn
import torch.nn.functional as F
from datasets import load_dataset, DownloadConfig
from transformers import AutoTokenizer, logging as hf_log
# from tqdm.auto import tqdm  # DISABLED - kills Claude context

# ─────────────────────────────── HOT DATASET LOADING ───────────────────────────────
HOT_CONFIG_PATH = Path("/workspace/hot_config.json")
_hot_config_cache = {"mtime": 0, "data": {}}

def get_hot_config() -> dict:
    """Load hot_config.json with caching, return empty dict if missing"""
    try:
        if HOT_CONFIG_PATH.exists():
            mtime = HOT_CONFIG_PATH.stat().st_mtime
            if mtime > _hot_config_cache["mtime"]:
                with open(HOT_CONFIG_PATH) as f:
                    _hot_config_cache["data"] = json.load(f)
                _hot_config_cache["mtime"] = mtime
        return _hot_config_cache["data"]
    except Exception as e:
        print(f"[hot_config] Error loading: {e}")
        return {}

def get_hot_datasets(default_sources: str) -> str:
    """Get datasets from hot_config if present, else use default"""
    cfg = get_hot_config()
    if "datasets" in cfg and cfg["datasets"]:
        hot_ds = cfg["datasets"]
        if isinstance(hot_ds, list):
            hot_ds = ",".join(hot_ds)
        print(f"[hot_config] Using hot datasets: {hot_ds}")
        return hot_ds
    return default_sources

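# Example /workspace/hot_config.json (hypothetical contents; only the "datasets"
# key is consumed — a list or a single comma-separated string both work):
#     {"datasets": ["OpenTransformer/web-crawl-v1", "OpenTransformer/turbo-crawl"]}
# token_stream() calls get_hot_datasets() at startup, and get_hot_config()
# reloads whenever the file's mtime changes.
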
# DISABLED: # Auto-rotating log to prevent context-window suicide
# DISABLED: try:
# DISABLED:     from rotating_log import install_rotating_log
# DISABLED:     install_rotating_log()
# DISABLED: except ImportError:
# DISABLED:     pass  # Running without rotation

# ───────────────────────── ANSI Colors ─────────────────────────
class Colors:
    RESET = "\033[0m"
    BOLD = "\033[1m"
    PROMPT = "\033[36m"
    GEN = "\033[0m"
    INFO = "\033[90m"
    WARN = "\033[93m"

# ───────────────────────── Globals ─────────────────────────
hf_log.set_verbosity_error()
DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cuda.matmul.allow_tf32 = True
try:
    torch.set_float32_matmul_precision("high")
except Exception:
    pass

TOKENIZER_ID = os.environ.get("TOKENIZER_ID", "deepseek-ai/DeepSeek-V3.2")
tok = AutoTokenizer.from_pretrained(TOKENIZER_ID, use_fast=True, trust_remote_code=True)
if tok.pad_token is None:
    tok.add_special_tokens({"pad_token": "<|pad|>"})

VOCAB, EOS = (
    max(tok.get_vocab().values()) + 1,
    tok.eos_token_id if tok.eos_token_id is not None else tok.sep_token_id
)

# ───────────────────────── PRESETS ─────────────────────────
PRESETS: Dict[str, Dict[str, int]] = {
    "femto_1x": dict(d=16, layers=1, heads=1, rank=16),
    "femto_12x": dict(d=16, layers=1, heads=1, rank=192),
    "femto_24x": dict(d=16, layers=1, heads=1, rank=384),
    "pico_1x": dict(d=32, layers=1, heads=2, rank=16),
    "pico_3x": dict(d=32, layers=1, heads=2, rank=48),
    "pico_6x": dict(d=32, layers=1, heads=2, rank=96),
    "pico_12x": dict(d=32, layers=1, heads=2, rank=192),
    "pico_24x": dict(d=32, layers=1, heads=2, rank=384),
    "pico_48x": dict(d=32, layers=1, heads=2, rank=768),
    "nano_1x": dict(d=64, layers=2, heads=4, rank=16),
    "nano_3x": dict(d=64, layers=2, heads=4, rank=48),
    "nano_6x": dict(d=64, layers=2, heads=4, rank=96),
    "nano_12x": dict(d=64, layers=2, heads=4, rank=192),
    "nano_24x": dict(d=64, layers=2, heads=4, rank=384),
    "nano_48x": dict(d=64, layers=2, heads=4, rank=768),
    "nano_96x": dict(d=64, layers=2, heads=4, rank=1536),
    "micro_3x": dict(d=128, layers=4, heads=8, rank=48),
    "micro_6x": dict(d=128, layers=4, heads=8, rank=96),
    "micro_12x": dict(d=128, layers=4, heads=8, rank=192),
    "micro_24x": dict(d=128, layers=4, heads=8, rank=384),
    "small": dict(d=512, layers=8, heads=16, rank=64),
    "smallx2": dict(d=512, layers=16, heads=16, rank=64),
    "base": dict(d=768, layers=12, heads=24, rank=96),
    "base18": dict(d=768, layers=18, heads=24, rank=96),
    "large": dict(d=1024, layers=24, heads=16, rank=128),
}

DEFAULT_BLOCK = 1122
DEFAULT_BATCH = 4
SAT_BLOCK = 2
LR_CORE, LR_HEAD = 5e-5, 2e-4
EMIT_LAMBDA = 0.1
DEFAULT_SAVE_SEC = 24 * 3600
DEFAULT_DELTA_STEPS = 500  # lightweight weight-only save every N steps
DEFAULT_MAX_DELTAS = 5     # keep last N deltas (older pruned after full save)
CKDIR = pathlib.Path("ckpts_expansion")

DEFAULT_PRETRAIN_SOURCES = "OpenTransformer/goddess-crawl,OpenTransformer/agillm-crawl-data,OpenTransformer/web-crawl-2026,OpenTransformer/web-crawl-clean-v2,OpenTransformer/scraped-web-data,OpenTransformer/turbo-crawl,OpenTransformer/sft-data-clean,OpenTransformer/web-crawl-v1"
DEFAULT_AFTER_SFT_SOURCES = "mlabonne/opc-sft-stage2-chat,HuggingFaceH4/ultrachat_200k"
DEFAULT_AFTER_SFT_BLOCK = 1122

# ───────────────────────── UK Time Helper ─────────────────────────
def get_uk_time() -> str:
    utc_now = datetime.now(timezone.utc)
    year = utc_now.year
    march_last = datetime(year, 3, 31, 1, 0, tzinfo=timezone.utc)
    while march_last.weekday() != 6:
        march_last = march_last.replace(day=march_last.day - 1)
    oct_last = datetime(year, 10, 31, 1, 0, tzinfo=timezone.utc)
    while oct_last.weekday() != 6:
        oct_last = oct_last.replace(day=oct_last.day - 1)
    if march_last <= utc_now < oct_last:
        uk_offset = 1
        tz_name = "BST"
    else:
        uk_offset = 0
        tz_name = "GMT"
    from datetime import timedelta
    uk_time = utc_now + timedelta(hours=uk_offset)
    return uk_time.strftime(f'%Y-%m-%d %H:%M:%S {tz_name}')

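# The loop above hand-rolls the UK DST switch (last Sunday of March/October,
# 01:00 UTC). A minimal stdlib alternative on Python 3.9+, assuming tz data is
# available on the host:
#     from zoneinfo import ZoneInfo
#     datetime.now(ZoneInfo("Europe/London")).strftime("%Y-%m-%d %H:%M:%S %Z")
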
# ───────────────────────── Utilities ─────────────────────────
def rng_state():
    if DEV.type == "cuda":
        try:
            return torch.cuda.get_rng_state(DEV)
        except TypeError:
            return torch.cuda.get_rng_state()
    return torch.get_rng_state()

def _is_probably_ckpt(path: pathlib.Path) -> bool:
    try:
        return path.is_file() and path.suffix == ".pt" and not path.name.endswith(".pt.tmp") and path.stat().st_size > (1 << 20)
    except Exception:
        return False

def _resolve_ckpt(path: pathlib.Path) -> pathlib.Path | None:
    try:
        if path.is_dir():
            cands = sorted([p for p in path.glob("*.pt") if _is_probably_ckpt(p)],
                           key=lambda p: p.stat().st_mtime, reverse=True)
            return cands[0] if cands else None
        if path.suffix == ".tmp":
            solid = path.with_suffix("")
            return solid if _is_probably_ckpt(solid) else _resolve_ckpt(path.parent)
        return path if _is_probably_ckpt(path) else _resolve_ckpt(path.parent)
    except Exception:
        return None

def _try_load(path: pathlib.Path, map_location="cpu"):
    try:
        return torch.load(path, map_location=map_location)  # honour the caller's map_location
    except Exception as e:
        print(f"[ckpt-skip] {path} not usable: {e}")
        return None

def _prune_checkpoints(save_dir: pathlib.Path, phase_name: str, max_ckpts: int):
    if max_ckpts is None or max_ckpts <= 0:
        return
    try:
        pattern = f"{phase_name}_step*.pt"
        ckpts = sorted(
            [p for p in save_dir.glob(pattern) if _is_probably_ckpt(p)],
            key=lambda p: p.stat().st_mtime
        )
        excess = len(ckpts) - max_ckpts
        if excess > 0:
            for p in ckpts[:excess]:
                try:
                    p.unlink()
                    print(f" [prune] deleted old {p.name}")
                except Exception:
                    pass
    except Exception as e:
        print(f"[ckpt-prune] error: {e}")

def print_expansion_info(cfg: dict, tie_weights: bool = False):
    d_k = cfg["d"] // cfg["heads"]
    rank = cfg["rank"]
    ratio = rank / d_k
    regime = "COMPRESSION" if ratio < 1 else ("IDENTITY" if ratio == 1 else "EXPANSION")
    tie_str = "YES" if tie_weights else "NO"
    print(f"┌─────────────────────────────────────────┐")
    print(f"│ TUNEABLE ATTENTION CONFIG │")
    print(f"├─────────────────────────────────────────┤")
    print(f"│ d_model: {cfg['d']:4d} heads: {cfg['heads']:2d} d_k: {d_k:3d} │")
    print(f"│ layers: {cfg['layers']:4d} tie_weights: {tie_str:3s} │")
    print(f"│ rank: {rank:4d} ratio: {ratio:.1f}x [{regime:11s}] │")
    print(f"└─────────────────────────────────────────┘")

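# Worked example — preset "nano_3x" (d=64, heads=4, rank=48):
#     d_k = 64 // 4 = 16;  ratio = 48 / 16 = 3.0x  →  EXPANSION regime.
# "nano_1x" (rank=16) lands exactly on IDENTITY; any rank < 16 would be COMPRESSION.
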
# ───────────────────────── AMP helper ─────────────────────────
try:
    from torch.amp import autocast as _ac, GradScaler
except ImportError:
    from torch.cuda.amp import autocast as _ac, GradScaler

def _auto_amp_dtype():
    if DEV.type == "cuda":
        try:
            if torch.cuda.is_bf16_supported(): return torch.bfloat16
            return torch.float16
        except Exception: return torch.float16
    return torch.float32

def amp(enabled: bool):
    return nullcontext() if not (enabled and DEV.type == "cuda") else _ac(device_type="cuda", dtype=_auto_amp_dtype())

# ───────────────────────── Chat & Data Stream ─────────────────────────
def _coerce_role(r: str) -> str:
    r = (r or "").lower()
    if r in {"user", "human", "customer"}: return "user"
    if r in {"assistant", "gpt", "bot"}: return "assistant"
    if r in {"system", "context"}: return "system"
    return r or "user"

def _render_chat_text_from_ex(ex: dict, messages_key: str, add_generation_prompt: bool) -> Optional[str]:
    msgs = ex.get(messages_key)
    if msgs is None:
        for alt in ("conversations", "dialog", "turns"):
            if isinstance(ex.get(alt), list):
                msgs = ex[alt]; break
    if isinstance(msgs, list) and msgs and isinstance(msgs[0], dict):
        try:
            norm = []
            for m in msgs:
                role = _coerce_role(m.get("role", "")); content = m.get("content", m.get("text", ""))
                if not isinstance(content, str): continue
                norm.append({"role": role, "content": content})
            if not norm: return None
            return tok.apply_chat_template(norm, tokenize=False, add_generation_prompt=add_generation_prompt)
        except Exception: return None
    for a, b in (("prompt", "response"), ("instruction", "output"), ("question", "answer")):
        if isinstance(ex.get(a), str) and isinstance(ex.get(b), str):
            return f"User: {ex[a]}\nAssistant: {ex[b]}"
    return None

def _open_stream_one(ds_name: str, seed: int, streaming: bool = True):
    dc = DownloadConfig(max_retries=5, use_etag=True, resume_download=True)
    if ":" in ds_name: base, config = ds_name.split(":", 1)
    else: base, config = ds_name, None
    if not streaming:
        print(f"[download] Downloading {ds_name} (non-streaming)...")
    if base == "json":
        data_files = {"train": config}
        ds = load_dataset("json", data_files=data_files, split="train", streaming=streaming, download_config=dc)
    else:
        ds = load_dataset(base, config, split="train", streaming=streaming, download_config=dc) if config else \
             load_dataset(base, split="train", streaming=streaming, download_config=dc)
    if streaming:
        return iter(ds.shuffle(buffer_size=1000, seed=seed))
    else:
        print(f"[download] Got {len(ds):,} examples. Shuffling...")
        ds = ds.shuffle(seed=seed)
        return iter(ds)

def token_stream(ds_names: str, target: int, seed: int = 42,
                 chat: bool = False, chat_messages_key: str = "messages",
                 sft_add_generation_prompt: bool = False, dataset_field_text: str = "text",
                 streaming: bool = True):
    ds_names = get_hot_datasets(ds_names)  # HOT LOAD
    sources = [s.strip() for s in ds_names.split(",") if s.strip()]
    if not sources: return
    src_idx = 0; emitted = 0; it = None; attempts = 0; backoff_base = 2.0
    while emitted < target:
        try:
            if it is None: it = _open_stream_one(sources[src_idx], seed, streaming=streaming)
            ex = next(it)
            text = None
            if isinstance(ex, dict):
                if chat:
                    text = _render_chat_text_from_ex(ex, chat_messages_key, sft_add_generation_prompt)
                if text is None:
                    if dataset_field_text and isinstance(ex.get(dataset_field_text), str):
                        text = ex[dataset_field_text]
                    elif isinstance(ex.get("text"), str):
                        text = ex["text"]
            if not isinstance(text, str):
                attempts = 0; continue
            enc = tok.encode(text)
            if EOS is not None and (len(enc) == 0 or enc[-1] != EOS):
                enc = enc + [EOS]
            for t in enc:
                yield t
                emitted += 1
                if emitted >= target: return
            attempts = 0
        except StopIteration:
            it = None; src_idx = (src_idx + 1) % len(sources)
        except Exception as e:
            attempts += 1
            sleep_s = min(60.0, backoff_base ** min(attempts, 6))
            print(f"[stream-retry] {sources[src_idx]} error: {type(e).__name__}, sleeping {sleep_s:.1f}s")
            time.sleep(sleep_s); it = None
            if attempts % 5 == 0 and len(sources) > 1:
                src_idx = (src_idx + 1) % len(sources)

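# Usage sketch (dataset name taken from DEFAULT_PRETRAIN_SOURCES; "base:config"
# pairs and "json:/path/to/file.jsonl" are also accepted by _open_stream_one):
#     stream = token_stream("OpenTransformer/web-crawl-v1", target=1_000)
#     first = [next(stream) for _ in range(8)]
# On exhaustion sources rotate round-robin; on errors the stream backs off
# exponentially (capped at 60s) and rotates every 5th failed attempt.
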
# ───────────────────────── ALiBi ─────────────────────────
def _alibi_slopes(n_heads: int):
    def pow2slopes(n):
        start = 2 ** (-2 ** -(math.log2(n) - 3))
        ratio = start
        return [start * (ratio ** i) for i in range(n)]
    if math.log2(n_heads).is_integer(): vals = pow2slopes(n_heads)
    else:
        closest = 2 ** math.floor(math.log2(n_heads))
        vals = pow2slopes(closest)
        extra = pow2slopes(2 * closest)
        vals += extra[0::2][: n_heads - closest]
    return torch.tensor(vals, device=DEV).view(1, n_heads, 1, 1)

def alibi_bias(n_heads: int, n_tokens: int):
    i = torch.arange(n_tokens, device=DEV).view(1, 1, n_tokens, 1)
    j = torch.arange(n_tokens, device=DEV).view(1, 1, 1, n_tokens)
    dist = (i - j).clamp_min(0)  # distance of key j behind query i; future positions clamp to 0 and are masked anyway
    return -_alibi_slopes(n_heads) * dist

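# Worked example — 4 heads: start = 2**(-2**-(log2(4)-3)) = 2**-2, so the slopes
# are [1/4, 1/16, 1/64, 1/256]; a key sitting k tokens behind its query is
# penalised by slope * k, giving each head a different effective context decay.
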
# ───────────────────────── Model components ─────────────────────────
class TuneableAttentionMHA(nn.Module):
    def __init__(self, d: int, h: int, r: int, use_relpos: bool = True):
        super().__init__()
        assert d % h == 0
        self.h, self.dk, self.r = h, d // h, r
        self.use_relpos = use_relpos
        self.q = nn.Linear(d, d, bias=False)
        self.k = nn.Linear(d, d, bias=False)
        self.v = nn.Linear(d, d, bias=False)
        self.U = nn.Parameter(torch.randn(self.dk, r))  # shared per-head lift from d_k to rank r
        nn.init.orthogonal_(self.U)
        self.proj = nn.Linear(h * self.dk, d, bias=False)
        self.drop = nn.Dropout(0.1)

    def _proj_qk(self, x):
        B, N, _ = x.shape
        return (x.view(B, N, self.h, self.dk).transpose(1, 2) @ self.U)  # (B, h, N, r)

    def _reshape_v(self, x):
        B, N, _ = x.shape
        return x.view(B, N, self.h, self.dk).transpose(1, 2)  # (B, h, N, d_k)

    def forward(self, x, mask=None, rel_bias_tokens=None, kv_cache=None, use_cache=False):
        q = self._proj_qk(self.q(x))
        k_new = self._proj_qk(self.k(x))
        v_new = self._reshape_v(self.v(x))
        if kv_cache is None:
            k, v = k_new, v_new
        else:
            k_cached, v_cached = kv_cache
            if use_cache:
                k = torch.cat([k_cached, k_new], dim=2)
                v = torch.cat([v_cached, v_new], dim=2)
            else:
                k, v = k_new, v_new
        att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk)
        if self.use_relpos and rel_bias_tokens is not None:
            att = att + alibi_bias(self.h, rel_bias_tokens)[:, :, -q.size(2):, :]
        if mask is not None:
            att = att + mask
        z = (att.softmax(-1) @ v).transpose(1, 2).reshape(x.size(0), x.size(1), -1)
        out = self.drop(self.proj(z))
        return (out, (k, v)) if use_cache else out

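# Shape sketch for one forward pass (B=2, N=8, d=64, h=4 → d_k=16, r=48):
#     q, k: (2, 4, 8, 48) — per-head d_k features lifted to rank r through U
#     v:    (2, 4, 8, 16) — values stay at d_k, so the output width is unchanged
#     att = q @ k^T / sqrt(d_k): (2, 4, 8, 8);  softmax(att) @ v: (2, 4, 8, 16)
# Only the q/k similarity space is widened; the value path and proj are standard.
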
class Block(nn.Module):
    def __init__(self, d: int, h: int, r: int):
        super().__init__()
        self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
        self.mha = TuneableAttentionMHA(d, h, r)
        self.ff = nn.Sequential(nn.Linear(d, 4 * d), nn.ReLU(), nn.Linear(4 * d, d))

    def forward(self, x, mask, kv=None, use_cache=False, total_seq_len=None):
        if use_cache:
            y, new_kv = self.mha(self.ln1(x), mask, rel_bias_tokens=total_seq_len, kv_cache=kv, use_cache=True)
            x = x + y + self.ff(self.ln2(x + y))
            return x, new_kv
        else:
            n = x.size(1)
            x = x + self.mha(self.ln1(x), mask, rel_bias_tokens=n)
            return x + self.ff(self.ln2(x))


class Encoder(nn.Module):
    def __init__(self, cfg, tie_weights: bool = False):
        super().__init__()
        d, l, h, r = cfg["d"], cfg["layers"], cfg["heads"], cfg["rank"]
        self.emb = nn.Embedding(VOCAB, d)
        self.blocks = nn.ModuleList([Block(d, h, r) for _ in range(l)])
        self.ln = nn.LayerNorm(d)
        self.tie_weights = tie_weights

    def forward(self, ids, mask, kv_caches=None, use_cache=False, total_seq_len=None):
        x = self.emb(ids)
        if not use_cache:
            for blk in self.blocks:
                x = blk(x, mask)
            return self.ln(x)
        new_kvs = []
        for i, blk in enumerate(self.blocks):
            kv = kv_caches[i] if kv_caches else None
            x, kv_out = blk(x, mask, kv, use_cache=True, total_seq_len=total_seq_len)
            new_kvs.append(kv_out)
        return self.ln(x), new_kvs


class ARHead(nn.Module):
    def __init__(self, d, tie_weights: bool = False, embedding_weight: nn.Parameter = None):
        super().__init__()
        self.tie_weights = tie_weights
        if tie_weights and embedding_weight is not None:
            self.proj = nn.Linear(d, VOCAB, bias=False)
            self.proj.weight = embedding_weight
        else:
            self.proj = nn.Linear(d, VOCAB)

    def forward(self, h):
        return self.proj(h)


class SATHead(nn.Module):
    def __init__(self, d, mode="var"):
        super().__init__()
        self.proj = nn.Linear(d, VOCAB)
        self.gate = nn.Linear(d, 2) if mode == "var" else None
    def forward(self, h_last):
        return self.proj(h_last), (self.gate(h_last[:, 0]) if self.gate else None)


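# Gate semantics (used by infer() below): in "var" mode the gate reads the first
# position of the SAT block and scores 2 classes; sampled index + 1 = how many of
# the SAT_BLOCK logits are emitted this step, e.g.:
#     logits_blk, gate = sat_h(h[:, -SAT_BLOCK:])   # logits_blk: (B, 2, VOCAB), gate: (B, 2)
#     stride = gate.softmax(-1).multinomial(1).item() + 1
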
# ───────────────────────── Masks ─────────────────────────
def causal_mask(n):
    return torch.triu(torch.full((1, 1, n, n), float("-inf"), device=DEV), 1)

def sat_mask(n, block=SAT_BLOCK):
    idx = torch.arange(n, device=DEV)
    grp = idx.unsqueeze(0) // block
    allow = (grp.T == grp) | (grp.T > grp)  # attend within own block and to all earlier blocks
    return torch.where(allow, 0.0, float("-inf")).unsqueeze(0).unsqueeze(0)

def sat_mask_cached(new_len: int, cached_len: int, block=SAT_BLOCK):
    # With a cache the new tokens form one SAT block: they may attend to the
    # whole cache and to each other, so nothing needs to be hidden.
    total_len = cached_len + new_len
    mask = torch.zeros((1, 1, new_len, total_len), device=DEV)
    return mask

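# Worked example — sat_mask(4) with block=2 groups tokens as [0, 0, 1, 1]; rows
# are queries, 0 = visible, -inf = hidden:
#     [[0, 0, -inf, -inf],
#      [0, 0, -inf, -inf],
#      [0, 0,    0,    0],
#      [0, 0,    0,    0]]
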
# ───────────────────────── Checkpoint helpers ─────────────────────────

# ───────────────────────── Delta Checkpoints (weight-only, async) ─────────────────────────
_delta_lock = threading.Lock()
_delta_thread: Optional[threading.Thread] = None

def _sha256_file(path: pathlib.Path) -> str:
    """Compute SHA256 of a file for integrity verification."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

def _do_delta_save(tensors: dict, path: pathlib.Path, meta: dict):
    """Background worker: write weight-only checkpoint + checksum."""
    try:
        path.parent.mkdir(exist_ok=True, parents=True)
        tmp = path.with_suffix(path.suffix + ".dtmp")
        torch.save({"weights": tensors, **meta}, tmp, _use_new_zipfile_serialization=False)
        digest = _sha256_file(tmp)
        tmp.replace(path)
        # Write sidecar checksum
        path.with_suffix(".sha256").write_text(f"{digest} {path.name}\n")
        print(f" [delta] saved {path.name} ({digest[:12]}...)")
    except Exception as e:
        print(f" [delta] FAILED {path.name}: {e}")

def save_delta(core, ar_h, sat_h, step: int, seen_tok: int, save_dir: pathlib.Path, phase_name: str):
    """Save weight-only delta in background thread. Non-blocking."""
    global _delta_thread
    # Wait for any previous delta write to finish
    if _delta_thread is not None and _delta_thread.is_alive():
        _delta_thread.join(timeout=60)
    # Snapshot weights to CPU (detach from GPU graph)
    with _delta_lock:
        tensors = {
            "core": {k: v.detach().cpu() for k, v in core.state_dict().items()},
            "ar": {k: v.detach().cpu() for k, v in ar_h.state_dict().items()},
            "sat": {k: v.detach().cpu() for k, v in sat_h.state_dict().items()},
        }
    meta = {"step": step, "seen_tok": seen_tok, "wall_time": time.time(), "delta": True}
    path = save_dir / f"{phase_name}_delta_step{step:08d}.pt"
    _delta_thread = threading.Thread(target=_do_delta_save, args=(tensors, path, meta), daemon=True)
    _delta_thread.start()

def _prune_deltas(save_dir: pathlib.Path, phase_name: str, max_deltas: int):
    """Keep only the most recent max_deltas delta files."""
    if max_deltas is None or max_deltas <= 0:
        return
    try:
        pattern = f"{phase_name}_delta_step*.pt"
        deltas = sorted(
            [p for p in save_dir.glob(pattern) if p.stat().st_size > 0],
            key=lambda p: p.stat().st_mtime
        )
        excess = len(deltas) - max_deltas
        if excess > 0:
            for p in deltas[:excess]:
                try:
                    p.unlink()
                    sha = p.with_suffix(".sha256")
                    if sha.exists(): sha.unlink()
                    print(f" [delta-prune] deleted {p.name}")
                except Exception:
                    pass
    except Exception as e:
        print(f" [delta-prune] error: {e}")

def load_delta(path: pathlib.Path, core, ar_h, sat_h):
    """Load weight-only delta. Returns (step, seen_tok) or raises."""
    # Verify checksum if sidecar exists
    sha_path = path.with_suffix(".sha256")
    if sha_path.exists():
        expected = sha_path.read_text().split()[0]
        actual = _sha256_file(path)
        if expected != actual:
            raise ValueError(f"Checksum mismatch for {path.name}: expected {expected[:12]}... got {actual[:12]}...")
        print(f" [delta] checksum OK for {path.name}")
    ck = torch.load(path, map_location="cpu", weights_only=False)
    if not ck.get("delta"):
        raise ValueError(f"{path.name} is not a delta checkpoint")
    core.load_state_dict(ck["weights"]["core"])
    ar_h.load_state_dict(ck["weights"]["ar"])
    sat_h.load_state_dict(ck["weights"]["sat"])
    return ck.get("step", 0), ck.get("seen_tok", 0)

def _flush_delta():
    """Wait for any in-flight delta save to complete."""
    global _delta_thread
    if _delta_thread is not None and _delta_thread.is_alive():
        print(" [delta] flushing in-flight write...")
        _delta_thread.join(timeout=120)

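# Resume sketch (hypothetical paths): full checkpoints carry optimizer + scaler
# state and go through --resume; deltas are weight-only, so --resume_delta
# restarts AdamW from scratch:
#     python n.py train --preset nano_3x --resume ckpts_expansion/pretrain_step00001000.pt
#     python n.py train --preset nano_3x --resume_delta ckpts_expansion/pretrain_delta_step00001500.pt
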
def save_ckpt(path: pathlib.Path, core, ar_h, sat_h, opt, scaler, meta):
    path.parent.mkdir(exist_ok=True, parents=True)
    tmp = path.with_suffix(path.suffix + ".tmp")
    state = {
        "core": core.state_dict(), "ar": ar_h.state_dict(), "sat": sat_h.state_dict(),
        "opt": opt.state_dict(), "scaler": scaler.state_dict(),
        "cfg": meta.get("cfg"), "tokenizer_id": TOKENIZER_ID,
        "tie_weights": meta.get("tie_weights", False),
        **{k: v for k, v in meta.items() if k not in ("cfg", "tie_weights")}
    }
    torch.save(state, tmp, _use_new_zipfile_serialization=False)
    tmp.replace(path)
    (path.parent / "latest.json").write_text(json.dumps({"path": str(path), "step": meta["step"]}))
    print(f"\n✓ saved checkpoint {path.name}")

def load_ckpt(path, core, ar_h, sat_h, opt, scaler):
    p = _resolve_ckpt(path) or path
    ck = _try_load(p, map_location="cpu")
    if ck is None: raise FileNotFoundError(f"No valid checkpoint at {p}")
    core.load_state_dict(ck["core"])
    ar_h.load_state_dict(ck["ar"])
    sat_h.load_state_dict(ck["sat"])
    opt.load_state_dict(ck["opt"])
    scaler.load_state_dict(ck["scaler"])
    return ck.get("step", 0), ck.get("seen_tok", 0), ck.get("wall_time", time.time())

def _safe_load_any(path: pathlib.Path, tgt: nn.Module, key: str | None = None):
    p = _resolve_ckpt(path) or path
    if not p.exists(): return 0
    ck = _try_load(p, map_location="cpu")
    if ck is None: return 0
    sd = ck.get(key, ck) if key else ck
    if isinstance(sd, dict) and "state_dict" in sd: sd = sd["state_dict"]
    tgt_sd = tgt.state_dict()
    filt = {k: v for k, v in sd.items() if k in tgt_sd and v.shape == tgt_sd[k].shape}
    if filt: tgt.load_state_dict(filt, strict=False)
    return len(filt)

def infer_cfg_from_ckpt(path: pathlib.Path):
    p = _resolve_ckpt(path) or path
    if not p.exists(): return None
    sd = _try_load(p, map_location="cpu")
    if sd is None: return None
    if "cfg" in sd: return dict(sd["cfg"])
    return None


# ───────────────────────── Training Logic ─────────────────────────
def _parse_grow_plan(s: str) -> List[int]:
    return sorted(set([int(x.strip()) for x in s.split(",") if x.strip() and int(x.strip()) >= 128]))

def _count_enabled_params(*modules) -> int:
    seen_data_ptrs = set()
    total = 0
    for m in modules:
        if m is None:
            continue
        for p in m.parameters():
            if p.data_ptr() not in seen_data_ptrs:
                seen_data_ptrs.add(p.data_ptr())
                total += p.numel()
    return total

def _phase_freeze(core: nn.Module, *, freeze_core: bool, unfreeze_ln: bool, train_emb: bool):
    for p in core.parameters(): p.requires_grad = not freeze_core
    if freeze_core:
        if unfreeze_ln:
            for blk in core.blocks:
                for p in blk.ln1.parameters(): p.requires_grad = True
                for p in blk.ln2.parameters(): p.requires_grad = True
            for p in core.ln.parameters(): p.requires_grad = True
        if train_emb:
            for p in core.emb.parameters(): p.requires_grad = True

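# Because _count_enabled_params dedupes by data_ptr, tied weights are counted
# once — e.g. with --tie_weights, ARHead.proj.weight aliases core.emb.weight:
#     total = _count_enabled_params(core, ar_h, sat_h)   # embedding counted a single time
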
def _train_phase(
    args, phase_name: str,
    core, ar_h, sat_h, opt, scaler,
    start_step, seen_tok, resume_wall_time,
    cfg, source, steps, block_size, batch_size,
    chat_cfg: dict,
    max_ckpts: int,
    target_tokens_override: Optional[int] = None,
    tie_weights: bool = False,
    streaming: bool = True
):
    BLOCK = block_size
    BATCH = batch_size
    if target_tokens_override is not None:
        target_tokens = target_tokens_override
    else:
        ratio = 51.2 if args.chilla_max_double else 25  # Chinchilla-style tokens-per-parameter budget (roughly doubled with the flag)
        param_count = _count_enabled_params(core, ar_h, sat_h)
        target_tokens = int(ratio * param_count)
    if steps:
        phase_target_tokens = steps * BLOCK * BATCH
        total_tokens_needed = seen_tok + phase_target_tokens
    else:
        total_tokens_needed = target_tokens
    if total_tokens_needed <= seen_tok:
        print(f"[{phase_name}] target {total_tokens_needed} already reached.")
        return start_step, seen_tok, resume_wall_time
    stream = token_stream(
        source, total_tokens_needed, seed=42,
        chat=chat_cfg.get("chat", False),
        chat_messages_key=chat_cfg.get("key", "messages"),
        sft_add_generation_prompt=chat_cfg.get("gen_prompt", False),
        dataset_field_text=chat_cfg.get("text_field", "text"),
        streaming=streaming
    )
    ce_tok = nn.CrossEntropyLoss(label_smoothing=0.1)
    ce_gate = nn.CrossEntropyLoss()
    pbar = SafeProgress(total=total_tokens_needed, initial=seen_tok, unit="tok")
    grow_plan = _parse_grow_plan(args.grow_plan) if args.auto_grow else []
    buf: list[int] = []
    batch_accum: list[list[int]] = []
    step = start_step
    steps_since_last_grow = 0
    oom_retries = 0
    MAX_OOM_RETRIES = 2
    now_wall = time.time()
    last_save_mono = time.monotonic() - (now_wall - (resume_wall_time or now_wall))
    last_delta_step = start_step
    print(f"[{phase_name}] Starting. Goal: {total_tokens_needed:,} tokens. Batch={BATCH}, Block={BLOCK}")
    print(f"[{phase_name}] AR_ONLY={args.ar_only}, TIE_WEIGHTS={tie_weights}, STREAMING={streaming}")
    while seen_tok < total_tokens_needed:
        try:
            while len(buf) < BLOCK:
                buf.append(next(stream))
        except StopIteration:
            break
        seq = buf[:BLOCK]
        buf = buf[BLOCK:]
        batch_accum.append(seq)
        if len(batch_accum) < BATCH:
            continue
        ids = torch.tensor(batch_accum, device=DEV)
        batch_accum = []
        tgt_ar = ids.clone()
        try:
            with amp(args.amp):
                h_ar = core(ids, causal_mask(ids.size(1)))
                logits_ar = ar_h(h_ar)[:, :-1]
                loss_ar = ce_tok(logits_ar.reshape(-1, VOCAB), tgt_ar[:, 1:].reshape(-1))
                if args.ar_only:
                    loss = loss_ar
                else:
                    h_sat = core(ids, sat_mask(ids.size(1)))
                    logits_sat, gate = sat_h(h_sat[:, -SAT_BLOCK:])
                    tgt_sat = ids[:, 1:SAT_BLOCK+1]
                    loss_sat = ce_tok(logits_sat.reshape(-1, VOCAB), tgt_sat.reshape(-1))
                    if gate is not None:
                        loss_sat += EMIT_LAMBDA * ce_gate(gate, torch.ones(ids.size(0), device=DEV, dtype=torch.long))
                    loss = loss_ar + loss_sat
            scaler.scale(loss).backward()
            scaler.unscale_(opt)
            nn.utils.clip_grad_norm_(core.parameters(), 1.0)  # only the core is clipped; head grads pass through
            scaler.step(opt)
            scaler.update()
            opt.zero_grad(set_to_none=True)
        except RuntimeError as e:
            msg = str(e).lower()
            if "out of memory" in msg or "cuda error" in msg:
                batch_accum = []
                opt.zero_grad(set_to_none=True)
                if DEV.type == "cuda":
                    torch.cuda.empty_cache()
                    torch.cuda.synchronize()
                oom_retries += 1
                if oom_retries <= MAX_OOM_RETRIES:
                    print(f"\n[{phase_name} OOM] Retry {oom_retries}/{MAX_OOM_RETRIES} at Batch={BATCH}, clearing VRAM...")
                    time.sleep(2)
                    continue
                oom_retries = 0
                if BATCH > 1:
                    print(f"\n[{phase_name} OOM] Reducing Batch: {BATCH} -> {BATCH - 1} (after {MAX_OOM_RETRIES} retries)")
                    BATCH -= 1
                    time.sleep(2)
                else:
                    new_block = max(128, BLOCK // 2)
                    print(f"\n[{phase_name} OOM] Reducing Block: {BLOCK} -> {new_block}")
                    BLOCK = new_block
                    time.sleep(2)
                steps_since_last_grow = 0
                continue
            raise
        step += 1
        oom_retries = 0
        toks_processed = BLOCK * BATCH
        seen_tok += toks_processed
        pbar.update(toks_processed)
        pbar.set_postfix(loss=f"{loss.item():.3f}", B=BATCH, L=BLOCK)
        if args.save_every_sec > 0:
            now_mono = time.monotonic()
            if now_mono - last_save_mono >= args.save_every_sec:
                ck_name = f"{phase_name}_step{step:08d}.pt"
                _flush_delta()  # wait for any in-flight delta before full save
                _prune_checkpoints(pathlib.Path(args.save_dir), phase_name, max_ckpts)
                save_ckpt(pathlib.Path(args.save_dir) / ck_name, core, ar_h, sat_h, opt, scaler,
                          meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time(), "tie_weights": tie_weights})
                last_save_mono = now_mono
                # Prune old deltas after a full save (they're superseded)
                _prune_deltas(pathlib.Path(args.save_dir), phase_name, args.delta_max_keep)
                last_delta_step = step  # reset delta counter after full save
        # ── Delta checkpoint (step-based, weight-only, async) ──
        if args.delta_every_steps > 0 and (step - last_delta_step) >= args.delta_every_steps:
            _prune_deltas(pathlib.Path(args.save_dir), phase_name, args.delta_max_keep)
            save_delta(core, ar_h, sat_h, step, seen_tok, pathlib.Path(args.save_dir), phase_name)
            last_delta_step = step
        if args.auto_grow:
            steps_since_last_grow += 1
            if steps_since_last_grow >= args.grow_every_steps:
                steps_since_last_grow = 0
                try:
                    idx = grow_plan.index(BLOCK)
                    if idx + 1 < len(grow_plan):
                        BLOCK = grow_plan[idx + 1]
                        print(f"[{phase_name} Grow] Block -> {BLOCK}")
                        if DEV.type == "cuda": torch.cuda.empty_cache()
                except ValueError:
                    grow_plan = sorted(set(grow_plan + [BLOCK]))
    pbar.close()
    _flush_delta()  # ensure any in-flight delta completes before final save
    save_ckpt(pathlib.Path(args.save_dir) / f"{phase_name}_final.pt", core, ar_h, sat_h, opt, scaler,
              meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time(), "tie_weights": tie_weights})
    return step, seen_tok, time.time()


# ───────────────────────── Main Orchestrator ─────────────────────────
def train(args):
    cfg = PRESETS[args.preset].copy()
    tie_weights = args.tie_weights
    print_expansion_info(cfg, tie_weights)
    if not args.fresh:
        src_probe = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
        prev_cfg = infer_cfg_from_ckpt(src_probe)
    else: prev_cfg = None
    if prev_cfg:
        cfg.update({k: v for k, v in prev_cfg.items() if k in cfg})
        if args.x2 and prev_cfg.get("layers"): cfg["layers"] = max(cfg["layers"], prev_cfg["layers"] * 2)
    if args.rank: cfg["rank"] = args.rank
    if args.x2 and not prev_cfg: cfg["layers"] *= 2
    print(f"Config: {cfg}")
    core = Encoder(cfg, tie_weights=tie_weights).to(DEV)
    ar_h = ARHead(cfg["d"], tie_weights=tie_weights, embedding_weight=core.emb.weight if tie_weights else None).to(DEV)
    sat_h = SATHead(cfg["d"], mode="var").to(DEV)
    total_params = _count_enabled_params(core, ar_h, sat_h)
    print(f"Total parameters: {total_params:,}")
    if tie_weights:
        print(f"{Colors.WARN}[weight-tying] Embedding and LM head share weights{Colors.RESET}")
    if not args.fresh:
        src = pathlib.Path(args.warmstart_from) if args.warmstart_from else pathlib.Path(args.save_dir) / "final.pt"
        src = _resolve_ckpt(src)
        if src:
            loaded = _safe_load_any(src, core, key="core")
            _safe_load_any(src, ar_h, key="ar")
            _safe_load_any(src, sat_h, key="sat")
            if loaded: print(f"Warm-start loaded from {src}")
    _phase_freeze(core, freeze_core=args.freeze_core, unfreeze_ln=args.unfreeze_ln, train_emb=args.train_emb)
    opt = torch.optim.AdamW([
        {"params": [p for p in core.parameters() if p.requires_grad], "lr": args.lr_core},
        {"params": ar_h.parameters(), "lr": args.lr_head},
        {"params": sat_h.parameters(), "lr": args.lr_head},
    ])
    scaler = GradScaler(enabled=(args.amp and DEV.type == "cuda"))
    start_step, seen_tok, last_wall = 0, 0, None
    if args.resume_delta and not args.fresh:
        delta_step, delta_tok = load_delta(pathlib.Path(args.resume_delta), core, ar_h, sat_h)
        start_step, seen_tok, last_wall = delta_step, delta_tok, None
        print(f"Resumed from DELTA at step {start_step} (optimizer state reset — momentum rebuilds in ~100 steps)")
    elif args.resume and not args.fresh:
        start_step, seen_tok, last_wall = load_ckpt(pathlib.Path(args.resume), core, ar_h, sat_h, opt, scaler)
        print(f"Resumed from step {start_step}")
    # torch.compile AFTER loading checkpoint (key names differ)
    if args.compile:
        print("[torch.compile] Compiling model...")
        core = torch.compile(core, mode="reduce-overhead")
        ar_h = torch.compile(ar_h, mode="reduce-overhead")
        sat_h = torch.compile(sat_h, mode="reduce-overhead")
        print("[torch.compile] Done.")
    step, seen_tok, last_wall = _train_phase(
        args, "pretrain", core, ar_h, sat_h, opt, scaler,
        start_step, seen_tok, last_wall, cfg,
        args.source, args.steps,
        args.block or DEFAULT_BLOCK,
        args.batch_size or DEFAULT_BATCH,
        chat_cfg={"chat": args.chat, "key": args.chat_messages_key, "gen_prompt": args.sft_add_generation_prompt, "text_field": args.dataset_field_text},
        max_ckpts=args.max_ckpts,
        target_tokens_override=args.target_tokens,
        tie_weights=tie_weights
    )
    if (not args.after_sft_source) and (args.after_sft_steps and args.after_sft_steps > 0):
        args.after_sft_source = DEFAULT_AFTER_SFT_SOURCES
        args.after_sft_chat = True
        if args.after_sft_add_generation_prompt is None: args.after_sft_add_generation_prompt = True
        if not args.after_sft_block: args.after_sft_block = DEFAULT_AFTER_SFT_BLOCK
    if args.after_sft_source and args.after_sft_steps and args.after_sft_steps > 0:
        print("\n[Orchestrator] Starting Post-Pretraining SFT Phase...")
        _phase_freeze(core,
                      freeze_core=args.after_sft_freeze_core,
                      unfreeze_ln=args.after_sft_unfreeze_ln,
                      train_emb=args.after_sft_train_emb)
        opt = torch.optim.AdamW([
            {"params": [p for p in core.parameters() if p.requires_grad], "lr": args.after_sft_lr_core or args.lr_core},
            {"params": ar_h.parameters(), "lr": args.after_sft_lr_head or args.lr_head},
            {"params": sat_h.parameters(), "lr": args.after_sft_lr_head or args.lr_head},
        ])
        step, seen_tok, last_wall = _train_phase(
            args, "sft", core, ar_h, sat_h, opt, scaler,
            step, seen_tok, last_wall, cfg,
            args.after_sft_source, args.after_sft_steps,
            args.after_sft_block or DEFAULT_AFTER_SFT_BLOCK,
            args.batch_size or DEFAULT_BATCH,
            chat_cfg={
                "chat": args.after_sft_chat,
                "key": args.after_sft_chat_messages_key,
                "gen_prompt": args.after_sft_add_generation_prompt if args.after_sft_add_generation_prompt is not None else args.sft_add_generation_prompt,
                "text_field": args.after_sft_dataset_field_text
            },
            max_ckpts=args.max_ckpts,
            target_tokens_override=None,
            tie_weights=tie_weights,
            streaming=False
        )
    save_ckpt(pathlib.Path(args.save_dir) / "final.pt", core, ar_h, sat_h, opt, scaler,
              meta={"cfg": cfg, "step": step, "seen_tok": seen_tok, "wall_time": time.time(), "tie_weights": tie_weights})
    print("🎉 All Training Complete")


# ───────────────────────── Sampling ─────────────────────────
def _apply_penalties(logits, ids, n, rep_p, pres_p, freq_p):
    if ids.numel() == 0: return logits
    hist = ids[0, -n:].long() if n > 0 else ids[0].long()
    uniq, counts = torch.unique(hist, return_counts=True)
    if pres_p or freq_p:
        logits[..., uniq] -= (pres_p + freq_p * counts.float())
    if rep_p != 1.0:
        sel = logits[..., uniq]
        logits[..., uniq] = torch.where(sel > 0, sel / rep_p, sel * rep_p)
    return logits

def _sample(logits, T, top_k, top_p, min_p, greedy):
    if greedy: return logits.argmax(-1, keepdim=True)
    probs = (logits / max(T, 1e-8)).softmax(-1)
    if top_k:
        v, i = torch.topk(probs, min(top_k, probs.size(-1)))
        probs = torch.zeros_like(probs).scatter_(-1, i, v)
    if top_p < 1.0:
        s_probs, s_idx = torch.sort(probs, descending=True, dim=-1)
        probs = torch.zeros_like(probs).scatter_(-1, s_idx, s_probs * (torch.cumsum(s_probs, -1) <= top_p).float())
    if min_p > 0: probs[probs < min_p] = 0
    if probs.sum() == 0: return logits.argmax(-1, keepdim=True)
    return probs.div_(probs.sum()).multinomial(1)

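# Sampling sketch (random logits, illustrative only): greedy short-circuits;
# otherwise temperature, top-k, top-p and min-p filters apply in that order:
#     logits = torch.randn(1, VOCAB)
#     nxt = _sample(logits, T=0.7, top_k=30, top_p=0.9, min_p=0.0, greedy=False)
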
@torch.no_grad()
def infer(args):
    if args.mode == "ar":
        if args.temperature is None: args.temperature = 0.7
        if args.top_k is None: args.top_k = 0
        if args.repetition_penalty is None: args.repetition_penalty = 1.3
        if args.presence_penalty is None: args.presence_penalty = 0.0
        if args.frequency_penalty is None: args.frequency_penalty = 0.3
        if args.penalty_last_n is None: args.penalty_last_n = 128
        if args.var is None: args.var = False
    else:
        if args.temperature is None: args.temperature = 0.5
        if args.top_k is None: args.top_k = 30
        if args.repetition_penalty is None: args.repetition_penalty = 2.0
        if args.presence_penalty is None: args.presence_penalty = 0.6
        if args.frequency_penalty is None: args.frequency_penalty = 1.0
        if args.penalty_last_n is None: args.penalty_last_n = 200
        if args.var is None: args.var = True
    path = _resolve_ckpt(pathlib.Path(args.ckpt)) or pathlib.Path(args.ckpt)
    sd = torch.load(path, map_location="cpu")
    cfg = sd["cfg"]
    tie_weights = sd.get("tie_weights", False)
    uk_time = get_uk_time()
    ckpt_name = path.name
    print(f"┌─────────────────────────────────────────────────┐")
    print(f"│ INFERENCE @ {uk_time:<35s} │")
    print(f"├─────────────────────────────────────────────────┤")
    print(f"│ Checkpoint: {ckpt_name:<35s} │")
    print(f"└─────────────────────────────────────────────────┘")
    print_expansion_info(cfg, tie_weights)
    core = Encoder(cfg, tie_weights=tie_weights).to(DEV)
    ar_h = ARHead(cfg["d"], tie_weights=tie_weights, embedding_weight=core.emb.weight if tie_weights else None).to(DEV)
    sat_h = SATHead(cfg["d"]).to(DEV)
    core.load_state_dict(sd["core"])
    ar_h.load_state_dict(sd["ar"])
    sat_h.load_state_dict(sd["sat"])
    core.eval()
    ar_h.eval()
    sat_h.eval()
    total_params = _count_enabled_params(core, ar_h, sat_h)
    if total_params >= 1_000_000_000:
        param_str = f"{total_params / 1_000_000_000:.2f}B"
    elif total_params >= 1_000_000:
        param_str = f"{total_params / 1_000_000:.2f}M"
    elif total_params >= 1_000:
        param_str = f"{total_params / 1_000:.2f}K"
    else:
        param_str = f"{total_params}"
    print(f"Model size: {param_str} parameters ({total_params:,})")
    prompt_tokens = tok.encode(args.prompt)
    prompt_len = len(prompt_tokens)
    ids = torch.tensor([prompt_tokens], device=DEV)
    if ids.size(1) == 0:
        ids = torch.tensor([[EOS]], device=DEV)
        prompt_len = 1
    mode_str = args.mode
    if args.mode == "sat":
        mode_str = f"sat-{'var' if args.var else 'fixed'}"
    print(f"{Colors.INFO}Generating ({mode_str})...{Colors.RESET}")
    start = time.time()
    if args.mode == "ar":
        h, kvs = core(ids, causal_mask(ids.size(1)), use_cache=True, total_seq_len=ids.size(1))
        for _ in range(args.max_new):
            logits = ar_h(h)[:, -1]
            logits = _apply_penalties(logits, ids, args.penalty_last_n, args.repetition_penalty, args.presence_penalty, args.frequency_penalty)
            nxt = _sample(logits, args.temperature, args.top_k, args.top_p, args.min_p, args.greedy)
            ids = torch.cat([ids, nxt], 1)
            h, kvs = core(ids[:, -1:], None, kv_caches=kvs, use_cache=True, total_seq_len=ids.size(1))
    else:
        cached_len = ids.size(1)
        h, kvs = core(ids, sat_mask(ids.size(1)), use_cache=True, total_seq_len=cached_len)
        added = 0
        while added < args.max_new:
            logits_all, gate = sat_h(h[:, -SAT_BLOCK:])
            stride = SAT_BLOCK if (not args.var or gate is None) else (gate.softmax(-1).multinomial(1).item() + 1)
            new_tokens = []
            for i in range(int(stride)):
                logits = logits_all[:, i]
                logits = _apply_penalties(logits, ids, args.penalty_last_n, args.repetition_penalty, args.presence_penalty, args.frequency_penalty)
                nxt = _sample(logits, args.temperature, args.top_k, args.top_p, args.min_p, args.greedy)
                new_tokens.append(nxt)
                ids = torch.cat([ids, nxt], 1)
                added += 1
                if added >= args.max_new: break
            if added >= args.max_new: break
            new_ids = torch.cat(new_tokens, dim=1)
            mask = sat_mask_cached(new_ids.size(1), cached_len)
            h, kvs = core(new_ids, mask, kv_caches=kvs, use_cache=True, total_seq_len=ids.size(1))
            cached_len = ids.size(1)
    elapsed = time.time() - start
    gen_tokens = len(ids[0]) - prompt_len
    tok_per_sec = gen_tokens / elapsed if elapsed > 0 else 0
    all_tokens = ids[0].tolist()
    prompt_text = tok.decode(all_tokens[:prompt_len], skip_special_tokens=True)
    gen_text = tok.decode(all_tokens[prompt_len:], skip_special_tokens=True)
    print(f"{Colors.PROMPT}{prompt_text}{Colors.RESET}{gen_text}")
    print(f"{Colors.INFO}[{elapsed:.2f}s | {gen_tokens} tokens | {tok_per_sec:.1f} tok/s]{Colors.RESET}")


# ───────────────────────── CLI ─────────────────────────
def main():
    ap = argparse.ArgumentParser(description="AGILLM Expansion Ratio Testing")
    sub = ap.add_subparsers(dest="cmd", required=True)
    tr = sub.add_parser("train")
    tr.add_argument("--preset", choices=PRESETS.keys(), default="nano_3x")
    tr.add_argument("--rank", type=int)
    tr.add_argument("--block", type=int, default=DEFAULT_BLOCK)
    tr.add_argument("--batch_size", type=int, default=DEFAULT_BATCH)
    tr.add_argument("--source", default=DEFAULT_PRETRAIN_SOURCES)
    tr.add_argument("--target_tokens", type=int)
    tr.add_argument("--steps", type=int)
    tr.add_argument("--amp", action="store_true")
    tr.add_argument("--compile", action="store_true", help="Use torch.compile for speedup")
    tr.add_argument("--save_every_sec", type=int, default=DEFAULT_SAVE_SEC)
    tr.add_argument("--delta_every_steps", type=int, default=DEFAULT_DELTA_STEPS, help="Weight-only delta save every N steps (0=off)")
    tr.add_argument("--delta_max_keep", type=int, default=DEFAULT_MAX_DELTAS, help="Max delta checkpoints to keep")
    tr.add_argument("--resume_delta", type=str, help="Resume from a delta (weight-only, no optimizer state)")
    tr.add_argument("--save_dir", default=str(CKDIR))
    tr.add_argument("--resume", type=str)
    tr.add_argument("--x2", action="store_true")
    tr.add_argument("--warmstart_from", type=str)
    tr.add_argument("--fresh", action="store_true")
    tr.add_argument("--max_ckpts", type=int, default=None)
    tr.add_argument("--chilla_max_double", action="store_true")
    tr.add_argument("--tie_weights", action="store_true")
    tr.add_argument("--ar_only", action="store_true")
    tr.add_argument("--freeze_core", action="store_true")
    tr.add_argument("--unfreeze_ln", action="store_true")
    tr.add_argument("--train_emb", action="store_true")
    tr.add_argument("--lr_core", type=float, default=LR_CORE)
    tr.add_argument("--lr_head", type=float, default=LR_HEAD)
    tr.add_argument("--chat", action="store_true")
    tr.add_argument("--chat_messages_key", default="messages")
    tr.add_argument("--dataset_field_text", default="text")
    tr.add_argument("--sft_add_generation_prompt", action="store_true")
    tr.add_argument("--auto_grow", action="store_true")
    tr.add_argument("--grow_plan", default="576,640,768,896,1024,1122")
    tr.add_argument("--grow_every_steps", type=int, default=50000)
    tr.add_argument("--after_sft_source", default="")
    tr.add_argument("--after_sft_steps", type=int, default=0)
    tr.add_argument("--after_sft_chat", action="store_true")
    tr.add_argument("--after_sft_chat_messages_key", default="messages")
    tr.add_argument("--after_sft_dataset_field_text", default="text")
    tr.add_argument("--after_sft_add_generation_prompt", type=lambda s: s.lower() in ("1", "true", "yes"), default=None)  # type=bool would treat any non-empty string as True
    tr.add_argument("--after_sft_block", type=int, default=0)
    tr.add_argument("--after_sft_freeze_core", action="store_true")
    tr.add_argument("--after_sft_unfreeze_ln", action="store_true")
    tr.add_argument("--after_sft_train_emb", action="store_true")
    tr.add_argument("--after_sft_lr_core", type=float, default=0.0)
    tr.add_argument("--after_sft_lr_head", type=float, default=0.0)
    inf = sub.add_parser("infer")
    inf.add_argument("--mode", choices=["ar", "sat"], required=True)
    inf.add_argument("--ckpt", required=True)
    inf.add_argument("--prompt", required=True)
    inf.add_argument("--max_new", type=int, default=120)
    inf.add_argument("--temperature", type=float, default=None)
    inf.add_argument("--greedy", action="store_true")
    inf.add_argument("--top_k", type=int, default=None)
    inf.add_argument("--top_p", type=float, default=0.9)
    inf.add_argument("--min_p", type=float, default=0.0)
    inf.add_argument("--repetition_penalty", type=float, default=None)
    inf.add_argument("--presence_penalty", type=float, default=None)
    inf.add_argument("--frequency_penalty", type=float, default=None)
    inf.add_argument("--penalty_last_n", type=int, default=None)
    inf.add_argument("--var", action="store_true", default=None)
    inf.add_argument("--no-var", dest="var", action="store_false")
    args = ap.parse_args()
    if args.cmd == "train": train(args)
    else: infer(args)


if __name__ == "__main__":
    main()
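# Example invocations (paths and prompts are placeholders):
#     python n.py train --preset nano_3x --amp
#     python n.py train --preset base --tie_weights --after_sft_steps 2000 --amp
#     python n.py infer --mode ar --ckpt ckpts_expansion/final.pt --prompt "Hello" --max_new 120
#     python n.py infer --mode sat --ckpt ckpts_expansion/final.pt --prompt "Hello" --no-var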