# stage8.py
# Author: Liam Grinstead
# Purpose: RFT-LLM (Language-Only Transformer Validation) — Stage Eight of Twelve

import math, time, json, random, argparse
import torch, torch.nn as nn, torch.nn.functional as F
# ---------------- Determinism ----------------
def set_seed(s=1234):
    random.seed(s); torch.manual_seed(s); torch.cuda.manual_seed_all(s)
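# Note: seeding alone does not guarantee bitwise-identical CUDA runs; full
# determinism would also need torch.use_deterministic_algorithms(True) and
# torch.backends.cudnn.deterministic = True (left out here to preserve the
# original script's behaviour).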
# ---------------- Telemetry ------------------
class Telemetry:
    def __init__(self, path="stage8_llm.jsonl"):
        self.t0 = time.time(); self.f = open(path, "w")
    def emit(self, **k):
        k["t"] = round(time.time() - self.t0, 3)
        line = json.dumps(k, separators=(",", ":"))
        print(line); self.f.write(line + "\n"); self.f.flush()
    def close(self): self.f.close()
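# Each emit() call prints and appends one compact JSON object per line, with
# the elapsed time "t" inserted last. An illustrative line (values made up):
#   {"mode":"RFT","step":1,"drift":1.041,"flux":0.866,"E_ret":0.994,"coh":0.999,"loss":10.4,"acc":0.0,"J_step":0.0001,"t":0.412}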
# ---------------- Orbital Coupler ------------
class Orbital:
    def __init__(self, g=0.006, floor=0.2):
        self.a = 0.0; self.b = math.pi / 3; self.g = g; self.floor = floor
    def step(self):
        d = (self.b - self.a + math.pi) % (2 * math.pi) - math.pi
        if abs(d) < self.floor: d = self.floor * (1 if d >= 0 else -1)
        s = math.sin(d)
        self.a = (self.a + self.g * s) % (2 * math.pi)
        self.b = (self.b - self.g * s) % (2 * math.pi)
        drift = abs((self.a - self.b + math.pi) % (2 * math.pi) - math.pi)
        return drift, abs(s)
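# The coupler advances two phases a, b toward each other on every step:
#   d  = wrap(b - a) into (-pi, pi], clamped to at least `floor` in magnitude
#   a += g*sin(d);  b -= g*sin(d)   (both taken mod 2*pi)
# drift = |wrap(a - b)| therefore decreases until it falls below `floor`,
# after which the clamp keeps the pair dithering around alignment;
# flux = |sin(d)| measures the instantaneous coupling strength.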
# ---------------- DCLR Optimiser -------------
class DCLR(torch.optim.Optimizer):
    def __init__(self, params, lr=5e-4, beta=0.9, gamma=0.999, eps=1e-8, cg=0.05):
        super().__init__(params, dict(lr=lr, beta=beta, gamma=gamma, eps=eps, cg=cg))
    @torch.no_grad()  # parameter updates must not be tracked by autograd
    def step(self, closure=None):
        tot = 0.0
        for g in self.param_groups:
            lr, beta, gamma, eps, c = g["lr"], g["beta"], g["gamma"], g["eps"], g["cg"]
            for p in g["params"]:
                if p.grad is None: continue
                st = self.state[p]
                if not st:
                    st["m"] = torch.zeros_like(p); st["v"] = torch.zeros_like(p); st["coh"] = torch.zeros_like(p)
                m, v, h = st["m"], st["v"], st["coh"]; g0 = p.grad
                m.mul_(beta).add_(g0, alpha=1 - beta)
                v.mul_(gamma).addcmul_(g0, g0, value=1 - gamma)
                d = g0 - m; h.mul_(0.9).add_(d.abs(), alpha=0.1)
                lr_eff = lr / (1 + c * h)
                step = lr_eff * m / (v.sqrt() + eps)
                p.add_(-step); tot += (step * step).sum().item()
        return None, tot
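# Per-parameter update, as implemented above:
#   m <- beta*m  + (1-beta)*g          (first moment)
#   v <- gamma*v + (1-gamma)*g^2       (second moment)
#   h <- 0.9*h   + 0.1*|g - m|         (running gradient dispersion, "coh")
#   lr_eff = lr / (1 + cg*h)           (rate damped where gradients are incoherent)
#   p <- p - lr_eff * m / (sqrt(v) + eps)
# The second return value is the summed squared step size, logged as J_step.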
# ---------------- LLM Proxy ------------------
class Block(nn.Module):
    def __init__(self, d=512, heads=8, mlp_ratio=4):
        super().__init__()
        self.n1 = nn.LayerNorm(d)
        self.attn = nn.MultiheadAttention(d, heads, batch_first=True)
        self.n2 = nn.LayerNorm(d)
        self.mlp = nn.Sequential(nn.Linear(d, int(d * mlp_ratio)), nn.GELU(), nn.Linear(int(d * mlp_ratio), d))
    def forward(self, x):
        h = x; x = self.n1(x); x, _ = self.attn(x, x, x, need_weights=False); x = x + h
        h = x; x = self.n2(x); x = h + self.mlp(x); return x
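# Standard pre-norm residual block: x + Attn(LN(x)), then x + MLP(LN(x)).
# No attention mask is passed, so attention is fully bidirectional.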
class LLMProxy(nn.Module):
    def __init__(self, vocab=32768, d=512, L=6, heads=8, max_len=512):
        super().__init__()
        self.emb = nn.Embedding(vocab, d)
        self.pos = nn.Parameter(torch.zeros(1, max_len, d))
        self.blocks = nn.ModuleList([Block(d, heads) for _ in range(L)])
        self.norm = nn.LayerNorm(d)
        self.head = nn.Linear(d, vocab)
    def forward(self, tok):
        x = self.emb(tok) + self.pos[:, :tok.size(1)]
        for blk in self.blocks: x = blk(x)
        x = self.norm(x); return self.head(x)
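# Shape flow: tok (B, T) -> embeddings + learned positions (B, T, d)
#   -> L pre-norm blocks -> final LayerNorm -> logits (B, T, vocab).
# Since the blocks apply no causal mask, each position can attend ahead of
# itself, including to the position holding its next-token target.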
# ---------------- Data -----------------------
def make_batch(batch=64, seq=256, vocab=32768):
    x = torch.randint(0, vocab, (batch, seq))
    y = torch.roll(x, shifts=-1, dims=1)
    return x, y
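# Targets are the inputs shifted left by one position; torch.roll wraps, so the
# final target in each row is that row's first token.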
# ---------------- Runner ---------------------
def run(mode="RFT", steps=1000, batch=64, seq=256, vocab=32768, log="stage8_llm.jsonl"):
    set_seed(1234); tm = Telemetry(log); orb = Orbital()
    dev = "cuda" if torch.cuda.is_available() else "cpu"
    model = LLMProxy(vocab=vocab, max_len=max(512, seq)).to(dev)
    opt = DCLR(model.parameters()) if mode == "RFT" else torch.optim.Adam(model.parameters(), lr=5e-4)
    loss_fn = nn.CrossEntropyLoss()
    use_bf16 = (dev == "cuda" and torch.cuda.is_bf16_supported())
    for s in range(1, steps + 1):
        drift, flux = orb.step()
        x, y = make_batch(batch, seq, vocab); x, y = x.to(dev), y.to(dev)
        opt.zero_grad(set_to_none=True)
        if use_bf16:
            with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                out = model(x); loss = loss_fn(out.view(-1, out.size(-1)), y.view(-1))
        else:
            out = model(x); loss = loss_fn(out.view(-1, out.size(-1)), y.view(-1))
        loss.backward()
        if isinstance(opt, DCLR): _, J = opt.step()
        else: opt.step(); J = 0.0
        pred = out.argmax(-1); acc = (pred == y).float().mean().item()
        tm.emit(mode=mode, step=s, drift=round(drift, 3), flux=round(flux, 3),
                E_ret=0.994, coh=0.999,
                loss=round(float(loss.item()), 4), acc=round(float(acc), 3),
                J_step=round(float(J * 1e-6), 6))
    tm.close()
    return f"Stage 8 complete. Telemetry saved to {log}"
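# Minimal CLI entry point so the imported argparse is actually used; the flag
# names below are an assumption for illustration, not part of the original script.
if __name__ == "__main__":
    ap = argparse.ArgumentParser(description="Stage 8: RFT-LLM language-only transformer validation")
    ap.add_argument("--mode", choices=["RFT", "ADAM"], default="RFT")
    ap.add_argument("--steps", type=int, default=1000)
    ap.add_argument("--batch", type=int, default=64)
    ap.add_argument("--seq", type=int, default=256)
    ap.add_argument("--vocab", type=int, default=32768)
    ap.add_argument("--log", default="stage8_llm.jsonl")
    a = ap.parse_args()
    print(run(mode=a.mode, steps=a.steps, batch=a.batch, seq=a.seq, vocab=a.vocab, log=a.log))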