"""Minimal ablation suite - PyTorch only, no Triton. Addresses the 3 critique gaps."""
import argparse,json,math,os,random,sys,time,urllib.request
from collections import defaultdict
import torch,torch.nn as nn,torch.nn.functional as F
import tiktoken
print("imports ok",flush=True)
class Corpus:
    _i=None
    @classmethod
    def get(cls,bs,dev):
        if cls._i is None: cls._i=cls(bs,dev)
        return cls._i
    def __init__(self,bs,dev):  # bs = block size (context length)
        self.block_size,self.device=bs,dev
        p="input.txt"
        if not os.path.exists(p):
            urllib.request.urlretrieve("https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt",p)
        enc=tiktoken.get_encoding("gpt2")
        with open(p) as f: t=enc.encode(f.read())
        self.vocab_size=enc.n_vocab; d=torch.tensor(t,dtype=torch.long)
        si=int(0.9*len(d)); self.train_data,self.val_data=d[:si],d[si:]
        print(f"Corpus: V={self.vocab_size} train={len(self.train_data):,} val={len(self.val_data):,}",flush=True)
    def get_batch(self,split,bs,gen=None):  # bs here = batch size
        d=self.train_data if split=="train" else self.val_data
        ix=torch.randint(len(d)-self.block_size-1,(bs,),generator=gen)
        x=torch.stack([d[i:i+self.block_size] for i in ix])
        y=torch.stack([d[i+1:i+self.block_size+1] for i in ix])
        return x.to(self.device),y.to(self.device)
def mg(s):
    """Fresh CPU generator seeded with s, so batch sampling is reproducible."""
    g=torch.Generator(device="cpu"); g.manual_seed(s); return g
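# ---------------------------------------------------------------------------
# Chunk-sparse backward. Output neurons of every SL layer are grouped into
# contiguous chunks of cs rows; during sparse training only the "active"
# chunks (indices in ac) receive weight/bias gradients. The forward pass is
# always dense, so the loss is exact and only backward compute is saved.
# ---------------------------------------------------------------------------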
class SparseBwd(torch.autograd.Function):
    @staticmethod
    def forward(ctx,x,w,b,ac,cs,sdx):
        ctx.save_for_backward(x,w,ac); ctx.hb=b is not None; ctx.sdx=sdx; ctx.cs=cs
        return F.linear(x,w,b)  # dense forward
    @staticmethod
    def backward(ctx,gy):
        x,w,ac=ctx.saved_tensors; cs=ctx.cs
        xf=x.reshape(-1,x.shape[-1]); gf=gy.reshape(-1,gy.shape[-1])
        gw=torch.zeros_like(w)
        gb=torch.zeros(w.shape[0],device=w.device,dtype=w.dtype) if ctx.hb else None
        gx=torch.zeros_like(xf) if ctx.sdx else gf@w  # dx is sparse only when sdx is set
        for c in ac.tolist():  # weight/bias grads only for active chunks
            s,e=c*cs,(c+1)*cs; sl=gf[:,s:e]
            gw[s:e]=sl.t()@xf
            if gb is not None: gb[s:e]=sl.sum(0)
            if ctx.sdx: gx+=sl@w[s:e]
        return gx.reshape(x.shape),gw,gb,None,None,None
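# Optional sanity check, not wired into any experiment: a minimal sketch that
# verifies SparseBwd reproduces dense autograd's weight/bias grads on active
# chunks and leaves inactive chunks at zero. The helper name _check_sparse_bwd
# is ours, not part of the suite above.
def _check_sparse_bwd(cs=4,i=8,o=12):
    torch.manual_seed(0)
    x=torch.randn(5,i,requires_grad=True); w=torch.randn(o,i,requires_grad=True)
    b=torch.randn(o,requires_grad=True); ac=torch.tensor([0,2])  # chunks 0 and 2 active
    SparseBwd.apply(x,w,b,ac,cs,True).sum().backward()
    wd=w.detach().clone().requires_grad_(); bd=b.detach().clone().requires_grad_()
    F.linear(x.detach(),wd,bd).sum().backward()  # dense reference grads
    for c in ac.tolist():
        s,e=c*cs,(c+1)*cs
        assert torch.allclose(w.grad[s:e],wd.grad[s:e],atol=1e-5)
        assert torch.allclose(b.grad[s:e],bd.grad[s:e],atol=1e-5)
    assert w.grad[cs:2*cs].abs().sum()==0  # inactive chunk got no gradient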
class SL(nn.Linear):
    """nn.Linear whose backward can be restricted to active output chunks."""
    def __init__(self,i,o,bias=True):
        super().__init__(i,o,bias=bias)
        self.se=False; self.sdx=False; self.ac=None; self.cs=64  # sparse enable, sparse dx, active chunks, chunk size
    def forward(self,x):
        if not self.se or self.ac is None: return F.linear(x,self.weight,self.bias)
        return SparseBwd.apply(x,self.weight,self.bias,self.ac,self.cs,self.sdx)
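# ---------------------------------------------------------------------------
# Model: a standard pre-LN GPT (learned positional embeddings, causal
# attention, GELU MLP with multiplier fm, 4x by default). All projections are
# SL layers so the scheduler can sparsify their backward passes; embeddings
# and the output head stay dense.
# ---------------------------------------------------------------------------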
class Attn(nn.Module):
    def __init__(self,d,nh,bs,do):  # bs = block size (max sequence length)
        super().__init__(); self.nh=nh; self.hd=d//nh
        self.qkv=SL(d,3*d); self.proj=SL(d,d); self.drop=nn.Dropout(do)
        self.register_buffer("mask",torch.tril(torch.ones(bs,bs)).view(1,1,bs,bs))
    def forward(self,x):
        B,T,C=x.shape; q,k,v=self.qkv(x).split(C,2)
        q=q.view(B,T,self.nh,self.hd).transpose(1,2)
        k=k.view(B,T,self.nh,self.hd).transpose(1,2)
        v=v.view(B,T,self.nh,self.hd).transpose(1,2)
        a=(q@k.transpose(-2,-1))/math.sqrt(self.hd)
        a=a.masked_fill(self.mask[:,:,:T,:T]==0,float("-inf"))
        a=self.drop(F.softmax(a,dim=-1))
        return self.proj((a@v).transpose(1,2).contiguous().view(B,T,C))
class FFN(nn.Module):
    def __init__(self,d,do,fm=4):
        super().__init__(); self.fc=SL(d,fm*d); self.proj=SL(fm*d,d); self.drop=nn.Dropout(do)
    def forward(self,x): return self.drop(self.proj(F.gelu(self.fc(x))))
class Blk(nn.Module):
    def __init__(self,d,nh,bs,do,fm=4):
        super().__init__(); self.ln1=nn.LayerNorm(d); self.attn=Attn(d,nh,bs,do)
        self.ln2=nn.LayerNorm(d); self.mlp=FFN(d,do,fm)
    def forward(self,x): x=x+self.attn(self.ln1(x)); return x+self.mlp(self.ln2(x))
class GPT(nn.Module):
    def __init__(self,V,bs,nl,nh,d,do,fm=4):
        super().__init__(); self.te=nn.Embedding(V,d); self.pe=nn.Embedding(bs,d)
        self.blocks=nn.Sequential(*[Blk(d,nh,bs,do,fm) for _ in range(nl)])
        self.ln=nn.LayerNorm(d); self.head=nn.Linear(d,V)
    def forward(self,idx,tgt=None):
        B,T=idx.shape; x=self.te(idx)+self.pe(torch.arange(T,device=idx.device))[None]
        lo=self.head(self.ln(self.blocks(x)))
        return lo,(F.cross_entropy(lo.view(-1,lo.size(-1)),tgt.view(-1)) if tgt is not None else None)
    def np(self): return sum(p.numel() for p in self.parameters())
def gsl(m): return [x for x in m.modules() if isinstance(x,SL)]
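# ---------------------------------------------------------------------------
# Scheduler: assigns every SL layer's output chunks a global index and, each
# step, picks which chunks train. The active fraction is 1.0 during warmup,
# then cosine-anneals down to frac. Policies: "ema" ranks chunks by an EMA of
# observed per-chunk gradient norms (tiny noise breaks ties); "random"
# samples uniformly.
# ---------------------------------------------------------------------------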
class Sched:
    def __init__(self,model,pol,frac,cs,dev,beta=0.95):
        self.pol,self.frac,self.cs,self.dev,self.beta=pol,frac,cs,dev,beta
        self.lins=gsl(model); self.m2i,self.m2l={},{}; off=0
        for m in self.lins:  # m2i: module -> global chunk ids, m2l: module -> local ids
            m.cs=cs; nc=m.out_features//cs; assert m.out_features%cs==0
            self.m2i[m]=torch.arange(off,off+nc,device=dev)
            self.m2l[m]=torch.arange(nc,device=dev); off+=nc
        self.nc=off; self.ema=torch.zeros(self.nc,device=dev)
        self.act=torch.zeros(self.nc,dtype=torch.bool,device=dev)
    def gf(self,step,wu,an):
        """Active fraction: 1.0 during warmup, cosine anneal to frac, then frac."""
        if step<wu: return 1.0
        if an>0 and step<wu+an:
            p=(step-wu)/an; return self.frac+(1-self.frac)*0.5*(1+math.cos(math.pi*p))
        return self.frac
    def choose(self,step,wu,an):
        f=self.gf(step,wu,an)
        if f>=0.999: self.act.fill_(True); self._inst(); return
        k=max(1,int(f*self.nc)); self.act.fill_(False)
        if self.pol=="random": idx=torch.randperm(self.nc,device=self.dev)[:k]
        else: idx=torch.topk(self.ema+1e-9*torch.rand_like(self.ema),k=k).indices
        self.act[idx]=True; self._inst()
    def _inst(self):  # push the global selection down to each layer's local ac
        for m,gi in self.m2i.items(): m.ac=self.m2l[m][self.act[gi]]
    @torch.no_grad()
    def update(self,step,wu):
        cur=torch.zeros_like(self.ema)
        for m,ids in self.m2i.items():
            if m.weight.grad is None: continue
            s=m.weight.grad.square().view(len(ids),self.cs,-1).sum((1,2))
            if m.bias is not None and m.bias.grad is not None:
                s+=m.bias.grad.square().view(len(ids),self.cs).sum(1)
            cur[ids]=torch.sqrt(s+1e-30)
        obs=self.act; new=obs&(self.ema==0); old=obs&~new  # EMA only over chunks observed this step
        self.ema[new]=cur[new]; self.ema[old]=self.beta*self.ema[old]+(1-self.beta)*cur[old]
        return cur
    @torch.no_grad()
    def oracle_scores(self):
        """Per-chunk gradient norms from whatever grads are currently stored."""
        sc=torch.zeros(self.nc,device=self.dev)
        for m,ids in self.m2i.items():
            if m.weight.grad is None: continue
            s=m.weight.grad.square().view(len(ids),self.cs,-1).sum((1,2))
            if m.bias is not None and m.bias.grad is not None:
                s+=m.bias.grad.square().view(len(ids),self.cs).sum(1)
            sc[ids]=torch.sqrt(s+1e-30)
        return sc
    def overlap(self,k):
        """Jaccard and recall between the oracle top-k and the current selection."""
        o=set(torch.topk(self.oracle_scores(),k=k).indices.tolist())
        p=set(self.act.nonzero(as_tuple=True)[0].tolist())
        if not o or not p: return 0.,0.
        i=o&p; return len(i)/len(o|p),len(i)/len(o)
class CAdam:
    def __init__(self,model,lr=3e-4,cs=64,mm="phantom"):
        self.model,self.lr,self.cs,self.mm=model,lr,cs,mm
        self.st={}; self.p2m={}
        for m in gsl(model):
            if m.weight is not None: self.p2m[m.weight]=m
            if m.bias is not None: self.p2m[m.bias]=m
    def zero_grad(self):
        for p in self.model.parameters(): p.grad=None
    @torch.no_grad()
    def step(self):
        for p in self.model.parameters():
            if p.grad is None: continue
            if p not in self.st: self.st[p]={"m":torch.zeros_like(p),"v":torch.zeros_like(p)}
            m,v=self.st[p]["m"],self.st[p]["v"]
            sm=self.p2m.get(p); ac=getattr(sm,"ac",None) if sm else None
            if ac is None:  # dense parameter (or a sparse layer running in dense mode)
                m.mul_(0.9).add_(p.grad,alpha=0.1); v.mul_(0.999).addcmul_(p.grad,p.grad,value=0.001)
                p.sub_(m/(torch.sqrt(v)+1e-8),alpha=self.lr)
            elif self.mm=="phantom":  # m/v decay everywhere, updates only on active chunks
                m.mul_(0.9).add_(p.grad,alpha=0.1); v.mul_(0.999).addcmul_(p.grad,p.grad,value=0.001)
                for c in ac.tolist():
                    s,e=c*self.cs,(c+1)*self.cs
                    p.data[s:e].sub_(m[s:e]/(torch.sqrt(v[s:e])+1e-8),alpha=self.lr)
            else:  # "frozen": inactive chunks' m/v stay exactly as they were
                for c in ac.tolist():
                    s,e=c*self.cs,(c+1)*self.cs; g=p.grad[s:e]
                    m[s:e].mul_(0.9).add_(g,alpha=0.1); v[s:e].mul_(0.999).addcmul_(g,g,value=0.001)
                    p.data[s:e].sub_(m[s:e]/(torch.sqrt(v[s:e])+1e-8),alpha=self.lr)
@torch.no_grad()
def ev(model,corpus,bs,n=20,seed=9999):
    """Mean val loss over n fixed-seed batches, plus (clamped) perplexity."""
    model.eval(); ls=[model(*corpus.get_batch("val",bs,mg(seed+i)))[1].item() for i in range(n)]
    model.train(); a=sum(ls)/len(ls); return a,math.exp(min(a,20))
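# ---------------------------------------------------------------------------
# Single training run. Every policy sees identical batches (generator seeded
# by step). With mo=True, every oi steps after warmup+anneal the sparse grads
# are stashed, a dense backward on the same batch supplies oracle per-chunk
# scores, and the selection overlap (Jaccard/recall) is recorded.
# ---------------------------------------------------------------------------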
def run1(pol,steps,bs,bsz,nl,nh,d,cs,af,wu,an,lr,dev,seed,mm="phantom",fm=4,mo=False,oi=50):
    torch.manual_seed(seed); random.seed(seed)
    if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed)
    corpus=Corpus.get(bsz,dev)
    model=GPT(corpus.vocab_size,bsz,nl,nh,d,0.1,fm).to(dev)
    for m in gsl(model): m.cs=cs
    dense=pol=="dense"; sched=None if dense else Sched(model,pol,af,cs,dev)
    opt=CAdam(model,lr,cs,mm); np_=model.np(); overlaps=[]
    if dev=="cuda": torch.cuda.synchronize()
    t0=time.perf_counter()
    for step in range(steps):
        x,y=corpus.get_batch("train",bs,mg(step))  # same batch order for every policy
        if dense:
            for m in gsl(model): m.se=False; m.ac=None
        else:
            sched.choose(step,wu,an)
            for m in gsl(model): m.se=True; m.sdx=False
        opt.zero_grad(); _,loss=model(x,y); loss.backward()
        if sched:
            sched.update(step,wu)
            if mo and step%oi==0 and step>=wu+an:  # oracle probe: dense backward on the same batch
                saved={p:p.grad.clone() for p in model.parameters() if p.grad is not None}
                for m in gsl(model): m.se=False
                for p in model.parameters(): p.grad=None
                _,lo=model(x,y); lo.backward()
                k=max(1,int(af*sched.nc)); j,r=sched.overlap(k)
                overlaps.append((step,j,r))
                for p in model.parameters():  # restore the sparse grads before stepping
                    if p in saved: p.grad=saved[p]
                for m in gsl(model): m.se=True
        opt.step()
        if step%200==0: print(f"  step {step}/{steps} loss={loss.item():.4f}",flush=True)
    if dev=="cuda": torch.cuda.synchronize()
    wall=time.perf_counter()-t0
    for m in gsl(model): m.se=False
    vl,vp=ev(model,corpus,bs,n=30)
    del model
    if dev=="cuda": torch.cuda.empty_cache()
    return {"vl":vl,"vp":vp,"wall":wall,"ms":1000*wall/steps,"np":np_,"tl":loss.item(),"ov":overlaps}
def runs(cfg,seeds):
    rs=[]
    for s in seeds: cfg["seed"]=s; rs.append(run1(**cfg))
    vls=[r["vl"] for r in rs]; ml=sum(vls)/len(vls)
    sl=(sum((x-ml)**2 for x in vls)/max(1,len(vls)-1))**0.5  # sample std (Bessel-corrected)
    return {"ml":ml,"sl":sl,"rs":rs,"ms":sum(r["ms"] for r in rs)/len(rs)}
def exp1(dev,steps,seeds,d,nl,nh,bs,bsz,cs,af,wu,an,lr):
    """Phantom momentum ablation."""
    print("\n"+"="*80+"\nEXP 1: Phantom Momentum\n"+"="*80,flush=True)
    base=dict(steps=steps,bs=bs,bsz=bsz,nl=nl,nh=nh,d=d,cs=cs,af=af,wu=wu,an=an,lr=lr,dev=dev)
    cfgs=[("dense","dense","phantom"),("ema+phantom","ema","phantom"),("ema+frozen","ema","frozen"),
          ("random+phantom","random","phantom"),("random+frozen","random","frozen")]
    R={}
    for name,pol,mm in cfgs:
        print(f"\n--- {name} ---",flush=True)
        R[name]=runs({**base,"pol":pol,"mm":mm},seeds)
    print(f"\n{'Method':<20} | {'Val Loss':>18} | {'ms/step':>8}",flush=True)
    print("-"*52,flush=True)
    for name,_,_ in cfgs:
        r=R[name]; print(f"{name:<20} | {r['ml']:.4f} ± {r['sl']:.4f} | {r['ms']:>7.1f}",flush=True)
    return R
def exp2(dev,steps,seeds,d,nl,nh,bs,bsz,cs,af,wu,an,lr):
    """Compute-matched baselines."""
    print("\n"+"="*80+"\nEXP 2: Compute-Matched\n"+"="*80,flush=True)
    base=dict(steps=steps,bs=bs,bsz=bsz,nl=nl,nh=nh,d=d,cs=cs,af=af,wu=wu,an=an,lr=lr,dev=dev,mm="phantom")
    print("\n--- Sparse EMA ---",flush=True)
    sp=runs({**base,"pol":"ema"},seeds)
    print("\n--- Dense same steps ---",flush=True)
    ds=runs({**base,"pol":"dense"},seeds)
    ms=int(steps*(1+1+af)/3)  # FLOP match: sparse step ~ fwd + dense dx + af*dw vs 3 dense passes
    print(f"\n--- Dense matched {ms} steps ---",flush=True)
    dm=runs({**base,"pol":"dense","steps":ms},seeds)
    sfm=max(1,round(4*af))  # shrink the FFN multiplier to roughly match active params
    print(f"\n--- Small dense ffn_mult={sfm} ---",flush=True)
    dd=runs({**base,"pol":"dense","fm":sfm},seeds)
    R={"sparse_ema":sp,"dense_same":ds,f"dense_{ms}steps":dm,f"dense_ffn{sfm}":dd}
    print(f"\n{'Method':<25} | {'Params':>7} | {'Val Loss':>18} | {'ms/step':>8}",flush=True)
    print("-"*65,flush=True)
    for n,r in R.items():
        np_=r["rs"][0]["np"]
        print(f"{n:<25} | {np_/1e6:>6.1f}M | {r['ml']:.4f} ± {r['sl']:.4f} | {r['ms']:>7.1f}",flush=True)
    return R
def exp3(dev,steps,seeds,d,nl,nh,bs,bsz,cs,af,wu,an,lr):
    """Predictor accuracy vs the dense-gradient oracle."""
    print("\n"+"="*80+"\nEXP 3: Predictor Accuracy\n"+"="*80,flush=True)
    base=dict(steps=steps,bs=bs,bsz=bsz,nl=nl,nh=nh,d=d,cs=cs,af=af,wu=wu,an=an,lr=lr,dev=dev,mm="phantom",mo=True,oi=25)
    R={}
    for pol in ["ema","random"]:
        print(f"\n--- {pol} ---",flush=True)
        R[pol]=runs({**base,"pol":pol},seeds)
    for pol in ["ema","random"]:
        print(f"\n{pol.upper()} overlap:",flush=True)
        sd=defaultdict(lambda:{"j":[],"r":[]})
        for res in R[pol]["rs"]:
            for s,j,r in res["ov"]: sd[s]["j"].append(j); sd[s]["r"].append(r)
        for s in sorted(sd):
            mj=sum(sd[s]["j"])/len(sd[s]["j"]); mr=sum(sd[s]["r"])/len(sd[s]["r"])
            print(f"  step {s:>5}: jaccard={mj:.4f} recall={mr:.4f}",flush=True)
    print(f"\n{'Pol':<8} | {'Val Loss':>18} | {'ms/step':>8}",flush=True)
    for p in ["ema","random"]:
        r=R[p]; print(f"{p:<8} | {r['ml']:.4f} ± {r['sl']:.4f} | {r['ms']:>7.1f}",flush=True)
    return R
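# ---------------------------------------------------------------------------
# CLI. Flags: --bs is batch size, --bsz is block size (context length);
# --af is the target active-chunk fraction, --wu/--an the warmup and anneal
# lengths of the fraction schedule. Results are dumped to <exp>.json.
# ---------------------------------------------------------------------------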
def main():
    p=argparse.ArgumentParser()
    p.add_argument("--exp",default="all",choices=["all","exp1","exp2","exp3"])
    p.add_argument("--device",default="cuda"); p.add_argument("--steps",type=int,default=1000)
    p.add_argument("--seeds",default="42,123,456"); p.add_argument("--d",type=int,default=1024)
    p.add_argument("--nl",type=int,default=4); p.add_argument("--nh",type=int,default=8)
    p.add_argument("--bs",type=int,default=8); p.add_argument("--bsz",type=int,default=256)
    p.add_argument("--cs",type=int,default=64); p.add_argument("--af",type=float,default=0.10)
    p.add_argument("--wu",type=int,default=50); p.add_argument("--an",type=int,default=200)
    p.add_argument("--lr",type=float,default=3e-4)
    a=p.parse_args(); seeds=[int(s) for s in a.seeds.split(",")]
    if a.device=="cuda" and torch.cuda.is_available():
        print(f"GPU: {torch.cuda.get_device_name()} VRAM: {torch.cuda.get_device_properties(0).total_memory/1e9:.1f}GB",flush=True)
    print(f"d={a.d} nl={a.nl} nh={a.nh} steps={a.steps} seeds={seeds} cs={a.cs} af={a.af}",flush=True)
    sh=dict(dev=a.device,steps=a.steps,seeds=seeds,d=a.d,nl=a.nl,nh=a.nh,bs=a.bs,bsz=a.bsz,cs=a.cs,af=a.af,wu=a.wu,an=a.an,lr=a.lr)
    t0=time.time()
    all_exps={"exp1":exp1,"exp2":exp2,"exp3":exp3}
    exps=all_exps if a.exp=="all" else {a.exp:all_exps[a.exp]}
    for n,fn in exps.items():
        print(f"\n{'#'*60}\n# {n} ({(time.time()-t0)/60:.1f}m)\n{'#'*60}",flush=True)
        r=fn(**sh)
        with open(f"{n}.json","w") as f: json.dump(r,f,indent=2,default=str)
        print(f"saved {n}.json",flush=True)
    print(f"\nDONE in {(time.time()-t0)/60:.1f}m",flush=True)
if __name__=="__main__": main()
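# Example invocation (hypothetical script name; the flags are defined in main):
#   python ablations.py --exp exp1 --device cuda --steps 1000 --seeds 42,123,456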