""" |
|
|
FINAL SHOWDOWN: Standard depth vs Ultra-heavy mechanisms |
|
|
Question: At equal compute budget, does any heavy approach beat just adding layers? |
|
|
""" |
|
|
|
|
|
import math
import time

import torch
import torch.nn as nn
import torch.nn.functional as F

DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cuda.matmul.allow_tf32 = True  # use TF32 matmuls on Ampere+ GPUs
VOCAB = 128256


def alibi_bias(n_heads, n_tokens):
    """ALiBi: per-head linear bias that grows with the distance to the past."""
    def slopes(n):
        start = 2 ** (-2 ** -(math.log2(n) - 3))
        return [start * (start ** i) for i in range(n)]

    if math.log2(n_heads).is_integer():
        s = slopes(n_heads)
    else:
        # Non-power-of-two head counts: closest power of two, padded with interleaved
        # slopes from the next power of two (the recipe from the ALiBi paper).
        closest = 2 ** math.floor(math.log2(n_heads))
        s = slopes(closest) + slopes(2 * closest)[0::2][: n_heads - closest]
    s = torch.tensor(s, device=DEV).view(1, n_heads, 1, 1)
    i = torch.arange(n_tokens, device=DEV).view(1, 1, n_tokens, 1)  # query positions
    j = torch.arange(n_tokens, device=DEV).view(1, 1, 1, n_tokens)  # key positions
    # Penalty grows with how far back the key is (i - j >= 0 for allowed positions);
    # future positions are clamped to zero and removed by the causal mask anyway.
    return -s * (i - j).clamp_min(0).float()


def causal_mask(n):
    return torch.triu(torch.full((1, 1, n, n), float("-inf"), device=DEV), 1)
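

# Illustrative sanity check (the helper name and this function are our addition; nothing in
# the benchmark calls it). It shows what the two helpers produce: the ALiBi bias is 0 on the
# diagonal and grows more negative per step into the past with a head-specific slope, while
# the causal mask is -inf strictly above the diagonal.
def _demo_bias_and_mask(n_heads=4, n_tokens=5):
    b = alibi_bias(n_heads, n_tokens)  # (1, n_heads, n_tokens, n_tokens)
    m = causal_mask(n_tokens)          # (1, 1, n_tokens, n_tokens)
    assert b.shape == (1, n_heads, n_tokens, n_tokens)
    assert torch.all(b.diagonal(dim1=-2, dim2=-1) == 0)
    assert torch.isinf(m[0, 0, 0, 1]) and m[0, 0, 1, 0] == 0
    return b, m
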
class StandardAttn(nn.Module):
    """Vanilla multi-head self-attention with an ALiBi bias."""
    def __init__(self, d, h):
        super().__init__()
        self.h, self.dk = h, d // h
        self.qkv = nn.Linear(d, 3 * d, bias=False)
        self.proj = nn.Linear(d, d, bias=False)

    def forward(self, x, mask=None):
        B, N, _ = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.h, self.dk).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk) + alibi_bias(self.h, N)
        if mask is not None:
            att = att + mask
        return self.proj((att.softmax(-1) @ v).transpose(1, 2).reshape(B, N, -1))
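

# Note: position information enters only through alibi_bias above; the Model below has no
# learned positional embedding. Recomputing the bias (and its arange tensors) on every
# forward pass is simple but redundant; a caller could cache it per sequence length.
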
class DoubleAttn(nn.Module):
    """Simplest heavy variant: two sequential attention ops whose outputs are fused."""
    def __init__(self, d, h):
        super().__init__()
        self.attn1 = StandardAttn(d, h)
        self.attn2 = StandardAttn(d, h)
        self.gate = nn.Linear(d * 2, d)  # fuses the two attention outputs

    def forward(self, x, mask=None):
        o1 = self.attn1(x, mask)
        o2 = self.attn2(x + o1, mask)
        return self.gate(torch.cat([o1, o2], dim=-1))
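

# Roughly speaking, DoubleAttn doubles the attention cost of a block (two StandardAttn
# passes plus the fusion projection) while leaving the feed-forward cost unchanged, which
# is presumably why its step budget in main() below is half that of Standard-4L.
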
class RecurrentAttn(nn.Module):
    """The same attention module applied k times (weights shared across iterations)."""
    def __init__(self, d, h, k=4):
        super().__init__()
        self.attn = StandardAttn(d, h)
        self.depth_emb = nn.Embedding(k, d)  # tells the shared weights which iteration this is
        self.k = k

    def forward(self, x, mask=None):
        for i in range(self.k):
            x = x + self.attn(x + self.depth_emb.weight[i], mask)
        return x
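

# With k=4 this is roughly four attention passes per block (feed-forward cost unchanged),
# which is presumably why Recurrent-4L gets the smallest step budget in main() below.
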
class Block(nn.Module):
    def __init__(self, d, h, mode="standard"):
        super().__init__()
        self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
        if mode == "standard":
            self.attn = StandardAttn(d, h)
        elif mode == "double":
            self.attn = DoubleAttn(d, h)
        elif mode == "recurrent":
            self.attn = RecurrentAttn(d, h, k=4)
        else:
            raise ValueError(f"unknown mode: {mode}")
        self.ff = nn.Sequential(nn.Linear(d, 4 * d), nn.GELU(), nn.Linear(4 * d, d))

    def forward(self, x, mask=None):
        # Pre-norm residual block: attention, then feed-forward.
        x = x + self.attn(self.ln1(x), mask)
        return x + self.ff(self.ln2(x))


class Model(nn.Module):
    def __init__(self, d, layers, h, mode="standard"):
        super().__init__()
        self.emb = nn.Embedding(VOCAB, d)
        self.blocks = nn.ModuleList([Block(d, h, mode) for _ in range(layers)])
        self.ln = nn.LayerNorm(d)
        self.head = nn.Linear(d, VOCAB, bias=False)
        self.head.weight = self.emb.weight  # tie input embedding and output head

    def forward(self, x, mask=None):
        x = self.emb(x)
        for b in self.blocks:
            x = b(x, mask)
        return self.head(self.ln(x))

    def count_params(self):
        return sum(p.numel() for p in self.parameters())
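

# Back-of-the-envelope (our note): with the head tied to the embedding, the shared
# VOCAB x d matrix alone is 128256 * 256 ≈ 32.8M parameters, so the embedding dominates
# every config here and the printed "Parameters" differ mainly in the per-block weights.
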
def train(model, steps, batch, seq):
    opt = torch.optim.AdamW(model.parameters(), lr=1e-4)
    mask = causal_mask(seq - 1)  # inputs are ids[:, :-1], so the mask covers seq - 1 tokens
    losses, times = [], []

    for step in range(steps):
        ids = torch.randint(0, VOCAB, (batch, seq), device=DEV)
        start = time.time()
        opt.zero_grad()
        loss = F.cross_entropy(model(ids[:, :-1], mask).view(-1, VOCAB), ids[:, 1:].reshape(-1))
        loss.backward()
        opt.step()
        if DEV.type == "cuda":
            torch.cuda.synchronize()  # wait for kernels so step time is not under-reported
        times.append(time.time() - start)
        losses.append(loss.item())

        if step % 50 == 0 or step == steps - 1:
            tok_s = batch * seq / times[-1]
            print(f"Step {step:3d} | Loss {loss.item():.4f} | {tok_s:.0f} tok/s")

    # Average loss and throughput over the last 20 steps.
    tail_losses, tail_times = losses[-20:], times[-20:]
    return sum(tail_losses) / len(tail_losses), batch * seq / (sum(tail_times) / len(tail_times))
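

# A caveat worth keeping in mind (our note): the training data is uniformly random tokens,
# so the achievable cross-entropy floor is ln(VOCAB) ≈ 11.76 regardless of architecture;
# the comparison below therefore mostly probes optimization behavior and throughput rather
# than real language-modeling quality.
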
def main():
    print(f"Device: {DEV}")
    if torch.cuda.is_available():
        print(f"GPU: {torch.cuda.get_device_name()}")

    d, h, batch, seq = 256, 8, 16, 128

    # (name, layers, attention mode, training steps)
    configs = [
        ("Standard-4L", 4, "standard", 500),
        ("Standard-8L", 8, "standard", 250),
        ("Standard-16L", 16, "standard", 125),
        ("Double-4L", 4, "double", 250),
        ("Recurrent-4L", 4, "recurrent", 125),
    ]
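    # Step budgets appear to be chosen so total compute is roughly matched: steps scale
    # inversely with depth for the standard models, Double-4L (about 2x attention per block)
    # gets half the 4L budget, and Recurrent-4L (about 4x attention per block) gets a
    # quarter. The matching is approximate, since feed-forward cost does not scale the same
    # way; hence "roughly compute-matched" in the final printout.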

    results = []
    for name, layers, mode, steps in configs:
        print(f"\n{'='*60}")
        print(name)
        print(f"{'='*60}")

        model = Model(d, layers, h, mode).to(DEV)
        params = model.count_params()
        print(f"Parameters: {params:,}")

        avg_loss, avg_toks = train(model, steps, batch, seq)
        results.append((name, avg_loss, avg_toks, params, steps))
        del model
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    print(f"\n{'='*60}")
    print("FINAL RESULTS (roughly compute-matched)")
    print(f"{'='*60}")
    for name, loss, toks, params, steps in results:
        total_tok = steps * batch * seq
        print(f"{name:15s} | Loss {loss:.4f} | {toks:.0f} tok/s | {params/1e6:.1f}M params | {total_tok/1e6:.1f}M tok trained")


if __name__ == "__main__":
    main()