#!/usr/bin/env python3
"""
FINAL SHOWDOWN: Standard depth vs Ultra-heavy mechanisms
Question: At equal compute budget, does any heavy approach beat just adding layers?
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import math
DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cuda.matmul.allow_tf32 = True
VOCAB = 128256  # same size as the Llama 3 tokenizer vocabulary
def alibi_bias(n_heads, n_tokens):
    # ALiBi: per-head linear penalty on attending to distant keys.
    def slopes(n):
        start = 2 ** (-2 ** -(math.log2(n) - 3))  # = 2**(-8/n)
        return [start * (start ** i) for i in range(n)]
    if math.log2(n_heads).is_integer():
        s = slopes(n_heads)
    else:
        # Non-power-of-2 head counts: pad with every other slope from the next
        # power of 2 (the original [:n_heads] slice of a shorter list returned too few).
        closest = 2 ** math.floor(math.log2(n_heads))
        s = slopes(closest) + slopes(2 * closest)[0::2][: n_heads - closest]
    s = torch.tensor(s, device=DEV).view(1, n_heads, 1, 1)
    i = torch.arange(n_tokens, device=DEV).view(1, 1, n_tokens, 1)
    j = torch.arange(n_tokens, device=DEV).view(1, 1, 1, n_tokens)
    # Distance is (i - j) for keys j <= i; the original (j - i).clamp_min(0)
    # zeroed out the entire causal half, making the bias a no-op.
    return -s * (i - j).clamp_min(0).float()
def causal_mask(n):
    return torch.triu(torch.full((1, 1, n, n), float("-inf"), device=DEV), 1)
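# Illustrative: causal_mask(3)[0, 0] is
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]
# so after adding it to the logits, softmax gives zero weight to future keys;
# alibi_bias supplies the distance penalty on the remaining (past) positions.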
class StandardAttn(nn.Module):
    def __init__(self, d, h):
        super().__init__()
        self.h, self.dk = h, d // h
        self.qkv = nn.Linear(d, 3*d, bias=False)
        self.proj = nn.Linear(d, d, bias=False)
    def forward(self, x, mask=None):
        B, N, _ = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.h, self.dk).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk) + alibi_bias(self.h, N)
        if mask is not None:
            att = att + mask
        return self.proj((att.softmax(-1) @ v).transpose(1, 2).reshape(B, N, -1))
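# Shape walkthrough with the defaults below (d=256, h=8 -> dk=32):
#   x (B, N, 256) -> qkv (B, N, 768) -> q, k, v each (B, 8, N, 32)
#   att (B, 8, N, N) -> softmax over keys -> output back to (B, N, 256).
# Hypothetical usage, not executed by the benchmark:
#   attn = StandardAttn(256, 8)
#   y = attn(torch.randn(2, 16, 256, device=DEV), causal_mask(16))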
class DoubleAttn(nn.Module):
    """Simplest heavy: two sequential attention ops"""
    def __init__(self, d, h):
        super().__init__()
        self.attn1 = StandardAttn(d, h)
        self.attn2 = StandardAttn(d, h)
        self.gate = nn.Linear(d * 2, d)
    def forward(self, x, mask=None):
        o1 = self.attn1(x, mask)
        o2 = self.attn2(x + o1, mask)
        return self.gate(torch.cat([o1, o2], dim=-1))
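# Note: despite the name, `gate` is a plain learned linear mix of the two
# attention outputs (no sigmoid). Per block this is roughly twice the
# attention FLOPs and twice the attention parameters of StandardAttn,
# plus the small mixing layer.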
class RecurrentAttn(nn.Module):
    """Same attention applied k times"""
    def __init__(self, d, h, k=4):
        super().__init__()
        self.attn = StandardAttn(d, h)
        self.depth_emb = nn.Embedding(k, d)
        self.k = k
    def forward(self, x, mask=None):
        for i in range(self.k):
            x = x + self.attn(x + self.depth_emb.weight[i], mask)
        return x
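# Weight sharing in the universal-transformer style: roughly k times the
# attention compute for about the parameter count of a single StandardAttn.
# The depth embedding is added to the input so the shared weights can
# condition on which of the k iterations they are executing.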
class Block(nn.Module):
    def __init__(self, d, h, mode="standard"):
        super().__init__()
        self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
        if mode == "standard":
            self.attn = StandardAttn(d, h)
        elif mode == "double":
            self.attn = DoubleAttn(d, h)
        elif mode == "recurrent":
            self.attn = RecurrentAttn(d, h, k=4)
        else:
            # Fail fast instead of hitting an AttributeError in forward()
            raise ValueError(f"unknown mode: {mode}")
        self.ff = nn.Sequential(nn.Linear(d, 4*d), nn.GELU(), nn.Linear(4*d, d))
    def forward(self, x, mask=None):
        x = x + self.attn(self.ln1(x), mask)
        return x + self.ff(self.ln2(x))
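# Pre-LN residual block: each sublayer normalizes its input while the
# residual path stays an identity, which generally trains more stably
# than the post-LN arrangement at these depths.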
class Model(nn.Module):
    def __init__(self, d, layers, h, mode="standard"):
        super().__init__()
        self.emb = nn.Embedding(VOCAB, d)
        self.blocks = nn.ModuleList([Block(d, h, mode) for _ in range(layers)])
        self.ln = nn.LayerNorm(d)
        self.head = nn.Linear(d, VOCAB, bias=False)
        self.head.weight = self.emb.weight  # tie input and output embeddings
    def forward(self, x, mask=None):
        x = self.emb(x)
        for b in self.blocks:
            x = b(x, mask)
        return self.head(self.ln(x))
    def count_params(self):
        return sum(p.numel() for p in self.parameters())
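# With d=256 and VOCAB=128256, tying head.weight to emb.weight saves about
# 32.8M parameters (256 * 128256), which would otherwise dominate these
# small models' parameter counts.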
def train(model, steps, batch, seq):
    opt = torch.optim.AdamW(model.parameters(), lr=1e-4)
    mask = causal_mask(seq - 1)  # inputs are ids[:, :-1], i.e. seq - 1 tokens
    losses, times = [], []
    for step in range(steps):
        ids = torch.randint(0, VOCAB, (batch, seq), device=DEV)
        if DEV.type == "cuda":
            torch.cuda.synchronize()  # CUDA launches are async; sync for honest timing
        start = time.time()
        opt.zero_grad()
        loss = F.cross_entropy(model(ids[:, :-1], mask).view(-1, VOCAB), ids[:, 1:].reshape(-1))
        loss.backward()
        opt.step()
        if DEV.type == "cuda":
            torch.cuda.synchronize()
        times.append(time.time() - start)
        losses.append(loss.item())
        if step % 50 == 0 or step == steps - 1:
            tok_s = batch * (seq - 1) / times[-1]  # count the seq - 1 tokens actually processed
            print(f"Step {step:3d} | Loss {loss.item():.4f} | {tok_s:.0f} tok/s")
    return sum(losses[-20:]) / 20, batch * (seq - 1) / (sum(times[-20:]) / 20)
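# With uniformly random tokens, the achievable cross-entropy floor is
# ln(VOCAB) = ln(128256) ~= 11.76, so losses cluster near that value and the
# comparison mostly measures optimization behavior and throughput rather
# than language-modeling quality.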
def main():
    print(f"Device: {DEV}")
    if torch.cuda.is_available():
        print(f"GPU: {torch.cuda.get_device_name()}")
    d, h, batch, seq = 256, 8, 16, 128
    configs = [
        # (name, layers, mode, target_steps) -- step counts scaled down as
        # per-step cost grows, to keep total compute roughly matched
        ("Standard-4L", 4, "standard", 500),
        ("Standard-8L", 8, "standard", 250),   # ~2x slower per step, so half the steps
        ("Standard-16L", 16, "standard", 125), # ~4x slower
        ("Double-4L", 4, "double", 250),       # ~2x the attention cost per block
        ("Recurrent-4L", 4, "recurrent", 125), # k=4 attention iterations per block
    ]
    results = []
    for name, layers, mode, steps in configs:
        print(f"\n{'='*60}")
        print(f"{name}")
        print(f"{'='*60}")
        model = Model(d, layers, h, mode).to(DEV)
        params = model.count_params()
        print(f"Parameters: {params:,}")
        avg_loss, avg_toks = train(model, steps, batch, seq)
        results.append((name, avg_loss, avg_toks, params, steps))
        del model
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
    print(f"\n{'='*60}")
    print("FINAL RESULTS (roughly compute-matched)")
    print(f"{'='*60}")
    for name, loss, toks, params, steps in results:
        total_tok = steps * batch * (seq - 1)  # tokens actually seen by the model
        print(f"{name:15s} | Loss {loss:.4f} | {toks:.0f} tok/s | {params/1e6:.1f}M | {total_tok/1e6:.1f}M tok trained")

if __name__ == "__main__":
    main()