OpenTransformer committed
Commit 764896d (verified) · 1 Parent(s): 04806b0

Add experiments/final_showdown.py

Files changed (1)
  1. experiments/final_showdown.py +172 -0
experiments/final_showdown.py ADDED
@@ -0,0 +1,172 @@
+ #!/usr/bin/env python3
+ """
+ FINAL SHOWDOWN: standard depth vs. ultra-heavy attention mechanisms.
+ Question: at an equal compute budget, does any heavy approach beat just adding layers?
+ """
+ 
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import time
+ import math
+ 
+ DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ torch.backends.cuda.matmul.allow_tf32 = True  # allow TF32 matmuls for speed on Ampere+ GPUs
+ VOCAB = 128256  # vocabulary size (the Llama 3 tokenizer's size)
+ 
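+ # ALiBi-style position bias (Press et al.): each head gets a linear penalty on
+ # attention logits proportional to key distance, replacing positional embeddings.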
+ def alibi_bias(n_heads, n_tokens):
+     def slopes(n):
+         start = 2 ** (-2 ** -(math.log2(n) - 3))
+         return [start * (start ** i) for i in range(n)]
+     # For non-power-of-2 head counts, round *up* to the next power of two and
+     # truncate; rounding down (as before) yielded fewer than n_heads slopes and
+     # the view() below would fail.
+     s = slopes(n_heads) if math.log2(n_heads).is_integer() else slopes(2 ** math.ceil(math.log2(n_heads)))[:n_heads]
+     s = torch.tensor(s, device=DEV).view(1, n_heads, 1, 1)
+     i = torch.arange(n_tokens, device=DEV).view(1, 1, n_tokens, 1)
+     j = torch.arange(n_tokens, device=DEV).view(1, 1, 1, n_tokens)
+     # Penalty grows with distance to *past* keys (i - j >= 0); the original
+     # (j - i) clamped to zero everywhere causal attention can look, so the
+     # bias was a no-op.
+     return -s * (i - j).clamp_min(0).float()
+ 
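+ # Additive causal mask: -inf strictly above the diagonal blocks attention to future tokens.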
+ def causal_mask(n):
+     return torch.triu(torch.full((1, 1, n, n), float("-inf"), device=DEV), 1)
+ 
+ 
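+ # Baseline multi-head self-attention; position information comes only from ALiBi.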
+ class StandardAttn(nn.Module):
+     def __init__(self, d, h):
+         super().__init__()
+         self.h, self.dk = h, d // h
+         self.qkv = nn.Linear(d, 3*d, bias=False)
+         self.proj = nn.Linear(d, d, bias=False)
+ 
+     def forward(self, x, mask=None):
+         B, N, _ = x.shape
+         qkv = self.qkv(x).reshape(B, N, 3, self.h, self.dk).permute(2, 0, 3, 1, 4)
+         q, k, v = qkv[0], qkv[1], qkv[2]
+         att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk) + alibi_bias(self.h, N)
+         if mask is not None:
+             att = att + mask
+         return self.proj((att.softmax(-1) @ v).transpose(1, 2).reshape(B, N, -1))
+ 
+ 
+ class DoubleAttn(nn.Module):
+     """Simplest heavy variant: two sequential attention ops."""
+     def __init__(self, d, h):
+         super().__init__()
+         self.attn1 = StandardAttn(d, h)
+         self.attn2 = StandardAttn(d, h)
+         self.gate = nn.Linear(d * 2, d)
+ 
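+     # The second attention pass reads the first's output; the linear "gate"
+     # mixes both results. Roughly 2x the attention cost of StandardAttn.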
+     def forward(self, x, mask=None):
+         o1 = self.attn1(x, mask)
+         o2 = self.attn2(x + o1, mask)
+         return self.gate(torch.cat([o1, o2], dim=-1))
+ 
+ 
+ class RecurrentAttn(nn.Module):
+     """The same attention module applied k times."""
+     def __init__(self, d, h, k=4):
+         super().__init__()
+         self.attn = StandardAttn(d, h)
+         self.depth_emb = nn.Embedding(k, d)
+         self.k = k
+ 
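+     # Universal-Transformer-style weight tying: one attention module is applied
+     # k times, with a learned depth embedding telling it which iteration it is on.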
+     def forward(self, x, mask=None):
+         for i in range(self.k):
+             x = x + self.attn(x + self.depth_emb.weight[i], mask)
+         return x
+ 
+ 
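+ # Pre-LN transformer block; only the attention sub-module differs between modes.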
+ class Block(nn.Module):
+     def __init__(self, d, h, mode="standard"):
+         super().__init__()
+         self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
+         if mode == "standard":
+             self.attn = StandardAttn(d, h)
+         elif mode == "double":
+             self.attn = DoubleAttn(d, h)
+         elif mode == "recurrent":
+             self.attn = RecurrentAttn(d, h, k=4)
+         else:
+             # previously an unknown mode left self.attn undefined and failed later
+             raise ValueError(f"unknown mode: {mode}")
+         self.ff = nn.Sequential(nn.Linear(d, 4*d), nn.GELU(), nn.Linear(4*d, d))
+ 
+     def forward(self, x, mask=None):
+         x = x + self.attn(self.ln1(x), mask)
+         return x + self.ff(self.ln2(x))
+ 
+ 
+ class Model(nn.Module):
+     def __init__(self, d, layers, h, mode="standard"):
+         super().__init__()
+         self.emb = nn.Embedding(VOCAB, d)
+         self.blocks = nn.ModuleList([Block(d, h, mode) for _ in range(layers)])
+         self.ln = nn.LayerNorm(d)
+         self.head = nn.Linear(d, VOCAB, bias=False)
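+         # Weight tying: the output head shares the embedding matrix. At
+         # VOCAB=128256 and d=256 this table alone is ~32.8M parameters and
+         # dominates every configuration's parameter count.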
+         self.head.weight = self.emb.weight
+ 
+     def forward(self, x, mask=None):
+         x = self.emb(x)
+         for b in self.blocks:
+             x = b(x, mask)
+         return self.head(self.ln(x))
+ 
+     def count_params(self):
+         return sum(p.numel() for p in self.parameters())
+ 
+ 
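+ # Note: batches are uniformly random tokens, so the loss mainly reflects each
+ # architecture's optimization behavior and throughput, not language-modeling quality.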
+ def train(model, steps, batch, seq):
+     opt = torch.optim.AdamW(model.parameters(), lr=1e-4)
+     mask = causal_mask(seq - 1)
+     losses, times = [], []
+ 
+     for step in range(steps):
+         ids = torch.randint(0, VOCAB, (batch, seq), device=DEV)
+         if DEV.type == "cuda":
+             torch.cuda.synchronize()  # finish pending GPU work so the timer starts clean
+         start = time.time()
+         opt.zero_grad()
+         loss = F.cross_entropy(model(ids[:, :-1], mask).view(-1, VOCAB), ids[:, 1:].reshape(-1))
+         loss.backward()
+         opt.step()
+         if DEV.type == "cuda":
+             torch.cuda.synchronize()  # CUDA ops are async; wait before reading the clock
+         times.append(time.time() - start)
+         losses.append(loss.item())
+ 
+         if step % 50 == 0 or step == steps - 1:
+             tok_s = batch * seq / times[-1]
+             print(f"Step {step:3d} | Loss {loss.item():.4f} | {tok_s:.0f} tok/s")
+ 
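+     # Average loss and throughput over the final 20 steps to smooth out jitter.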
+     return sum(losses[-20:]) / 20, batch * seq / (sum(times[-20:]) / 20)
+ 
+ 
+ def main():
+     print(f"Device: {DEV}")
+     if torch.cuda.is_available():
+         print(f"GPU: {torch.cuda.get_device_name()}")
+ 
+     d, h, batch, seq = 256, 8, 16, 128
+ 
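+     # Rough compute matching: heavier/deeper configs run proportionally fewer
+     # steps so every entry sees a similar total FLOP budget.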
+     configs = [
+         # (name, layers, mode, target_steps)
+         ("Standard-4L", 4, "standard", 500),
+         ("Standard-8L", 8, "standard", 250),    # ~2x slower, so half the steps
+         ("Standard-16L", 16, "standard", 125),  # ~4x slower
+         ("Double-4L", 4, "double", 250),        # ~2x slower
+         ("Recurrent-4L", 4, "recurrent", 125),  # ~4x slower (k=4 iterations)
+     ]
+ 
+     results = []
+     for name, layers, mode, steps in configs:
+         print(f"\n{'='*60}")
+         print(name)
+         print(f"{'='*60}")
+ 
+         model = Model(d, layers, h, mode).to(DEV)
+         params = model.count_params()
+         print(f"Parameters: {params:,}")
+ 
+         avg_loss, avg_toks = train(model, steps, batch, seq)
+         results.append((name, avg_loss, avg_toks, params, steps))
+         del model
+         torch.cuda.empty_cache()
+ 
+     print(f"\n{'='*60}")
+     print("FINAL RESULTS (roughly compute-matched)")
+     print(f"{'='*60}")
+     for name, loss, toks, params, steps in results:
+         total_tok = steps * batch * seq
+         print(f"{name:15s} | Loss {loss:.4f} | {toks:.0f} tok/s | {params/1e6:.1f}M | {total_tok/1e6:.1f}M tok trained")
+ 
+ 
+ if __name__ == "__main__":
+     main()