andrew-healey committed
Commit 48b4f15 · verified · 1 Parent(s): f402ab3

Upload folder using huggingface_hub
lr6e-5_total_batch_size61440_two_masks_4_heads_seed1340/args.json ADDED
@@ -0,0 +1 @@
+ {"hellaswag": true, "attention_kind": "selective", "log_dir": "wider_is_better_10/lr6e-5_total_batch_size61440_two_masks_4_heads_seed1340", "resume_checkpoint": null, "resume_optimizer": false, "add_a_head": false, "add_head_to_start": true, "new_head_init": "normal", "n_heads": 4, "protect_bos_token": true, "prevent_from_masking_myself": true, "max_steps": 8750, "warmup_steps": 500, "group": "wider_is_better_10", "use_wandb": true, "kill_self_after_run": false, "random_seed": 1340, "memory_penalty_epsilon": 0.1, "selection_head_linear_combo": "two_masks", "selection_head_linear_combo_scale": 1.0, "protection_kind": "none", "leaky_relu_alpha": null, "leaky_relu_bias": null, "use_compile": true, "use_mini_model": false, "upload_to_hf": true, "seq_len": 256, "batch_size": 120, "total_batch_size": 61440, "protection_head_scaling_factor": 1.0, "protection_head_bias": 0.0, "n_sliced_masks": null, "n_latent_masks": null, "mask_layernorm": false, "residual_attention_masks": false, "compute_base_shapes": false, "base_shapes_savefile": null, "mup": true, "disable_selection": false, "mup_enable_coord_check_logging": false, "max_lr": 6e-05, "decay_lr": true, "readout_zero_init": false, "query_zero_init": false, "l1_loss": false, "debugpy": false, "key": "6e-5_61440_1340", "n_embd": 256}
lr6e-5_total_batch_size61440_two_masks_4_heads_seed1340/dataloader_08749.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:953385078aa3787b69fc6857dfd48b0a2cd2f4d27c6f8892e01211aca53d07f5
+ size 964
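
The .pt entries in this commit are Git LFS pointer files (version, oid, size) rather than the payloads themselves. Once the real object is downloaded, it can be checked against the pointer; a minimal sketch using only the values shown above, with the local path assumed:

```python
import hashlib

run = "lr6e-5_total_batch_size61440_two_masks_4_heads_seed1340"
data = open(f"{run}/dataloader_08749.pt", "rb").read()

# oid and size are copied from the LFS pointer above.
assert len(data) == 964
assert hashlib.sha256(data).hexdigest() == (
    "953385078aa3787b69fc6857dfd48b0a2cd2f4d27c6f8892e01211aca53d07f5"
)
```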
lr6e-5_total_batch_size61440_two_masks_4_heads_seed1340/log2.txt ADDED
@@ -0,0 +1,267 @@
+ max_steps: 8750
+ 0 val loss 11.2942
+ 0 val perplexity 80353.6875
+ 0 train 11.297427 (lr=1.2000e-07) (hash(x)=164406924)
+ 100 val loss 9.9761
+ 100 val perplexity 21506.5000
+ 100 train 9.939522 (lr=1.2120e-05) (hash(x)=144903932)
+ 200 val loss 9.3909
+ 200 val perplexity 11979.3281
+ 200 train 9.460566 (lr=2.4120e-05) (hash(x)=167734596)
+ 300 val loss 8.2824
+ 300 val perplexity 3953.7627
+ 300 train 8.291067 (lr=3.6120e-05) (hash(x)=149619098)
+ 400 val loss 7.7141
+ 400 val perplexity 2239.8042
+ 400 train 7.606259 (lr=4.8120e-05) (hash(x)=153710890)
+ 500 val loss 7.5351
+ 500 val perplexity 1872.7146
+ 500 train 7.624778 (lr=6.0000e-05) (hash(x)=145450636)
+ 600 val loss 7.4382
+ 600 val perplexity 1699.6913
+ 600 train 7.297028 (lr=5.9980e-05) (hash(x)=145249251)
+ 700 val loss 7.3852
+ 700 val perplexity 1611.9633
+ 700 train 7.491380 (lr=5.9922e-05) (hash(x)=150475545)
+ 800 val loss 7.3196
+ 800 val perplexity 1509.6628
+ 800 train 7.286989 (lr=5.9824e-05) (hash(x)=144483776)
+ 900 val loss 7.2250
+ 900 val perplexity 1373.3182
+ 900 train 7.327229 (lr=5.9687e-05) (hash(x)=157916369)
+ 1000 val loss 7.1379
+ 1000 val perplexity 1258.7574
+ 1000 train 7.157605 (lr=5.9512e-05) (hash(x)=154856891)
+ 1100 val loss 7.0439
+ 1100 val perplexity 1145.8176
+ 1100 train 6.763566 (lr=5.9298e-05) (hash(x)=136191502)
+ 1200 val loss 6.9466
+ 1200 val perplexity 1039.6229
+ 1200 train 6.902252 (lr=5.9046e-05) (hash(x)=148021541)
+ 1300 val loss 6.8677
+ 1300 val perplexity 960.7319
+ 1300 train 6.805964 (lr=5.8757e-05) (hash(x)=146005217)
+ 1400 val loss 6.7880
+ 1400 val perplexity 887.1609
+ 1400 train 6.696857 (lr=5.8430e-05) (hash(x)=146019502)
+ 1500 val loss 6.7088
+ 1500 val perplexity 819.5461
+ 1500 train 6.648109 (lr=5.8066e-05) (hash(x)=150127281)
+ 1600 val loss 6.6229
+ 1600 val perplexity 752.1389
+ 1600 train 6.604319 (lr=5.7666e-05) (hash(x)=154120875)
+ 1700 val loss 6.5768
+ 1700 val perplexity 718.2361
+ 1700 train 6.792406 (lr=5.7230e-05) (hash(x)=155797680)
+ 1800 val loss 6.5006
+ 1800 val perplexity 665.5312
+ 1800 train 6.496136 (lr=5.6759e-05) (hash(x)=156809396)
+ 1900 val loss 6.4478
+ 1900 val perplexity 631.3382
+ 1900 train 6.333206 (lr=5.6253e-05) (hash(x)=144640294)
+ 2000 val loss 6.4024
+ 2000 val perplexity 603.3206
+ 2000 train 6.261527 (lr=5.5714e-05) (hash(x)=162831106)
+ 2100 val loss 6.3797
+ 2100 val perplexity 589.7534
+ 2100 train 6.366537 (lr=5.5142e-05) (hash(x)=158239484)
+ 2200 val loss 6.3242
+ 2200 val perplexity 557.9207
+ 2200 train 6.202650 (lr=5.4537e-05) (hash(x)=140504180)
+ 2300 val loss 6.2880
+ 2300 val perplexity 538.0495
+ 2300 train 6.279479 (lr=5.3902e-05) (hash(x)=142234024)
+ 2400 val loss 6.2652
+ 2400 val perplexity 525.9250
+ 2400 train 6.254510 (lr=5.3236e-05) (hash(x)=143091562)
+ 2500 val loss 6.2459
+ 2500 val perplexity 515.8730
+ 2500 train 6.148150 (lr=5.2541e-05) (hash(x)=149857456)
+ 2600 val loss 6.1995
+ 2600 val perplexity 492.4807
+ 2600 train 6.235091 (lr=5.1817e-05) (hash(x)=146191551)
+ 2700 val loss 6.1689
+ 2700 val perplexity 477.6422
+ 2700 train 6.081195 (lr=5.1067e-05) (hash(x)=145375752)
+ 2800 val loss 6.1456
+ 2800 val perplexity 466.6726
+ 2800 train 6.022756 (lr=5.0290e-05) (hash(x)=151568014)
+ 2900 val loss 6.1315
+ 2900 val perplexity 460.1299
+ 2900 train 5.963450 (lr=4.9487e-05) (hash(x)=149366597)
+ 3000 val loss 6.0982
+ 3000 val perplexity 445.0338
+ 3000 train 6.023782 (lr=4.8662e-05) (hash(x)=150464442)
+ 3100 val loss 6.0766
+ 3100 val perplexity 435.5521
+ 3100 train 6.197667 (lr=4.7813e-05) (hash(x)=182449036)
+ 3200 val loss 6.0556
+ 3200 val perplexity 426.4871
+ 3200 train 6.000808 (lr=4.6943e-05) (hash(x)=140141286)
+ 3300 val loss 6.0367
+ 3300 val perplexity 418.5168
+ 3300 train 6.004710 (lr=4.6052e-05) (hash(x)=148099414)
+ 3400 val loss 6.0157
+ 3400 val perplexity 409.8305
+ 3400 train 5.880077 (lr=4.5143e-05) (hash(x)=142633951)
+ 3500 val loss 6.0073
+ 3500 val perplexity 406.3983
+ 3500 train 5.863472 (lr=4.4216e-05) (hash(x)=148368965)
+ 3600 val loss 5.9822
+ 3600 val perplexity 396.3187
+ 3600 train 6.001503 (lr=4.3273e-05) (hash(x)=152372067)
+ 3700 val loss 5.9687
+ 3700 val perplexity 390.9832
+ 3700 train 6.048810 (lr=4.2315e-05) (hash(x)=168885609)
+ 3800 val loss 5.9530
+ 3800 val perplexity 384.9036
+ 3800 train 5.764369 (lr=4.1343e-05) (hash(x)=141633734)
+ 3900 val loss 5.9277
+ 3900 val perplexity 375.2776
+ 3900 train 5.821568 (lr=4.0360e-05) (hash(x)=153141007)
+ 4000 val loss 5.9152
+ 4000 val perplexity 370.6133
+ 4000 train 5.845716 (lr=3.9365e-05) (hash(x)=160577202)
+ 4100 val loss 5.9010
+ 4100 val perplexity 365.4027
+ 4100 train 5.967554 (lr=3.8362e-05) (hash(x)=153858169)
+ 4200 val loss 5.8805
+ 4200 val perplexity 357.9999
+ 4200 train 5.891509 (lr=3.7351e-05) (hash(x)=155889149)
+ 4300 val loss 5.8682
+ 4300 val perplexity 353.6169
+ 4300 train 5.679297 (lr=3.6333e-05) (hash(x)=152294662)
+ 4400 val loss 5.8534
+ 4400 val perplexity 348.4212
+ 4400 train 5.757539 (lr=3.5311e-05) (hash(x)=141804386)
+ 4500 val loss 5.8359
+ 4500 val perplexity 342.3681
+ 4500 train 5.729671 (lr=3.4285e-05) (hash(x)=151095242)
+ 4600 val loss 5.8401
+ 4600 val perplexity 343.8197
+ 4600 train 5.787620 (lr=3.3257e-05) (hash(x)=156414699)
+ 4700 val loss 5.8117
+ 4700 val perplexity 334.1749
+ 4700 train 5.832815 (lr=3.2229e-05) (hash(x)=161556686)
+ 4800 val loss 5.7981
+ 4800 val perplexity 329.6756
+ 4800 train 5.700453 (lr=3.1202e-05) (hash(x)=149000293)
+ 4900 val loss 5.7919
+ 4900 val perplexity 327.6476
+ 4900 train 5.577726 (lr=3.0178e-05) (hash(x)=154349989)
+ 5000 val loss 5.7760
+ 5000 val perplexity 322.4744
+ 5000 train 5.608658 (lr=2.9157e-05) (hash(x)=131475967)
+ 5100 val loss 5.7625
+ 5100 val perplexity 318.1514
+ 5100 train 5.672650 (lr=2.8143e-05) (hash(x)=149717902)
+ 5200 val loss 5.7555
+ 5200 val perplexity 315.9159
+ 5200 train 5.626917 (lr=2.7135e-05) (hash(x)=151407999)
+ 5300 val loss 5.7468
+ 5300 val perplexity 313.1746
+ 5300 train 5.847970 (lr=2.6136e-05) (hash(x)=168602728)
+ 5400 val loss 5.7322
+ 5400 val perplexity 308.6395
+ 5400 train 5.693013 (lr=2.5147e-05) (hash(x)=158344511)
+ 5500 val loss 5.7246
+ 5500 val perplexity 306.3096
+ 5500 train 5.687911 (lr=2.4169e-05) (hash(x)=148350057)
+ 5600 val loss 5.7147
+ 5600 val perplexity 303.3056
+ 5600 train 5.782882 (lr=2.3204e-05) (hash(x)=153847323)
+ 5700 val loss 5.7042
+ 5700 val perplexity 300.1135
+ 5700 train 5.653192 (lr=2.2253e-05) (hash(x)=156607405)
+ 5800 val loss 5.6977
+ 5800 val perplexity 298.1892
+ 5800 train 5.462691 (lr=2.1318e-05) (hash(x)=145115031)
+ 5900 val loss 5.6886
+ 5900 val perplexity 295.4914
+ 5900 train 5.568099 (lr=2.0400e-05) (hash(x)=141584622)
+ 6000 val loss 5.6789
+ 6000 val perplexity 292.6378
+ 6000 train 5.618869 (lr=1.9500e-05) (hash(x)=146613857)
+ 6100 val loss 5.6738
+ 6100 val perplexity 291.1289
+ 6100 train 5.611949 (lr=1.8620e-05) (hash(x)=144621768)
+ 6200 val loss 5.6681
+ 6200 val perplexity 289.4983
+ 6200 train 5.595115 (lr=1.7760e-05) (hash(x)=146521760)
+ 6300 val loss 5.6573
+ 6300 val perplexity 286.3828
+ 6300 train 5.487076 (lr=1.6923e-05) (hash(x)=161378136)
+ 6400 val loss 5.6533
+ 6400 val perplexity 285.2439
+ 6400 train 5.421412 (lr=1.6108e-05) (hash(x)=141624235)
+ 6500 val loss 5.6451
+ 6500 val perplexity 282.9144
+ 6500 train 5.574306 (lr=1.5319e-05) (hash(x)=151197095)
+ 6600 val loss 5.6427
+ 6600 val perplexity 282.2242
+ 6600 train 5.660608 (lr=1.4555e-05) (hash(x)=153269571)
+ 6700 val loss 5.6357
+ 6700 val perplexity 280.2599
+ 6700 train 5.606679 (lr=1.3817e-05) (hash(x)=146111181)
+ 6800 val loss 5.6266
+ 6800 val perplexity 277.7189
+ 6800 train 5.651731 (lr=1.3108e-05) (hash(x)=147269760)
+ 6900 val loss 5.6226
+ 6900 val perplexity 276.6003
+ 6900 train 5.703611 (lr=1.2427e-05) (hash(x)=152912762)
+ 7000 val loss 5.6196
+ 7000 val perplexity 275.7923
+ 7000 train 5.704011 (lr=1.1777e-05) (hash(x)=165412343)
+ 7100 val loss 5.6112
+ 7100 val perplexity 273.4796
+ 7100 train 5.678514 (lr=1.1157e-05) (hash(x)=162866028)
+ 7200 val loss 5.6065
+ 7200 val perplexity 272.2015
+ 7200 train 5.578472 (lr=1.0568e-05) (hash(x)=142998115)
+ 7300 val loss 5.6036
+ 7300 val perplexity 271.4095
+ 7300 train 5.379413 (lr=1.0012e-05) (hash(x)=145486999)
+ 7400 val loss 5.5974
+ 7400 val perplexity 269.7222
+ 7400 train 5.574456 (lr=9.4899e-06) (hash(x)=155325873)
+ 7500 val loss 5.5931
+ 7500 val perplexity 268.5547
+ 7500 train 5.561210 (lr=9.0014e-06) (hash(x)=145131256)
+ 7600 val loss 5.5969
+ 7600 val perplexity 269.5966
+ 7600 train 5.528515 (lr=8.5478e-06) (hash(x)=144008365)
+ 7700 val loss 5.5866
+ 7700 val perplexity 266.8315
+ 7700 train 5.695362 (lr=8.1297e-06) (hash(x)=148848532)
+ 7800 val loss 5.5830
+ 7800 val perplexity 265.8582
+ 7800 train 5.579540 (lr=7.7476e-06) (hash(x)=150391642)
+ 7900 val loss 5.5829
+ 7900 val perplexity 265.8516
+ 7900 train 5.344047 (lr=7.4021e-06) (hash(x)=152191414)
+ 8000 val loss 5.5776
+ 8000 val perplexity 264.4469
+ 8000 train 5.651704 (lr=7.0937e-06) (hash(x)=159755587)
+ 8100 val loss 5.5752
+ 8100 val perplexity 263.8116
+ 8100 train 5.593820 (lr=6.8229e-06) (hash(x)=156664468)
+ 8200 val loss 5.5735
+ 8200 val perplexity 263.3431
+ 8200 train 5.422441 (lr=6.5900e-06) (hash(x)=139457379)
+ 8300 val loss 5.5768
+ 8300 val perplexity 264.2146
+ 8300 train 5.350660 (lr=6.3954e-06) (hash(x)=145478564)
+ 8400 val loss 5.5674
+ 8400 val perplexity 261.7447
+ 8400 train 5.501937 (lr=6.2395e-06) (hash(x)=154982769)
+ 8500 val loss 5.5652
+ 8500 val perplexity 261.1683
+ 8500 train 5.534875 (lr=6.1223e-06) (hash(x)=145798118)
+ 8600 val loss 5.5648
+ 8600 val perplexity 261.0634
+ 8600 train 5.474912 (lr=6.0440e-06) (hash(x)=143231551)
+ 8700 val loss 5.5680
+ 8700 val perplexity 261.9062
+ 8700 train 5.461046 (lr=6.0049e-06) (hash(x)=154780112)
+ 8749 val loss 5.5602
+ 8749 val perplexity 259.8715
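
Each logged checkpoint contributes three lines: val loss, val perplexity, and train loss with the current learning rate and a batch hash. The reported perplexity is exp(val loss), e.g. exp(5.5602) ≈ 259.87 at the final step 8749. A minimal parsing sketch follows; the regex and path are assumptions based on the line format above:

```python
import math
import re

run = "lr6e-5_total_batch_size61440_two_masks_4_heads_seed1340"
val_loss = {}
for line in open(f"{run}/log2.txt"):
    m = re.match(r"(\d+) val loss ([\d.]+)", line)
    if m:
        val_loss[int(m.group(1))] = float(m.group(2))

# Sanity check: perplexity is exp(loss); the log's final entry agrees.
print(math.exp(val_loss[8749]))  # ~259.87, cf. "8749 val perplexity 259.8715"
```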
lr6e-5_total_batch_size61440_two_masks_4_heads_seed1340/model_08749.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e79661bcd01c1c7336c11968faba15d84316cbcac8e549bb072974caf7cfeb32
+ size 97580418
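
The final model checkpoint (about 93 MiB through LFS) can be fetched with huggingface_hub, the same library named in the commit message. A minimal sketch; the repo id is a placeholder and the checkpoint's internal layout is an assumption:

```python
import torch
from huggingface_hub import hf_hub_download

# REPO_ID is hypothetical: the repo id is not part of this diff.
REPO_ID = "user/repo"
path = hf_hub_download(
    repo_id=REPO_ID,
    filename="lr6e-5_total_batch_size61440_two_masks_4_heads_seed1340/model_08749.pt",
)
# Newer PyTorch may require weights_only=False if the file holds
# non-tensor objects; the layout (e.g. a plain state dict) is assumed.
state = torch.load(path, map_location="cpu")
```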
lr6e-5_total_batch_size61440_two_masks_4_heads_seed1340/optimizer_08749.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b028b0da0519dfdba5101885e98238633048a3639f19bab29b8b90c0fce1dec
+ size 188880262