andrew-healey committed
Commit 42b6520 · verified · 1 Parent(s): 1e29a0c

Upload folder using huggingface_hub

lr6e-5_total_batch_size61440_two_masks_4_heads_seed1338/args.json ADDED
@@ -0,0 +1 @@
+ {"hellaswag": true, "attention_kind": "selective", "log_dir": "wider_is_better_10/lr6e-5_total_batch_size61440_two_masks_4_heads_seed1338", "resume_checkpoint": null, "resume_optimizer": false, "add_a_head": false, "add_head_to_start": true, "new_head_init": "normal", "n_heads": 4, "protect_bos_token": true, "prevent_from_masking_myself": true, "max_steps": 8750, "warmup_steps": 500, "group": "wider_is_better_10", "use_wandb": true, "kill_self_after_run": false, "random_seed": 1338, "memory_penalty_epsilon": 0.1, "selection_head_linear_combo": "two_masks", "selection_head_linear_combo_scale": 1.0, "protection_kind": "none", "leaky_relu_alpha": null, "leaky_relu_bias": null, "use_compile": true, "use_mini_model": false, "upload_to_hf": true, "seq_len": 256, "batch_size": 120, "total_batch_size": 61440, "protection_head_scaling_factor": 1.0, "protection_head_bias": 0.0, "n_sliced_masks": null, "n_latent_masks": null, "mask_layernorm": false, "residual_attention_masks": false, "compute_base_shapes": false, "base_shapes_savefile": null, "mup": true, "disable_selection": false, "mup_enable_coord_check_logging": false, "max_lr": 6e-05, "decay_lr": true, "readout_zero_init": false, "query_zero_init": false, "l1_loss": false, "debugpy": false, "key": "6e-5_61440_1338", "n_embd": 256}
lr6e-5_total_batch_size61440_two_masks_4_heads_seed1338/dataloader_08749.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:953385078aa3787b69fc6857dfd48b0a2cd2f4d27c6f8892e01211aca53d07f5
+ size 964
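
The .pt files in this commit are Git LFS pointers, so the diff records only the spec version, the SHA-256 oid, and the payload size in bytes (964 here). To materialize the actual tensors, pull with git lfs or fetch a single file through huggingface_hub; a minimal sketch, with repo_id left as a placeholder because the commit view does not show it:

```python
from huggingface_hub import hf_hub_download

# repo_id is a placeholder -- substitute this repo's actual namespace/name.
local_path = hf_hub_download(
    repo_id="<namespace>/<repo>",
    filename="lr6e-5_total_batch_size61440_two_masks_4_heads_seed1338/dataloader_08749.pt",
)
print(local_path)  # cached local path; the LFS pointer is resolved automatically
```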
lr6e-5_total_batch_size61440_two_masks_4_heads_seed1338/log2.txt ADDED
@@ -0,0 +1,267 @@
+ max_steps: 8750
+ 0 val loss 11.3155
+ 0 val perplexity 82083.8438
+ 0 train 11.313581 (lr=1.2000e-07) (hash(x)=150327452)
+ 100 val loss 9.8943
+ 100 val perplexity 19816.7793
+ 100 train 9.923759 (lr=1.2120e-05) (hash(x)=166780046)
+ 200 val loss 9.1633
+ 200 val perplexity 9540.9580
+ 200 train 9.125214 (lr=2.4120e-05) (hash(x)=155040610)
+ 300 val loss 8.1136
+ 300 val perplexity 3339.5718
+ 300 train 8.126803 (lr=3.6120e-05) (hash(x)=155504036)
+ 400 val loss 7.6362
+ 400 val perplexity 2071.9368
+ 400 train 7.502426 (lr=4.8120e-05) (hash(x)=143823248)
+ 500 val loss 7.4650
+ 500 val perplexity 1745.8350
+ 500 train 7.434222 (lr=6.0000e-05) (hash(x)=143734685)
+ 600 val loss 7.3870
+ 600 val perplexity 1614.8867
+ 600 train 7.167676 (lr=5.9980e-05) (hash(x)=150678249)
+ 700 val loss 7.2987
+ 700 val perplexity 1478.3378
+ 700 train 7.449089 (lr=5.9922e-05) (hash(x)=175802021)
+ 800 val loss 7.1951
+ 800 val perplexity 1332.9005
+ 800 train 7.203172 (lr=5.9824e-05) (hash(x)=158681215)
+ 900 val loss 7.1224
+ 900 val perplexity 1239.4111
+ 900 train 7.015157 (lr=5.9687e-05) (hash(x)=146108145)
+ 1000 val loss 7.0574
+ 1000 val perplexity 1161.4169
+ 1000 train 6.892931 (lr=5.9512e-05) (hash(x)=154996086)
+ 1100 val loss 6.9621
+ 1100 val perplexity 1055.8712
+ 1100 train 6.948694 (lr=5.9298e-05) (hash(x)=153885445)
+ 1200 val loss 6.8740
+ 1200 val perplexity 966.7667
+ 1200 train 6.824249 (lr=5.9046e-05) (hash(x)=142353087)
+ 1300 val loss 6.8015
+ 1300 val perplexity 899.2297
+ 1300 train 6.710301 (lr=5.8757e-05) (hash(x)=150750353)
+ 1400 val loss 6.7459
+ 1400 val perplexity 850.5912
+ 1400 train 6.565169 (lr=5.8430e-05) (hash(x)=152767913)
+ 1500 val loss 6.6809
+ 1500 val perplexity 797.0415
+ 1500 train 6.727425 (lr=5.8066e-05) (hash(x)=151562048)
+ 1600 val loss 6.6131
+ 1600 val perplexity 744.7812
+ 1600 train 6.613660 (lr=5.7666e-05) (hash(x)=166486165)
+ 1700 val loss 6.5799
+ 1700 val perplexity 720.4713
+ 1700 train 6.228736 (lr=5.7230e-05) (hash(x)=130835396)
+ 1800 val loss 6.5318
+ 1800 val perplexity 686.6319
+ 1800 train 6.581749 (lr=5.6759e-05) (hash(x)=158851816)
+ 1900 val loss 6.5106
+ 1900 val perplexity 672.2050
+ 1900 train 6.540253 (lr=5.6253e-05) (hash(x)=153313879)
+ 2000 val loss 6.4616
+ 2000 val perplexity 640.0931
+ 2000 train 6.391050 (lr=5.5714e-05) (hash(x)=158245023)
+ 2100 val loss 6.4348
+ 2100 val perplexity 623.1788
+ 2100 train 6.255421 (lr=5.5142e-05) (hash(x)=157204896)
+ 2200 val loss 6.4115
+ 2200 val perplexity 608.7783
+ 2200 train 6.252340 (lr=5.4537e-05) (hash(x)=137541932)
+ 2300 val loss 6.3842
+ 2300 val perplexity 592.4067
+ 2300 train 6.332124 (lr=5.3902e-05) (hash(x)=150149692)
+ 2400 val loss 6.3455
+ 2400 val perplexity 569.9310
+ 2400 train 6.329048 (lr=5.3236e-05) (hash(x)=151730720)
+ 2500 val loss 6.3234
+ 2500 val perplexity 557.4811
+ 2500 train 6.090756 (lr=5.2541e-05) (hash(x)=143406752)
+ 2600 val loss 6.3084
+ 2600 val perplexity 549.1819
+ 2600 train 6.137538 (lr=5.1817e-05) (hash(x)=157272496)
+ 2700 val loss 6.2786
+ 2700 val perplexity 533.0575
+ 2700 train 6.274469 (lr=5.1067e-05) (hash(x)=155342327)
+ 2800 val loss 6.2478
+ 2800 val perplexity 516.8675
+ 2800 train 6.124352 (lr=5.0290e-05) (hash(x)=140626679)
+ 2900 val loss 6.2305
+ 2900 val perplexity 508.0204
+ 2900 train 6.087295 (lr=4.9487e-05) (hash(x)=144953350)
+ 3000 val loss 6.2050
+ 3000 val perplexity 495.2052
+ 3000 train 6.107125 (lr=4.8662e-05) (hash(x)=172449837)
+ 3100 val loss 6.1973
+ 3100 val perplexity 491.4176
+ 3100 train 5.989651 (lr=4.7813e-05) (hash(x)=141710086)
+ 3200 val loss 6.1752
+ 3200 val perplexity 480.7009
+ 3200 train 6.113164 (lr=4.6943e-05) (hash(x)=151299772)
+ 3300 val loss 6.1596
+ 3300 val perplexity 473.2204
+ 3300 train 6.029521 (lr=4.6052e-05) (hash(x)=146473110)
+ 3400 val loss 6.1342
+ 3400 val perplexity 461.3510
+ 3400 train 6.217374 (lr=4.5143e-05) (hash(x)=153954157)
+ 3500 val loss 6.1197
+ 3500 val perplexity 454.7293
+ 3500 train 6.058451 (lr=4.4216e-05) (hash(x)=153717336)
+ 3600 val loss 6.1032
+ 3600 val perplexity 447.2650
+ 3600 train 5.858186 (lr=4.3273e-05) (hash(x)=144965161)
+ 3700 val loss 6.0928
+ 3700 val perplexity 442.6552
+ 3700 train 5.859298 (lr=4.2315e-05) (hash(x)=125969741)
+ 3800 val loss 6.0644
+ 3800 val perplexity 430.2494
+ 3800 train 5.926513 (lr=4.1343e-05) (hash(x)=155070487)
+ 3900 val loss 6.0440
+ 3900 val perplexity 421.5794
+ 3900 train 5.920467 (lr=4.0360e-05) (hash(x)=149444644)
+ 4000 val loss 6.0355
+ 4000 val perplexity 417.9910
+ 4000 train 5.892637 (lr=3.9365e-05) (hash(x)=151663033)
+ 4100 val loss 6.0243
+ 4100 val perplexity 413.3414
+ 4100 train 5.985642 (lr=3.8362e-05) (hash(x)=143688282)
+ 4200 val loss 6.0062
+ 4200 val perplexity 405.9438
+ 4200 train 6.013908 (lr=3.7351e-05) (hash(x)=163361651)
+ 4300 val loss 5.9875
+ 4300 val perplexity 398.4011
+ 4300 train 6.001385 (lr=3.6333e-05) (hash(x)=153619361)
+ 4400 val loss 5.9846
+ 4400 val perplexity 397.2614
+ 4400 train 6.290046 (lr=3.5311e-05) (hash(x)=168527064)
+ 4500 val loss 5.9729
+ 4500 val perplexity 392.6522
+ 4500 train 5.785292 (lr=3.4285e-05) (hash(x)=125588037)
+ 4600 val loss 5.9511
+ 4600 val perplexity 384.1663
+ 4600 train 5.918339 (lr=3.3257e-05) (hash(x)=143710941)
+ 4700 val loss 5.9347
+ 4700 val perplexity 377.9097
+ 4700 train 5.771615 (lr=3.2229e-05) (hash(x)=150952742)
+ 4800 val loss 5.9269
+ 4800 val perplexity 374.9784
+ 4800 train 5.801284 (lr=3.1202e-05) (hash(x)=145323659)
+ 4900 val loss 5.9191
+ 4900 val perplexity 372.0759
+ 4900 train 5.940371 (lr=3.0178e-05) (hash(x)=153151397)
+ 5000 val loss 5.9091
+ 5000 val perplexity 368.3875
+ 5000 train 5.809377 (lr=2.9157e-05) (hash(x)=143182059)
+ 5100 val loss 5.8978
+ 5100 val perplexity 364.2274
+ 5100 train 5.934733 (lr=2.8143e-05) (hash(x)=170083586)
+ 5200 val loss 5.8821
+ 5200 val perplexity 358.5504
+ 5200 train 5.769038 (lr=2.7135e-05) (hash(x)=149363919)
+ 5300 val loss 5.8766
+ 5300 val perplexity 356.5976
+ 5300 train 5.761035 (lr=2.6136e-05) (hash(x)=152033784)
+ 5400 val loss 5.8627
+ 5400 val perplexity 351.6638
+ 5400 train 5.975939 (lr=2.5147e-05) (hash(x)=154614289)
+ 5500 val loss 5.8550
+ 5500 val perplexity 348.9902
+ 5500 train 5.947636 (lr=2.4169e-05) (hash(x)=157745174)
+ 5600 val loss 5.8481
+ 5600 val perplexity 346.5829
+ 5600 train 5.743173 (lr=2.3204e-05) (hash(x)=147693222)
+ 5700 val loss 5.8368
+ 5700 val perplexity 342.6678
+ 5700 train 5.774377 (lr=2.2253e-05) (hash(x)=149784627)
+ 5800 val loss 5.8311
+ 5800 val perplexity 340.7170
+ 5800 train 5.749272 (lr=2.1318e-05) (hash(x)=158620729)
+ 5900 val loss 5.8224
+ 5900 val perplexity 337.7950
+ 5900 train 5.770716 (lr=2.0400e-05) (hash(x)=159763910)
+ 6000 val loss 5.8117
+ 6000 val perplexity 334.1950
+ 6000 train 5.743438 (lr=1.9500e-05) (hash(x)=147640561)
+ 6100 val loss 5.8006
+ 6100 val perplexity 330.5019
+ 6100 train 5.785187 (lr=1.8620e-05) (hash(x)=156613394)
+ 6200 val loss 5.7981
+ 6200 val perplexity 329.6873
+ 6200 train 5.720030 (lr=1.7760e-05) (hash(x)=186221290)
+ 6300 val loss 5.7838
+ 6300 val perplexity 324.9813
+ 6300 train 5.670815 (lr=1.6923e-05) (hash(x)=152081419)
+ 6400 val loss 5.7813
+ 6400 val perplexity 324.1779
+ 6400 train 5.729396 (lr=1.6108e-05) (hash(x)=154808349)
+ 6500 val loss 5.7745
+ 6500 val perplexity 321.9752
+ 6500 train 5.743915 (lr=1.5319e-05) (hash(x)=159437208)
+ 6600 val loss 5.7701
+ 6600 val perplexity 320.5807
+ 6600 train 5.588336 (lr=1.4555e-05) (hash(x)=157933074)
+ 6700 val loss 5.7627
+ 6700 val perplexity 318.2012
+ 6700 train 5.745686 (lr=1.3817e-05) (hash(x)=161560240)
+ 6800 val loss 5.7586
+ 6800 val perplexity 316.8917
+ 6800 train 5.722718 (lr=1.3108e-05) (hash(x)=155424292)
+ 6900 val loss 5.7547
+ 6900 val perplexity 315.6748
+ 6900 train 5.754129 (lr=1.2427e-05) (hash(x)=148561470)
+ 7000 val loss 5.7459
+ 7000 val perplexity 312.8990
+ 7000 train 5.641295 (lr=1.1777e-05) (hash(x)=141527450)
+ 7100 val loss 5.7414
+ 7100 val perplexity 311.4909
+ 7100 train 5.693920 (lr=1.1157e-05) (hash(x)=151066339)
+ 7200 val loss 5.7406
+ 7200 val perplexity 311.2593
+ 7200 train 5.537292 (lr=1.0568e-05) (hash(x)=155231264)
+ 7300 val loss 5.7331
+ 7300 val perplexity 308.9291
+ 7300 train 5.752053 (lr=1.0012e-05) (hash(x)=150281149)
+ 7400 val loss 5.7312
+ 7400 val perplexity 308.3266
+ 7400 train 5.618239 (lr=9.4899e-06) (hash(x)=148421717)
+ 7500 val loss 5.7285
+ 7500 val perplexity 307.5095
+ 7500 train 5.488880 (lr=9.0014e-06) (hash(x)=146921118)
+ 7600 val loss 5.7212
+ 7600 val perplexity 305.2754
+ 7600 train 5.772123 (lr=8.5478e-06) (hash(x)=150660048)
+ 7700 val loss 5.7191
+ 7700 val perplexity 304.6212
+ 7700 train 5.592422 (lr=8.1297e-06) (hash(x)=148059852)
+ 7800 val loss 5.7175
+ 7800 val perplexity 304.1521
+ 7800 train 5.411452 (lr=7.7476e-06) (hash(x)=148331002)
+ 7900 val loss 5.7109
+ 7900 val perplexity 302.1580
+ 7900 train 5.659790 (lr=7.4021e-06) (hash(x)=164923883)
+ 8000 val loss 5.7092
+ 8000 val perplexity 301.6437
+ 8000 train 5.596012 (lr=7.0937e-06) (hash(x)=143545384)
+ 8100 val loss 5.7078
+ 8100 val perplexity 301.2062
+ 8100 train 5.524901 (lr=6.8229e-06) (hash(x)=160686959)
+ 8200 val loss 5.7054
+ 8200 val perplexity 300.4735
+ 8200 train 5.659021 (lr=6.5900e-06) (hash(x)=156501889)
+ 8300 val loss 5.7030
+ 8300 val perplexity 299.7516
+ 8300 train 5.545584 (lr=6.3954e-06) (hash(x)=142716875)
+ 8400 val loss 5.6981
+ 8400 val perplexity 298.3052
+ 8400 train 5.614969 (lr=6.2395e-06) (hash(x)=154436684)
+ 8500 val loss 5.6964
+ 8500 val perplexity 297.7805
+ 8500 train 5.773241 (lr=6.1223e-06) (hash(x)=147965839)
+ 8600 val loss 5.6947
+ 8600 val perplexity 297.2746
+ 8600 train 5.489063 (lr=6.0440e-06) (hash(x)=145228097)
+ 8700 val loss 5.6898
+ 8700 val perplexity 295.8317
+ 8700 train 5.878402 (lr=6.0049e-06) (hash(x)=152910357)
+ 8749 val loss 5.6898
+ 8749 val perplexity 295.8426
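
Two consistency checks on the log above. First, the logged perplexities are just exp(loss): exp(5.6898) ≈ 295.84, matching the final row. Second, the learning-rate column is consistent with linear warmup over the first 500 steps followed by cosine decay to max_lr / 10; a sketch of that schedule, reconstructed from the logged values rather than taken from this repo's code:

```python
import math

MAX_LR, WARMUP, MAX_STEPS = 6e-05, 500, 8750
MIN_LR = MAX_LR / 10  # inferred from the final logged lr of ~6.0e-06

def lr_at(step: int) -> float:
    if step < WARMUP:
        # Linear warmup; (step + 1) reproduces lr=1.2000e-07 at step 0.
        return MAX_LR * (step + 1) / WARMUP
    # Cosine decay from MAX_LR down to MIN_LR over the remaining steps.
    ratio = (step - WARMUP) / (MAX_STEPS - WARMUP)
    return MIN_LR + 0.5 * (1 + math.cos(math.pi * ratio)) * (MAX_LR - MIN_LR)

for step in (0, 100, 500, 600, 8700):
    print(step, f"{lr_at(step):.4e}")
# 0 1.2000e-07 | 100 1.2120e-05 | 500 6.0000e-05 | 600 5.9980e-05 | 8700 6.0049e-06
```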
lr6e-5_total_batch_size61440_two_masks_4_heads_seed1338/model_08749.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e68f20a6a7aab2efa3b5adabc260663fbf3a6b55cce699a66ddb94407d856ae4
+ size 97580418
lr6e-5_total_batch_size61440_two_masks_4_heads_seed1338/optimizer_08749.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d66294ca3e794a53648bc1c81beabc6aac9b05a3cd4069ae99abf3c38a3c4456
+ size 188880262
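
Once the LFS payloads are present locally, the final-step model and optimizer artifacts can be inspected or used to resume a run; a minimal sketch with torch, noting that the model class itself is not part of this commit, so the load_state_dict lines are hypothetical:

```python
import torch

prefix = "lr6e-5_total_batch_size61440_two_masks_4_heads_seed1338"

# map_location="cpu" keeps the load device-agnostic. The exact checkpoint
# layout (bare state_dict vs. wrapper dict) is an assumption -- inspect keys first.
model_ckpt = torch.load(f"{prefix}/model_08749.pt", map_location="cpu")
opt_ckpt = torch.load(f"{prefix}/optimizer_08749.pt", map_location="cpu")

# model.load_state_dict(model_ckpt)      # hypothetical: model built to match args.json
# optimizer.load_state_dict(opt_ckpt)    # pairs with resume_optimizer in args.json
```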