{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.001588751638400127,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.984126984126984e-06,
      "loss": 10.5689,
      "theoretical_loss": 17.59466794495971,
      "tokens_seen": 131072
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.968253968253968e-06,
      "loss": 10.5816,
      "theoretical_loss": 14.920783596619636,
      "tokens_seen": 262144
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.9523809523809525e-06,
      "loss": 10.4187,
      "theoretical_loss": 13.581028313181289,
      "tokens_seen": 393216
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.936507936507936e-06,
      "loss": 10.1647,
      "theoretical_loss": 12.71859646611439,
      "tokens_seen": 524288
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.92063492063492e-06,
      "loss": 9.8414,
      "theoretical_loss": 12.095879447666144,
      "tokens_seen": 655360
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.1904761904761905e-05,
      "loss": 9.5154,
      "theoretical_loss": 11.615186049337796,
      "tokens_seen": 786432
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.3888888888888888e-05,
      "loss": 9.4061,
      "theoretical_loss": 11.227478542742938,
      "tokens_seen": 917504
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.5873015873015872e-05,
      "loss": 9.3243,
      "theoretical_loss": 10.904894927088016,
      "tokens_seen": 1048576
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.7857142857142855e-05,
      "loss": 9.0303,
      "theoretical_loss": 10.630196716861345,
      "tokens_seen": 1179648
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.984126984126984e-05,
      "loss": 8.9144,
      "theoretical_loss": 10.392030784394397,
      "tokens_seen": 1310720
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.1825396825396824e-05,
      "loss": 8.7927,
      "theoretical_loss": 10.182553393901085,
      "tokens_seen": 1441792
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.380952380952381e-05,
      "loss": 8.7473,
      "theoretical_loss": 9.996136019471344,
      "tokens_seen": 1572864
    },
    {
      "epoch": 0.0,
      "objective/train/docs_used": 831,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 8.822301864624023,
      "objective/train/theoretical_loss": 9.910229967024176,
      "objective/train/tokens_used": -18841600,
      "theoretical_loss": 9.910229967024176,
      "tokens_seen": 1638400
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.5793650793650793e-05,
      "loss": 8.8575,
      "theoretical_loss": 9.828613432171625,
      "tokens_seen": 1703936
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.7777777777777776e-05,
      "loss": 8.6865,
      "theoretical_loss": 9.676823599712613,
      "tokens_seen": 1835008
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.9761904761904762e-05,
      "loss": 8.6022,
      "theoretical_loss": 9.538313887395919,
      "tokens_seen": 1966080
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.1746031746031745e-05,
      "loss": 8.4823,
      "theoretical_loss": 9.411146631541524,
      "tokens_seen": 2097152
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.373015873015873e-05,
      "loss": 8.4221,
      "theoretical_loss": 9.293766507291341,
      "tokens_seen": 2228224
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.571428571428571e-05,
      "loss": 8.3834,
      "theoretical_loss": 9.184907653139359,
      "tokens_seen": 2359296
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.76984126984127e-05,
      "loss": 8.2756,
      "theoretical_loss": 9.0835271371648,
      "tokens_seen": 2490368
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.968253968253968e-05,
      "loss": 8.3847,
      "theoretical_loss": 8.988756330540422,
      "tokens_seen": 2621440
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.1666666666666665e-05,
      "loss": 8.251,
      "theoretical_loss": 8.89986473310929,
      "tokens_seen": 2752512
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.365079365079365e-05,
      "loss": 8.1076,
      "theoretical_loss": 8.816232633409479,
      "tokens_seen": 2883584
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.563492063492063e-05,
      "loss": 8.1488,
      "theoretical_loss": 8.737330150151898,
      "tokens_seen": 3014656
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.761904761904762e-05,
      "loss": 8.0482,
      "theoretical_loss": 8.662700958366539,
      "tokens_seen": 3145728
    },
    {
      "epoch": 0.0,
      "objective/train/docs_used": 1233,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 8.187471389770508,
      "objective/train/theoretical_loss": 8.591949505242134,
      "objective/train/tokens_used": -17203200,
      "theoretical_loss": 8.591949505242134,
      "tokens_seen": 3276800
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.96031746031746e-05,
      "loss": 7.9834,
      "theoretical_loss": 8.591949505242134,
      "tokens_seen": 3276800
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.1587301587301586e-05,
      "loss": 8.1198,
      "theoretical_loss": 8.524730860277067,
      "tokens_seen": 3407872
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.357142857142857e-05,
      "loss": 7.8858,
      "theoretical_loss": 8.460742578303845,
      "tokens_seen": 3538944
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.555555555555555e-05,
      "loss": 8.0049,
      "theoretical_loss": 8.399718117751275,
      "tokens_seen": 3670016
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.753968253968254e-05,
      "loss": 7.81,
      "theoretical_loss": 8.341421472916394,
      "tokens_seen": 3801088
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.9523809523809524e-05,
      "loss": 7.8978,
      "theoretical_loss": 8.28564276288293,
      "tokens_seen": 3932160
    },
    {
      "epoch": 0.0,
      "learning_rate": 6.15079365079365e-05,
      "loss": 7.5006,
      "theoretical_loss": 8.232194580909036,
      "tokens_seen": 4063232
    },
    {
      "epoch": 0.0,
      "learning_rate": 6.349206349206349e-05,
      "loss": 7.6305,
      "theoretical_loss": 8.180908953270682,
      "tokens_seen": 4194304
    },
    {
      "epoch": 0.0,
      "learning_rate": 6.547619047619048e-05,
      "loss": 7.662,
      "theoretical_loss": 8.131634790246775,
      "tokens_seen": 4325376
    },
    {
      "epoch": 0.0,
      "learning_rate": 6.746031746031745e-05,
      "loss": 7.7852,
      "theoretical_loss": 8.084235737332481,
      "tokens_seen": 4456448
    },
    {
      "epoch": 0.0,
      "learning_rate": 6.944444444444444e-05,
      "loss": 7.5064,
      "theoretical_loss": 8.038588354092902,
      "tokens_seen": 4587520
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.142857142857142e-05,
      "loss": 7.5163,
      "theoretical_loss": 7.994580562902867,
      "tokens_seen": 4718592
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.341269841269842e-05,
      "loss": 7.4521,
      "theoretical_loss": 7.952110321298584,
      "tokens_seen": 4849664
    },
    {
      "epoch": 0.0,
      "objective/train/docs_used": 2007,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 7.20822286605835,
      "objective/train/theoretical_loss": 7.931422353115133,
      "objective/train/tokens_used": -15564800,
      "theoretical_loss": 7.931422353115133,
      "tokens_seen": 4915200
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.53968253968254e-05,
      "loss": 7.3365,
      "theoretical_loss": 7.911084480620269,
      "tokens_seen": 4980736
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.738095238095239e-05,
      "loss": 7.2213,
      "theoretical_loss": 7.871417800659003,
      "tokens_seen": 5111808
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.936507936507937e-05,
      "loss": 7.2532,
      "theoretical_loss": 7.833032095585231,
      "tokens_seen": 5242880
    }
  ],
  "max_steps": 25177,
  "num_train_epochs": 9223372036854775807,
  "total_flos": 2675630407680000.0,
  "trial_name": null,
  "trial_params": null
}