penfever committed on
Commit
edc15e9
·
verified ·
1 Parent(s): efc3d09

End of training

Browse files
Files changed (5) hide show
  1. README.md +2 -1
  2. all_results.json +16 -0
  3. train_results.json +16 -0
  4. trainer_state.json +674 -0
  5. training_loss.png +0 -0
README.md CHANGED
@@ -4,6 +4,7 @@ license: apache-2.0
4
  base_model: Qwen/Qwen3-8B
5
  tags:
6
  - llama-factory
 
7
  - generated_from_trainer
8
  model-index:
9
  - name: code_contests-Qwen3-Coder-480B-traces
@@ -15,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
15
 
16
  # code_contests-Qwen3-Coder-480B-traces
17
 
18
- This model is a fine-tuned version of [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B) on an unknown dataset.
19
 
20
  ## Model description
21
 
 
4
  base_model: Qwen/Qwen3-8B
5
  tags:
6
  - llama-factory
7
+ - full
8
  - generated_from_trainer
9
  model-index:
10
  - name: code_contests-Qwen3-Coder-480B-traces
 
16
 
17
  # code_contests-Qwen3-Coder-480B-traces
18
 
19
+ This model is a fine-tuned version of [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B) on the DCAgent/code_contests-Qwen3-Coder-480B-traces dataset.
20
 
21
  ## Model description
22
 
all_results.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "achieved_tflops_per_gpu": 4.949651343267627,
3
+ "achieved_tflops_per_gpu_theoretical": 243.92912852326808,
4
+ "epoch": 5.0,
5
+ "loss_nan_ranks": 0,
6
+ "loss_rank_avg": 0.2248307466506958,
7
+ "mfu_percent": 0.3497986815030125,
8
+ "mfu_percent_theoretical": 17.23880766948891,
9
+ "total_flos": 3.151185311610962e+17,
10
+ "train_loss": 0.24082276695653013,
11
+ "train_runtime": 3979.0496,
12
+ "train_samples_per_second": 4.505,
13
+ "train_steps_per_second": 0.072,
14
+ "valid_targets_mean": 2971.6,
15
+ "valid_targets_min": 1283
16
+ }
train_results.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "achieved_tflops_per_gpu": 4.949651343267627,
3
+ "achieved_tflops_per_gpu_theoretical": 243.92912852326808,
4
+ "epoch": 5.0,
5
+ "loss_nan_ranks": 0,
6
+ "loss_rank_avg": 0.2248307466506958,
7
+ "mfu_percent": 0.3497986815030125,
8
+ "mfu_percent_theoretical": 17.23880766948891,
9
+ "total_flos": 3.151185311610962e+17,
10
+ "train_loss": 0.24082276695653013,
11
+ "train_runtime": 3979.0496,
12
+ "train_samples_per_second": 4.505,
13
+ "train_steps_per_second": 0.072,
14
+ "valid_targets_mean": 2971.6,
15
+ "valid_targets_min": 1283
16
+ }
trainer_state.json ADDED
@@ -0,0 +1,674 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 5.0,
6
+ "eval_steps": 500,
7
+ "global_step": 285,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.08888888888888889,
14
+ "grad_norm": 7.583328114734628,
15
+ "learning_rate": 5.517241379310345e-06,
16
+ "loss": 0.5653,
17
+ "loss_nan_ranks": 0,
18
+ "loss_rank_avg": 0.166676864027977,
19
+ "step": 5,
20
+ "valid_targets_mean": 3166.7,
21
+ "valid_targets_min": 1420
22
+ },
23
+ {
24
+ "epoch": 0.17777777777777778,
25
+ "grad_norm": 3.1401618568879677,
26
+ "learning_rate": 1.2413793103448277e-05,
27
+ "loss": 0.4991,
28
+ "loss_nan_ranks": 0,
29
+ "loss_rank_avg": 0.1019982397556305,
30
+ "step": 10,
31
+ "valid_targets_mean": 2351.6,
32
+ "valid_targets_min": 1195
33
+ },
34
+ {
35
+ "epoch": 0.26666666666666666,
36
+ "grad_norm": 1.104245248825641,
37
+ "learning_rate": 1.931034482758621e-05,
38
+ "loss": 0.4163,
39
+ "loss_nan_ranks": 0,
40
+ "loss_rank_avg": 0.11541903018951416,
41
+ "step": 15,
42
+ "valid_targets_mean": 3168.0,
43
+ "valid_targets_min": 1312
44
+ },
45
+ {
46
+ "epoch": 0.35555555555555557,
47
+ "grad_norm": 0.732712115494436,
48
+ "learning_rate": 2.620689655172414e-05,
49
+ "loss": 0.3658,
50
+ "loss_nan_ranks": 0,
51
+ "loss_rank_avg": 0.12487722188234329,
52
+ "step": 20,
53
+ "valid_targets_mean": 3523.8,
54
+ "valid_targets_min": 1517
55
+ },
56
+ {
57
+ "epoch": 0.4444444444444444,
58
+ "grad_norm": 0.5506644308788069,
59
+ "learning_rate": 3.310344827586207e-05,
60
+ "loss": 0.3378,
61
+ "loss_nan_ranks": 0,
62
+ "loss_rank_avg": 0.08351944386959076,
63
+ "step": 25,
64
+ "valid_targets_mean": 3054.6,
65
+ "valid_targets_min": 1110
66
+ },
67
+ {
68
+ "epoch": 0.5333333333333333,
69
+ "grad_norm": 0.5212168935643539,
70
+ "learning_rate": 4e-05,
71
+ "loss": 0.3227,
72
+ "loss_nan_ranks": 0,
73
+ "loss_rank_avg": 0.08309619128704071,
74
+ "step": 30,
75
+ "valid_targets_mean": 2739.4,
76
+ "valid_targets_min": 1178
77
+ },
78
+ {
79
+ "epoch": 0.6222222222222222,
80
+ "grad_norm": 0.37621279442640654,
81
+ "learning_rate": 3.996236225800298e-05,
82
+ "loss": 0.2841,
83
+ "loss_nan_ranks": 0,
84
+ "loss_rank_avg": 0.058637700974941254,
85
+ "step": 35,
86
+ "valid_targets_mean": 3032.6,
87
+ "valid_targets_min": 962
88
+ },
89
+ {
90
+ "epoch": 0.7111111111111111,
91
+ "grad_norm": 0.3740113710680447,
92
+ "learning_rate": 3.9849590691974206e-05,
93
+ "loss": 0.275,
94
+ "loss_nan_ranks": 0,
95
+ "loss_rank_avg": 0.049976594746112823,
96
+ "step": 40,
97
+ "valid_targets_mean": 2221.6,
98
+ "valid_targets_min": 1249
99
+ },
100
+ {
101
+ "epoch": 0.8,
102
+ "grad_norm": 0.3555541947187885,
103
+ "learning_rate": 3.966210974862433e-05,
104
+ "loss": 0.2829,
105
+ "loss_nan_ranks": 0,
106
+ "loss_rank_avg": 0.047423090785741806,
107
+ "step": 45,
108
+ "valid_targets_mean": 2186.9,
109
+ "valid_targets_min": 1183
110
+ },
111
+ {
112
+ "epoch": 0.8888888888888888,
113
+ "grad_norm": 0.3614830806096002,
114
+ "learning_rate": 3.940062506389089e-05,
115
+ "loss": 0.2688,
116
+ "loss_nan_ranks": 0,
117
+ "loss_rank_avg": 0.07531788945198059,
118
+ "step": 50,
119
+ "valid_targets_mean": 2673.9,
120
+ "valid_targets_min": 1141
121
+ },
122
+ {
123
+ "epoch": 0.9777777777777777,
124
+ "grad_norm": 0.35164636779651554,
125
+ "learning_rate": 3.9066120807083875e-05,
126
+ "loss": 0.2583,
127
+ "loss_nan_ranks": 0,
128
+ "loss_rank_avg": 0.0835060328245163,
129
+ "step": 55,
130
+ "valid_targets_mean": 3016.2,
131
+ "valid_targets_min": 1526
132
+ },
133
+ {
134
+ "epoch": 1.0533333333333332,
135
+ "grad_norm": 0.4004740105789504,
136
+ "learning_rate": 3.865985597669478e-05,
137
+ "loss": 0.2609,
138
+ "loss_nan_ranks": 0,
139
+ "loss_rank_avg": 0.05621650442481041,
140
+ "step": 60,
141
+ "valid_targets_mean": 2221.8,
142
+ "valid_targets_min": 1151
143
+ },
144
+ {
145
+ "epoch": 1.1422222222222222,
146
+ "grad_norm": 0.3362478514134072,
147
+ "learning_rate": 3.818335966181045e-05,
148
+ "loss": 0.2418,
149
+ "loss_nan_ranks": 0,
150
+ "loss_rank_avg": 0.06041599065065384,
151
+ "step": 65,
152
+ "valid_targets_mean": 2575.6,
153
+ "valid_targets_min": 1126
154
+ },
155
+ {
156
+ "epoch": 1.231111111111111,
157
+ "grad_norm": 0.327805935004158,
158
+ "learning_rate": 3.76384252869671e-05,
159
+ "loss": 0.2527,
160
+ "loss_nan_ranks": 0,
161
+ "loss_rank_avg": 0.05153035372495651,
162
+ "step": 70,
163
+ "valid_targets_mean": 2401.1,
164
+ "valid_targets_min": 1291
165
+ },
166
+ {
167
+ "epoch": 1.32,
168
+ "grad_norm": 0.3344211876841236,
169
+ "learning_rate": 3.702710386210531e-05,
170
+ "loss": 0.2363,
171
+ "loss_nan_ranks": 0,
172
+ "loss_rank_avg": 0.0525384359061718,
173
+ "step": 75,
174
+ "valid_targets_mean": 2742.1,
175
+ "valid_targets_min": 1147
176
+ },
177
+ {
178
+ "epoch": 1.4088888888888889,
179
+ "grad_norm": 0.3391981121560211,
180
+ "learning_rate": 3.635169626303168e-05,
181
+ "loss": 0.238,
182
+ "loss_nan_ranks": 0,
183
+ "loss_rank_avg": 0.06917819380760193,
184
+ "step": 80,
185
+ "valid_targets_mean": 3123.1,
186
+ "valid_targets_min": 1190
187
+ },
188
+ {
189
+ "epoch": 1.4977777777777779,
190
+ "grad_norm": 0.32922754267793913,
191
+ "learning_rate": 3.561474457144189e-05,
192
+ "loss": 0.2449,
193
+ "loss_nan_ranks": 0,
194
+ "loss_rank_avg": 0.05151129886507988,
195
+ "step": 85,
196
+ "valid_targets_mean": 3233.1,
197
+ "valid_targets_min": 1357
198
+ },
199
+ {
200
+ "epoch": 1.5866666666666667,
201
+ "grad_norm": 0.32706014931505833,
202
+ "learning_rate": 3.4819022507099184e-05,
203
+ "loss": 0.2223,
204
+ "loss_nan_ranks": 0,
205
+ "loss_rank_avg": 0.05639180168509483,
206
+ "step": 90,
207
+ "valid_targets_mean": 2922.6,
208
+ "valid_targets_min": 1310
209
+ },
210
+ {
211
+ "epoch": 1.6755555555555555,
212
+ "grad_norm": 0.341988951356744,
213
+ "learning_rate": 3.3967524988179463e-05,
214
+ "loss": 0.2319,
215
+ "loss_nan_ranks": 0,
216
+ "loss_rank_avg": 0.040721211582422256,
217
+ "step": 95,
218
+ "valid_targets_mean": 2238.1,
219
+ "valid_targets_min": 1199
220
+ },
221
+ {
222
+ "epoch": 1.7644444444444445,
223
+ "grad_norm": 0.3543043397710712,
224
+ "learning_rate": 3.306345685907553e-05,
225
+ "loss": 0.2313,
226
+ "loss_nan_ranks": 0,
227
+ "loss_rank_avg": 0.03705868870019913,
228
+ "step": 100,
229
+ "valid_targets_mean": 2039.1,
230
+ "valid_targets_min": 1054
231
+ },
232
+ {
233
+ "epoch": 1.8533333333333335,
234
+ "grad_norm": 0.3169475427171822,
235
+ "learning_rate": 3.211022082808652e-05,
236
+ "loss": 0.2231,
237
+ "loss_nan_ranks": 0,
238
+ "loss_rank_avg": 0.03866267204284668,
239
+ "step": 105,
240
+ "valid_targets_mean": 2269.2,
241
+ "valid_targets_min": 1244
242
+ },
243
+ {
244
+ "epoch": 1.942222222222222,
245
+ "grad_norm": 0.3626154274513678,
246
+ "learning_rate": 3.111140466039205e-05,
247
+ "loss": 0.2454,
248
+ "loss_nan_ranks": 0,
249
+ "loss_rank_avg": 0.05644143745303154,
250
+ "step": 110,
251
+ "valid_targets_mean": 2740.2,
252
+ "valid_targets_min": 1249
253
+ },
254
+ {
255
+ "epoch": 2.017777777777778,
256
+ "grad_norm": 0.3229168931990422,
257
+ "learning_rate": 3.0070767674514355e-05,
258
+ "loss": 0.231,
259
+ "loss_nan_ranks": 0,
260
+ "loss_rank_avg": 0.045998841524124146,
261
+ "step": 115,
262
+ "valid_targets_mean": 2432.6,
263
+ "valid_targets_min": 1365
264
+ },
265
+ {
266
+ "epoch": 2.1066666666666665,
267
+ "grad_norm": 0.4074227847296487,
268
+ "learning_rate": 2.8992226593092135e-05,
269
+ "loss": 0.2261,
270
+ "loss_nan_ranks": 0,
271
+ "loss_rank_avg": 0.041235923767089844,
272
+ "step": 120,
273
+ "valid_targets_mean": 2494.5,
274
+ "valid_targets_min": 1151
275
+ },
276
+ {
277
+ "epoch": 2.1955555555555555,
278
+ "grad_norm": 0.3124502564340804,
279
+ "learning_rate": 2.7879840801220967e-05,
280
+ "loss": 0.2109,
281
+ "loss_nan_ranks": 0,
282
+ "loss_rank_avg": 0.04943722486495972,
283
+ "step": 125,
284
+ "valid_targets_mean": 2937.8,
285
+ "valid_targets_min": 1162
286
+ },
287
+ {
288
+ "epoch": 2.2844444444444445,
289
+ "grad_norm": 0.36347414667971645,
290
+ "learning_rate": 2.6737797067844403e-05,
291
+ "loss": 0.2205,
292
+ "loss_nan_ranks": 0,
293
+ "loss_rank_avg": 0.041548021137714386,
294
+ "step": 130,
295
+ "valid_targets_mean": 2297.9,
296
+ "valid_targets_min": 1539
297
+ },
298
+ {
299
+ "epoch": 2.3733333333333335,
300
+ "grad_norm": 0.3397949895260685,
301
+ "learning_rate": 2.5570393787701063e-05,
302
+ "loss": 0.2151,
303
+ "loss_nan_ranks": 0,
304
+ "loss_rank_avg": 0.07428301870822906,
305
+ "step": 135,
306
+ "valid_targets_mean": 3256.2,
307
+ "valid_targets_min": 1340
308
+ },
309
+ {
310
+ "epoch": 2.462222222222222,
311
+ "grad_norm": 0.3409669850928538,
312
+ "learning_rate": 2.4382024803137396e-05,
313
+ "loss": 0.214,
314
+ "loss_nan_ranks": 0,
315
+ "loss_rank_avg": 0.05983541160821915,
316
+ "step": 140,
317
+ "valid_targets_mean": 2922.6,
318
+ "valid_targets_min": 1264
319
+ },
320
+ {
321
+ "epoch": 2.551111111111111,
322
+ "grad_norm": 0.33002068625976483,
323
+ "learning_rate": 2.317716286667723e-05,
324
+ "loss": 0.2169,
325
+ "loss_nan_ranks": 0,
326
+ "loss_rank_avg": 0.05062925070524216,
327
+ "step": 145,
328
+ "valid_targets_mean": 2481.7,
329
+ "valid_targets_min": 1088
330
+ },
331
+ {
332
+ "epoch": 2.64,
333
+ "grad_norm": 0.3116815064192726,
334
+ "learning_rate": 2.196034280659122e-05,
335
+ "loss": 0.2099,
336
+ "loss_nan_ranks": 0,
337
+ "loss_rank_avg": 0.03972204774618149,
338
+ "step": 150,
339
+ "valid_targets_mean": 2388.9,
340
+ "valid_targets_min": 1028
341
+ },
342
+ {
343
+ "epoch": 2.728888888888889,
344
+ "grad_norm": 0.32691657273888747,
345
+ "learning_rate": 2.073614445882718e-05,
346
+ "loss": 0.2243,
347
+ "loss_nan_ranks": 0,
348
+ "loss_rank_avg": 0.05966932699084282,
349
+ "step": 155,
350
+ "valid_targets_mean": 2523.2,
351
+ "valid_targets_min": 1338
352
+ },
353
+ {
354
+ "epoch": 2.8177777777777777,
355
+ "grad_norm": 0.32702194667234424,
356
+ "learning_rate": 1.950917542954176e-05,
357
+ "loss": 0.22,
358
+ "loss_nan_ranks": 0,
359
+ "loss_rank_avg": 0.05485967546701431,
360
+ "step": 160,
361
+ "valid_targets_mean": 2835.8,
362
+ "valid_targets_min": 1045
363
+ },
364
+ {
365
+ "epoch": 2.9066666666666667,
366
+ "grad_norm": 0.3371678153197184,
367
+ "learning_rate": 1.8284053753111205e-05,
368
+ "loss": 0.2104,
369
+ "loss_nan_ranks": 0,
370
+ "loss_rank_avg": 0.034001681953668594,
371
+ "step": 165,
372
+ "valid_targets_mean": 2010.9,
373
+ "valid_targets_min": 1203
374
+ },
375
+ {
376
+ "epoch": 2.9955555555555557,
377
+ "grad_norm": 0.3400453584476225,
378
+ "learning_rate": 1.7065390510892767e-05,
379
+ "loss": 0.2065,
380
+ "loss_nan_ranks": 0,
381
+ "loss_rank_avg": 0.04110487177968025,
382
+ "step": 170,
383
+ "valid_targets_mean": 2373.1,
384
+ "valid_targets_min": 1131
385
+ },
386
+ {
387
+ "epoch": 3.071111111111111,
388
+ "grad_norm": 0.29868424312125685,
389
+ "learning_rate": 1.5857772476155634e-05,
390
+ "loss": 0.2018,
391
+ "loss_nan_ranks": 0,
392
+ "loss_rank_avg": 0.05249975621700287,
393
+ "step": 175,
394
+ "valid_targets_mean": 3154.9,
395
+ "valid_targets_min": 1178
396
+ },
397
+ {
398
+ "epoch": 3.16,
399
+ "grad_norm": 0.3706098125869751,
400
+ "learning_rate": 1.4665744850502035e-05,
401
+ "loss": 0.2145,
402
+ "loss_nan_ranks": 0,
403
+ "loss_rank_avg": 0.055659279227256775,
404
+ "step": 180,
405
+ "valid_targets_mean": 2609.7,
406
+ "valid_targets_min": 1283
407
+ },
408
+ {
409
+ "epoch": 3.2488888888888887,
410
+ "grad_norm": 0.34761594185958783,
411
+ "learning_rate": 1.3493794156754744e-05,
412
+ "loss": 0.2129,
413
+ "loss_nan_ranks": 0,
414
+ "loss_rank_avg": 0.03931069374084473,
415
+ "step": 185,
416
+ "valid_targets_mean": 2712.7,
417
+ "valid_targets_min": 1002
418
+ },
419
+ {
420
+ "epoch": 3.3377777777777777,
421
+ "grad_norm": 0.3480472578827022,
422
+ "learning_rate": 1.2346331352698206e-05,
423
+ "loss": 0.2032,
424
+ "loss_nan_ranks": 0,
425
+ "loss_rank_avg": 0.06302663683891296,
426
+ "step": 190,
427
+ "valid_targets_mean": 2542.1,
428
+ "valid_targets_min": 1383
429
+ },
430
+ {
431
+ "epoch": 3.4266666666666667,
432
+ "grad_norm": 0.35674083440708165,
433
+ "learning_rate": 1.1227675229229453e-05,
434
+ "loss": 0.1997,
435
+ "loss_nan_ranks": 0,
436
+ "loss_rank_avg": 0.06049852818250656,
437
+ "step": 195,
438
+ "valid_targets_mean": 2524.8,
439
+ "valid_targets_min": 993
440
+ },
441
+ {
442
+ "epoch": 3.5155555555555553,
443
+ "grad_norm": 0.33660537404764074,
444
+ "learning_rate": 1.0142036155404322e-05,
445
+ "loss": 0.2092,
446
+ "loss_nan_ranks": 0,
447
+ "loss_rank_avg": 0.04671245440840721,
448
+ "step": 200,
449
+ "valid_targets_mean": 2890.4,
450
+ "valid_targets_min": 388
451
+ },
452
+ {
453
+ "epoch": 3.6044444444444443,
454
+ "grad_norm": 0.4234454768211141,
455
+ "learning_rate": 9.093500231559076e-06,
456
+ "loss": 0.1932,
457
+ "loss_nan_ranks": 0,
458
+ "loss_rank_avg": 0.039773087948560715,
459
+ "step": 205,
460
+ "valid_targets_mean": 2687.6,
461
+ "valid_targets_min": 1134
462
+ },
463
+ {
464
+ "epoch": 3.6933333333333334,
465
+ "grad_norm": 0.3462492949486452,
466
+ "learning_rate": 8.086013910151334e-06,
467
+ "loss": 0.206,
468
+ "loss_nan_ranks": 0,
469
+ "loss_rank_avg": 0.0793825015425682,
470
+ "step": 210,
471
+ "valid_targets_mean": 3072.4,
472
+ "valid_targets_min": 1523
473
+ },
474
+ {
475
+ "epoch": 3.7822222222222224,
476
+ "grad_norm": 0.33043166323357076,
477
+ "learning_rate": 7.123369142204175e-06,
478
+ "loss": 0.1978,
479
+ "loss_nan_ranks": 0,
480
+ "loss_rank_avg": 0.05091720446944237,
481
+ "step": 215,
482
+ "valid_targets_mean": 2395.9,
483
+ "valid_targets_min": 1181
484
+ },
485
+ {
486
+ "epoch": 3.871111111111111,
487
+ "grad_norm": 0.3276021134703391,
488
+ "learning_rate": 6.209189105258661e-06,
489
+ "loss": 0.1978,
490
+ "loss_nan_ranks": 0,
491
+ "loss_rank_avg": 0.04009982943534851,
492
+ "step": 220,
493
+ "valid_targets_mean": 2544.8,
494
+ "valid_targets_min": 1459
495
+ },
496
+ {
497
+ "epoch": 3.96,
498
+ "grad_norm": 0.3518358226857207,
499
+ "learning_rate": 5.346914566551746e-06,
500
+ "loss": 0.2057,
501
+ "loss_nan_ranks": 0,
502
+ "loss_rank_avg": 0.05190512537956238,
503
+ "step": 225,
504
+ "valid_targets_mean": 2315.4,
505
+ "valid_targets_min": 1089
506
+ },
507
+ {
508
+ "epoch": 4.035555555555556,
509
+ "grad_norm": 0.446701948663916,
510
+ "learning_rate": 4.53979093274526e-06,
511
+ "loss": 0.1909,
512
+ "loss_nan_ranks": 0,
513
+ "loss_rank_avg": 0.04597758501768112,
514
+ "step": 230,
515
+ "valid_targets_mean": 2736.4,
516
+ "valid_targets_min": 1155
517
+ },
518
+ {
519
+ "epoch": 4.124444444444444,
520
+ "grad_norm": 0.3070256238047938,
521
+ "learning_rate": 3.7908560349481072e-06,
522
+ "loss": 0.1998,
523
+ "loss_nan_ranks": 0,
524
+ "loss_rank_avg": 0.05320999398827553,
525
+ "step": 235,
526
+ "valid_targets_mean": 3201.8,
527
+ "valid_targets_min": 1119
528
+ },
529
+ {
530
+ "epoch": 4.213333333333333,
531
+ "grad_norm": 0.3084555402280151,
532
+ "learning_rate": 3.102928695005858e-06,
533
+ "loss": 0.1975,
534
+ "loss_nan_ranks": 0,
535
+ "loss_rank_avg": 0.029193339869379997,
536
+ "step": 240,
537
+ "valid_targets_mean": 1924.7,
538
+ "valid_targets_min": 1318
539
+ },
540
+ {
541
+ "epoch": 4.302222222222222,
542
+ "grad_norm": 0.33282212562089014,
543
+ "learning_rate": 2.4785981160918703e-06,
544
+ "loss": 0.2001,
545
+ "loss_nan_ranks": 0,
546
+ "loss_rank_avg": 0.04285688325762749,
547
+ "step": 245,
548
+ "valid_targets_mean": 2463.9,
549
+ "valid_targets_min": 1178
550
+ },
551
+ {
552
+ "epoch": 4.391111111111111,
553
+ "grad_norm": 0.3167323683701005,
554
+ "learning_rate": 1.9202141375311335e-06,
555
+ "loss": 0.2013,
556
+ "loss_nan_ranks": 0,
557
+ "loss_rank_avg": 0.04943656548857689,
558
+ "step": 250,
559
+ "valid_targets_mean": 2726.8,
560
+ "valid_targets_min": 1173
561
+ },
562
+ {
563
+ "epoch": 4.48,
564
+ "grad_norm": 0.33351932332643597,
565
+ "learning_rate": 1.4298783905356906e-06,
566
+ "loss": 0.1954,
567
+ "loss_nan_ranks": 0,
568
+ "loss_rank_avg": 0.04813677817583084,
569
+ "step": 255,
570
+ "valid_targets_mean": 2432.7,
571
+ "valid_targets_min": 942
572
+ },
573
+ {
574
+ "epoch": 4.568888888888889,
575
+ "grad_norm": 0.3163118446536364,
576
+ "learning_rate": 1.0094363881392665e-06,
577
+ "loss": 0.1975,
578
+ "loss_nan_ranks": 0,
579
+ "loss_rank_avg": 0.06139428913593292,
580
+ "step": 260,
581
+ "valid_targets_mean": 3001.4,
582
+ "valid_targets_min": 1329
583
+ },
584
+ {
585
+ "epoch": 4.657777777777778,
586
+ "grad_norm": 0.33335357258605675,
587
+ "learning_rate": 6.604705791029586e-07,
588
+ "loss": 0.1987,
589
+ "loss_nan_ranks": 0,
590
+ "loss_rank_avg": 0.044643834233284,
591
+ "step": 265,
592
+ "valid_targets_mean": 2461.6,
593
+ "valid_targets_min": 1178
594
+ },
595
+ {
596
+ "epoch": 4.746666666666667,
597
+ "grad_norm": 0.33713590138912286,
598
+ "learning_rate": 3.842943919353914e-07,
599
+ "loss": 0.1881,
600
+ "loss_nan_ranks": 0,
601
+ "loss_rank_avg": 0.03312596306204796,
602
+ "step": 270,
603
+ "valid_targets_mean": 2348.8,
604
+ "valid_targets_min": 1282
605
+ },
606
+ {
607
+ "epoch": 4.835555555555556,
608
+ "grad_norm": 0.32804179691400653,
609
+ "learning_rate": 1.819472914443998e-07,
610
+ "loss": 0.207,
611
+ "loss_nan_ranks": 0,
612
+ "loss_rank_avg": 0.03083619847893715,
613
+ "step": 275,
614
+ "valid_targets_mean": 2106.2,
615
+ "valid_targets_min": 1274
616
+ },
617
+ {
618
+ "epoch": 4.924444444444444,
619
+ "grad_norm": 0.3250869471276454,
620
+ "learning_rate": 5.4190866426195866e-08,
621
+ "loss": 0.1982,
622
+ "loss_nan_ranks": 0,
623
+ "loss_rank_avg": 0.04578050225973129,
624
+ "step": 280,
625
+ "valid_targets_mean": 2654.6,
626
+ "valid_targets_min": 1454
627
+ },
628
+ {
629
+ "epoch": 5.0,
630
+ "grad_norm": 0.6141969340231087,
631
+ "learning_rate": 1.5059632171099402e-09,
632
+ "loss": 0.2007,
633
+ "loss_nan_ranks": 0,
634
+ "loss_rank_avg": 0.2248307466506958,
635
+ "step": 285,
636
+ "valid_targets_mean": 2971.6,
637
+ "valid_targets_min": 1283
638
+ },
639
+ {
640
+ "epoch": 5.0,
641
+ "loss_nan_ranks": 0,
642
+ "loss_rank_avg": 0.2248307466506958,
643
+ "step": 285,
644
+ "total_flos": 3.151185311610962e+17,
645
+ "train_loss": 0.24082276695653013,
646
+ "train_runtime": 3979.0496,
647
+ "train_samples_per_second": 4.505,
648
+ "train_steps_per_second": 0.072,
649
+ "valid_targets_mean": 2971.6,
650
+ "valid_targets_min": 1283
651
+ }
652
+ ],
653
+ "logging_steps": 5,
654
+ "max_steps": 285,
655
+ "num_input_tokens_seen": 0,
656
+ "num_train_epochs": 5,
657
+ "save_steps": 500,
658
+ "stateful_callbacks": {
659
+ "TrainerControl": {
660
+ "args": {
661
+ "should_epoch_stop": false,
662
+ "should_evaluate": false,
663
+ "should_log": false,
664
+ "should_save": false,
665
+ "should_training_stop": false
666
+ },
667
+ "attributes": {}
668
+ }
669
+ },
670
+ "total_flos": 3.151185311610962e+17,
671
+ "train_batch_size": 1,
672
+ "trial_name": null,
673
+ "trial_params": null
674
+ }
training_loss.png ADDED