penfever committed on
Commit
e97e39a
·
verified ·
1 Parent(s): 75adc52

End of training

Browse files
Files changed (5) hide show
  1. README.md +2 -1
  2. all_results.json +16 -0
  3. train_results.json +16 -0
  4. trainer_state.json +597 -0
  5. training_loss.png +0 -0
README.md CHANGED
@@ -4,6 +4,7 @@ license: apache-2.0
4
  base_model: Qwen/Qwen3-8B
5
  tags:
6
  - llama-factory
 
7
  - generated_from_trainer
8
  model-index:
9
  - name: taskmaster2-3k-traces
@@ -15,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
15
 
16
  # taskmaster2-3k-traces
17
 
18
- This model is a fine-tuned version of [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B) on an unknown dataset.
19
 
20
  ## Model description
21
 
 
4
  base_model: Qwen/Qwen3-8B
5
  tags:
6
  - llama-factory
7
+ - full
8
  - generated_from_trainer
9
  model-index:
10
  - name: taskmaster2-3k-traces
 
16
 
17
  # taskmaster2-3k-traces
18
 
19
+ This model is a fine-tuned version of [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B) on the DCAgent/taskmaster2-3k-traces dataset.
20
 
21
  ## Model description
22
 
all_results.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "achieved_tflops_per_gpu": 4.955181724833952,
3
+ "achieved_tflops_per_gpu_theoretical": 155.84360988986572,
4
+ "epoch": 5.0,
5
+ "loss_nan_ranks": 0,
6
+ "loss_rank_avg": 0.20247647166252136,
7
+ "mfu_percent": 0.35018952118967855,
8
+ "mfu_percent_theoretical": 11.013682677729026,
9
+ "total_flos": 4.268418036190413e+17,
10
+ "train_loss": 0.4559480676651001,
11
+ "train_runtime": 5383.7809,
12
+ "train_samples_per_second": 2.935,
13
+ "train_steps_per_second": 0.046,
14
+ "valid_targets_mean": 3850.3,
15
+ "valid_targets_min": 771
16
+ }
train_results.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "achieved_tflops_per_gpu": 4.955181724833952,
3
+ "achieved_tflops_per_gpu_theoretical": 155.84360988986572,
4
+ "epoch": 5.0,
5
+ "loss_nan_ranks": 0,
6
+ "loss_rank_avg": 0.20247647166252136,
7
+ "mfu_percent": 0.35018952118967855,
8
+ "mfu_percent_theoretical": 11.013682677729026,
9
+ "total_flos": 4.268418036190413e+17,
10
+ "train_loss": 0.4559480676651001,
11
+ "train_runtime": 5383.7809,
12
+ "train_samples_per_second": 2.935,
13
+ "train_steps_per_second": 0.046,
14
+ "valid_targets_mean": 3850.3,
15
+ "valid_targets_min": 771
16
+ }
trainer_state.json ADDED
@@ -0,0 +1,597 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 5.0,
6
+ "eval_steps": 500,
7
+ "global_step": 250,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.10101010101010101,
14
+ "grad_norm": 5.43838668916967,
15
+ "learning_rate": 6.4000000000000006e-06,
16
+ "loss": 0.768,
17
+ "loss_nan_ranks": 0,
18
+ "loss_rank_avg": 0.20029328763484955,
19
+ "step": 5,
20
+ "valid_targets_mean": 3894.4,
21
+ "valid_targets_min": 1551
22
+ },
23
+ {
24
+ "epoch": 0.20202020202020202,
25
+ "grad_norm": 1.8173704060845222,
26
+ "learning_rate": 1.4400000000000001e-05,
27
+ "loss": 0.6809,
28
+ "loss_nan_ranks": 0,
29
+ "loss_rank_avg": 0.12393468618392944,
30
+ "step": 10,
31
+ "valid_targets_mean": 3313.9,
32
+ "valid_targets_min": 658
33
+ },
34
+ {
35
+ "epoch": 0.30303030303030304,
36
+ "grad_norm": 0.8994415488525478,
37
+ "learning_rate": 2.2400000000000002e-05,
38
+ "loss": 0.6367,
39
+ "loss_nan_ranks": 0,
40
+ "loss_rank_avg": 0.15267133712768555,
41
+ "step": 15,
42
+ "valid_targets_mean": 3214.2,
43
+ "valid_targets_min": 748
44
+ },
45
+ {
46
+ "epoch": 0.40404040404040403,
47
+ "grad_norm": 0.5231491285791463,
48
+ "learning_rate": 3.0400000000000004e-05,
49
+ "loss": 0.603,
50
+ "loss_nan_ranks": 0,
51
+ "loss_rank_avg": 0.1451815664768219,
52
+ "step": 20,
53
+ "valid_targets_mean": 4062.5,
54
+ "valid_targets_min": 752
55
+ },
56
+ {
57
+ "epoch": 0.5050505050505051,
58
+ "grad_norm": 0.48610136532802667,
59
+ "learning_rate": 3.8400000000000005e-05,
60
+ "loss": 0.5739,
61
+ "loss_nan_ranks": 0,
62
+ "loss_rank_avg": 0.15977895259857178,
63
+ "step": 25,
64
+ "valid_targets_mean": 4920.6,
65
+ "valid_targets_min": 689
66
+ },
67
+ {
68
+ "epoch": 0.6060606060606061,
69
+ "grad_norm": 0.3657562059313739,
70
+ "learning_rate": 3.9968815283639625e-05,
71
+ "loss": 0.531,
72
+ "loss_nan_ranks": 0,
73
+ "loss_rank_avg": 0.12183522433042526,
74
+ "step": 30,
75
+ "valid_targets_mean": 4170.1,
76
+ "valid_targets_min": 848
77
+ },
78
+ {
79
+ "epoch": 0.7070707070707071,
80
+ "grad_norm": 0.3117441303599555,
81
+ "learning_rate": 3.9842294026289565e-05,
82
+ "loss": 0.5137,
83
+ "loss_nan_ranks": 0,
84
+ "loss_rank_avg": 0.11094912886619568,
85
+ "step": 35,
86
+ "valid_targets_mean": 4246.0,
87
+ "valid_targets_min": 626
88
+ },
89
+ {
90
+ "epoch": 0.8080808080808081,
91
+ "grad_norm": 0.31427213112004465,
92
+ "learning_rate": 3.9619103106983835e-05,
93
+ "loss": 0.5033,
94
+ "loss_nan_ranks": 0,
95
+ "loss_rank_avg": 0.11524395644664764,
96
+ "step": 40,
97
+ "valid_targets_mean": 3335.3,
98
+ "valid_targets_min": 589
99
+ },
100
+ {
101
+ "epoch": 0.9090909090909091,
102
+ "grad_norm": 0.27843711420334,
103
+ "learning_rate": 3.930032988944623e-05,
104
+ "loss": 0.4921,
105
+ "loss_nan_ranks": 0,
106
+ "loss_rank_avg": 0.12719261646270752,
107
+ "step": 45,
108
+ "valid_targets_mean": 4673.1,
109
+ "valid_targets_min": 842
110
+ },
111
+ {
112
+ "epoch": 1.0,
113
+ "grad_norm": 0.35336756908591643,
114
+ "learning_rate": 3.888752740474962e-05,
115
+ "loss": 0.4725,
116
+ "loss_nan_ranks": 0,
117
+ "loss_rank_avg": 0.23522385954856873,
118
+ "step": 50,
119
+ "valid_targets_mean": 4525.1,
120
+ "valid_targets_min": 618
121
+ },
122
+ {
123
+ "epoch": 1.101010101010101,
124
+ "grad_norm": 0.2618047514835116,
125
+ "learning_rate": 3.838270678510469e-05,
126
+ "loss": 0.4631,
127
+ "loss_nan_ranks": 0,
128
+ "loss_rank_avg": 0.0997854694724083,
129
+ "step": 55,
130
+ "valid_targets_mean": 3254.4,
131
+ "valid_targets_min": 620
132
+ },
133
+ {
134
+ "epoch": 1.202020202020202,
135
+ "grad_norm": 0.2596681731982267,
136
+ "learning_rate": 3.778832746582596e-05,
137
+ "loss": 0.4695,
138
+ "loss_nan_ranks": 0,
139
+ "loss_rank_avg": 0.15060186386108398,
140
+ "step": 60,
141
+ "valid_targets_mean": 5336.2,
142
+ "valid_targets_min": 994
143
+ },
144
+ {
145
+ "epoch": 1.303030303030303,
146
+ "grad_norm": 0.26134191299357834,
147
+ "learning_rate": 3.710728520321014e-05,
148
+ "loss": 0.4622,
149
+ "loss_nan_ranks": 0,
150
+ "loss_rank_avg": 0.10998662561178207,
151
+ "step": 65,
152
+ "valid_targets_mean": 3828.9,
153
+ "valid_targets_min": 813
154
+ },
155
+ {
156
+ "epoch": 1.404040404040404,
157
+ "grad_norm": 0.2505910129541225,
158
+ "learning_rate": 3.634289796670257e-05,
159
+ "loss": 0.4639,
160
+ "loss_nan_ranks": 0,
161
+ "loss_rank_avg": 0.11412525177001953,
162
+ "step": 70,
163
+ "valid_targets_mean": 4118.6,
164
+ "valid_targets_min": 589
165
+ },
166
+ {
167
+ "epoch": 1.5050505050505052,
168
+ "grad_norm": 0.28469306377718256,
169
+ "learning_rate": 3.549888977408359e-05,
170
+ "loss": 0.4547,
171
+ "loss_nan_ranks": 0,
172
+ "loss_rank_avg": 0.10224676132202148,
173
+ "step": 75,
174
+ "valid_targets_mean": 3621.8,
175
+ "valid_targets_min": 807
176
+ },
177
+ {
178
+ "epoch": 1.606060606060606,
179
+ "grad_norm": 0.26506094775038846,
180
+ "learning_rate": 3.457937254842823e-05,
181
+ "loss": 0.4478,
182
+ "loss_nan_ranks": 0,
183
+ "loss_rank_avg": 0.1216074526309967,
184
+ "step": 80,
185
+ "valid_targets_mean": 4121.2,
186
+ "valid_targets_min": 900
187
+ },
188
+ {
189
+ "epoch": 1.7070707070707072,
190
+ "grad_norm": 0.27422041538604486,
191
+ "learning_rate": 3.3588826085230336e-05,
192
+ "loss": 0.4405,
193
+ "loss_nan_ranks": 0,
194
+ "loss_rank_avg": 0.09807954728603363,
195
+ "step": 85,
196
+ "valid_targets_mean": 3189.9,
197
+ "valid_targets_min": 777
198
+ },
199
+ {
200
+ "epoch": 1.808080808080808,
201
+ "grad_norm": 0.25008604850493343,
202
+ "learning_rate": 3.253207622728921e-05,
203
+ "loss": 0.441,
204
+ "loss_nan_ranks": 0,
205
+ "loss_rank_avg": 0.11396118998527527,
206
+ "step": 90,
207
+ "valid_targets_mean": 4577.4,
208
+ "valid_targets_min": 875
209
+ },
210
+ {
211
+ "epoch": 1.9090909090909092,
212
+ "grad_norm": 0.26410650818418535,
213
+ "learning_rate": 3.141427135368864e-05,
214
+ "loss": 0.4447,
215
+ "loss_nan_ranks": 0,
216
+ "loss_rank_avg": 0.10761921852827072,
217
+ "step": 95,
218
+ "valid_targets_mean": 3803.5,
219
+ "valid_targets_min": 613
220
+ },
221
+ {
222
+ "epoch": 2.0,
223
+ "grad_norm": 0.37011104936880673,
224
+ "learning_rate": 3.024085729741143e-05,
225
+ "loss": 0.4481,
226
+ "loss_nan_ranks": 0,
227
+ "loss_rank_avg": 0.254130095243454,
228
+ "step": 100,
229
+ "valid_targets_mean": 4326.2,
230
+ "valid_targets_min": 550
231
+ },
232
+ {
233
+ "epoch": 2.101010101010101,
234
+ "grad_norm": 0.2633444829921788,
235
+ "learning_rate": 2.9017550813788616e-05,
236
+ "loss": 0.4286,
237
+ "loss_nan_ranks": 0,
238
+ "loss_rank_avg": 0.10730044543743134,
239
+ "step": 105,
240
+ "valid_targets_mean": 4681.4,
241
+ "valid_targets_min": 808
242
+ },
243
+ {
244
+ "epoch": 2.202020202020202,
245
+ "grad_norm": 0.27947047290967925,
246
+ "learning_rate": 2.7750311729042062e-05,
247
+ "loss": 0.4249,
248
+ "loss_nan_ranks": 0,
249
+ "loss_rank_avg": 0.11274366080760956,
250
+ "step": 110,
251
+ "valid_targets_mean": 4363.5,
252
+ "valid_targets_min": 721
253
+ },
254
+ {
255
+ "epoch": 2.303030303030303,
256
+ "grad_norm": 0.26878537952096837,
257
+ "learning_rate": 2.6445313904610227e-05,
258
+ "loss": 0.4319,
259
+ "loss_nan_ranks": 0,
260
+ "loss_rank_avg": 0.10825060307979584,
261
+ "step": 115,
262
+ "valid_targets_mean": 3570.9,
263
+ "valid_targets_min": 740
264
+ },
265
+ {
266
+ "epoch": 2.404040404040404,
267
+ "grad_norm": 0.27801838465637635,
268
+ "learning_rate": 2.510891515871581e-05,
269
+ "loss": 0.4335,
270
+ "loss_nan_ranks": 0,
271
+ "loss_rank_avg": 0.09939642250537872,
272
+ "step": 120,
273
+ "valid_targets_mean": 3389.2,
274
+ "valid_targets_min": 712
275
+ },
276
+ {
277
+ "epoch": 2.505050505050505,
278
+ "grad_norm": 0.26781305081846457,
279
+ "learning_rate": 2.37476262917145e-05,
280
+ "loss": 0.4247,
281
+ "loss_nan_ranks": 0,
282
+ "loss_rank_avg": 0.109991654753685,
283
+ "step": 125,
284
+ "valid_targets_mean": 4646.0,
285
+ "valid_targets_min": 748
286
+ },
287
+ {
288
+ "epoch": 2.606060606060606,
289
+ "grad_norm": 0.26237181084548494,
290
+ "learning_rate": 2.2368079366130028e-05,
291
+ "loss": 0.4187,
292
+ "loss_nan_ranks": 0,
293
+ "loss_rank_avg": 0.11524119973182678,
294
+ "step": 130,
295
+ "valid_targets_mean": 4932.1,
296
+ "valid_targets_min": 845
297
+ },
298
+ {
299
+ "epoch": 2.707070707070707,
300
+ "grad_norm": 0.2585522405239844,
301
+ "learning_rate": 2.097699539591227e-05,
302
+ "loss": 0.4274,
303
+ "loss_nan_ranks": 0,
304
+ "loss_rank_avg": 0.08941829204559326,
305
+ "step": 135,
306
+ "valid_targets_mean": 3169.6,
307
+ "valid_targets_min": 832
308
+ },
309
+ {
310
+ "epoch": 2.808080808080808,
311
+ "grad_norm": 0.26016350721342923,
312
+ "learning_rate": 1.9581151602332865e-05,
313
+ "loss": 0.4177,
314
+ "loss_nan_ranks": 0,
315
+ "loss_rank_avg": 0.1133737787604332,
316
+ "step": 140,
317
+ "valid_targets_mean": 5011.3,
318
+ "valid_targets_min": 1002
319
+ },
320
+ {
321
+ "epoch": 2.909090909090909,
322
+ "grad_norm": 0.27030558024007334,
323
+ "learning_rate": 1.8187348396044402e-05,
324
+ "loss": 0.4231,
325
+ "loss_nan_ranks": 0,
326
+ "loss_rank_avg": 0.10732856392860413,
327
+ "step": 145,
328
+ "valid_targets_mean": 4244.3,
329
+ "valid_targets_min": 849
330
+ },
331
+ {
332
+ "epoch": 3.0,
333
+ "grad_norm": 0.3695239483214702,
334
+ "learning_rate": 1.6802376246163307e-05,
335
+ "loss": 0.4307,
336
+ "loss_nan_ranks": 0,
337
+ "loss_rank_avg": 0.2080117017030716,
338
+ "step": 150,
339
+ "valid_targets_mean": 3466.7,
340
+ "valid_targets_min": 600
341
+ },
342
+ {
343
+ "epoch": 3.101010101010101,
344
+ "grad_norm": 0.2516175673157399,
345
+ "learning_rate": 1.5432982597786886e-05,
346
+ "loss": 0.4228,
347
+ "loss_nan_ranks": 0,
348
+ "loss_rank_avg": 0.11081644892692566,
349
+ "step": 155,
350
+ "valid_targets_mean": 4376.6,
351
+ "valid_targets_min": 1393
352
+ },
353
+ {
354
+ "epoch": 3.202020202020202,
355
+ "grad_norm": 0.25899041127457506,
356
+ "learning_rate": 1.4085838999119075e-05,
357
+ "loss": 0.4183,
358
+ "loss_nan_ranks": 0,
359
+ "loss_rank_avg": 0.10279497504234314,
360
+ "step": 160,
361
+ "valid_targets_mean": 3922.6,
362
+ "valid_targets_min": 1037
363
+ },
364
+ {
365
+ "epoch": 3.303030303030303,
366
+ "grad_norm": 0.2568302854985244,
367
+ "learning_rate": 1.2767508598358158e-05,
368
+ "loss": 0.4119,
369
+ "loss_nan_ranks": 0,
370
+ "loss_rank_avg": 0.10400432348251343,
371
+ "step": 165,
372
+ "valid_targets_mean": 4147.3,
373
+ "valid_targets_min": 840
374
+ },
375
+ {
376
+ "epoch": 3.404040404040404,
377
+ "grad_norm": 0.2575745984749214,
378
+ "learning_rate": 1.1484414168698547e-05,
379
+ "loss": 0.4109,
380
+ "loss_nan_ranks": 0,
381
+ "loss_rank_avg": 0.1014273464679718,
382
+ "step": 170,
383
+ "valid_targets_mean": 3870.5,
384
+ "valid_targets_min": 738
385
+ },
386
+ {
387
+ "epoch": 3.505050505050505,
388
+ "grad_norm": 0.2708052183398422,
389
+ "learning_rate": 1.0242806817225344e-05,
390
+ "loss": 0.4183,
391
+ "loss_nan_ranks": 0,
392
+ "loss_rank_avg": 0.0942554920911789,
393
+ "step": 175,
394
+ "valid_targets_mean": 4129.5,
395
+ "valid_targets_min": 691
396
+ },
397
+ {
398
+ "epoch": 3.606060606060606,
399
+ "grad_norm": 0.39589597035376406,
400
+ "learning_rate": 9.048735530148998e-06,
401
+ "loss": 0.4057,
402
+ "loss_nan_ranks": 0,
403
+ "loss_rank_avg": 0.10414694994688034,
404
+ "step": 180,
405
+ "valid_targets_mean": 3647.8,
406
+ "valid_targets_min": 1355
407
+ },
408
+ {
409
+ "epoch": 3.707070707070707,
410
+ "grad_norm": 0.2754352204699097,
411
+ "learning_rate": 7.908017702752504e-06,
412
+ "loss": 0.4167,
413
+ "loss_nan_ranks": 0,
414
+ "loss_rank_avg": 0.11268728971481323,
415
+ "step": 185,
416
+ "valid_targets_mean": 4025.8,
417
+ "valid_targets_min": 631
418
+ },
419
+ {
420
+ "epoch": 3.808080808080808,
421
+ "grad_norm": 0.26307739642578865,
422
+ "learning_rate": 6.826210797626389e-06,
423
+ "loss": 0.4158,
424
+ "loss_nan_ranks": 0,
425
+ "loss_rank_avg": 0.08769197762012482,
426
+ "step": 190,
427
+ "valid_targets_mean": 3541.8,
428
+ "valid_targets_min": 1132
429
+ },
430
+ {
431
+ "epoch": 3.909090909090909,
432
+ "grad_norm": 0.25965429680059315,
433
+ "learning_rate": 5.8085852692695864e-06,
434
+ "loss": 0.4088,
435
+ "loss_nan_ranks": 0,
436
+ "loss_rank_avg": 0.10568307340145111,
437
+ "step": 195,
438
+ "valid_targets_mean": 4072.8,
439
+ "valid_targets_min": 658
440
+ },
441
+ {
442
+ "epoch": 4.0,
443
+ "grad_norm": 0.37112181378749864,
444
+ "learning_rate": 4.8600988869648745e-06,
445
+ "loss": 0.4077,
446
+ "loss_nan_ranks": 0,
447
+ "loss_rank_avg": 0.18167036771774292,
448
+ "step": 200,
449
+ "valid_targets_mean": 3388.4,
450
+ "valid_targets_min": 757
451
+ },
452
+ {
453
+ "epoch": 4.101010101010101,
454
+ "grad_norm": 0.24190724490489265,
455
+ "learning_rate": 3.985372581025333e-06,
456
+ "loss": 0.3992,
457
+ "loss_nan_ranks": 0,
458
+ "loss_rank_avg": 0.09219323098659515,
459
+ "step": 205,
460
+ "valid_targets_mean": 3395.9,
461
+ "valid_targets_min": 1280
462
+ },
463
+ {
464
+ "epoch": 4.202020202020202,
465
+ "grad_norm": 0.26150260454768026,
466
+ "learning_rate": 3.1886679300863156e-06,
467
+ "loss": 0.407,
468
+ "loss_nan_ranks": 0,
469
+ "loss_rank_avg": 0.09850673377513885,
470
+ "step": 210,
471
+ "valid_targets_mean": 3216.6,
472
+ "valid_targets_min": 784
473
+ },
474
+ {
475
+ "epoch": 4.303030303030303,
476
+ "grad_norm": 0.26872823184659334,
477
+ "learning_rate": 2.473866399122733e-06,
478
+ "loss": 0.4133,
479
+ "loss_nan_ranks": 0,
480
+ "loss_rank_avg": 0.10747270286083221,
481
+ "step": 215,
482
+ "valid_targets_mean": 3487.2,
483
+ "valid_targets_min": 773
484
+ },
485
+ {
486
+ "epoch": 4.404040404040404,
487
+ "grad_norm": 0.2549088626047512,
488
+ "learning_rate": 1.8444504293418286e-06,
489
+ "loss": 0.4124,
490
+ "loss_nan_ranks": 0,
491
+ "loss_rank_avg": 0.10171965509653091,
492
+ "step": 220,
493
+ "valid_targets_mean": 3727.9,
494
+ "valid_targets_min": 928
495
+ },
496
+ {
497
+ "epoch": 4.505050505050505,
498
+ "grad_norm": 0.32301587125223463,
499
+ "learning_rate": 1.3034864720797112e-06,
500
+ "loss": 0.4125,
501
+ "loss_nan_ranks": 0,
502
+ "loss_rank_avg": 0.11444351077079773,
503
+ "step": 225,
504
+ "valid_targets_mean": 3867.6,
505
+ "valid_targets_min": 791
506
+ },
507
+ {
508
+ "epoch": 4.606060606060606,
509
+ "grad_norm": 0.24811664458418323,
510
+ "learning_rate": 8.536100493586552e-07,
511
+ "loss": 0.4023,
512
+ "loss_nan_ranks": 0,
513
+ "loss_rank_avg": 0.0989425927400589,
514
+ "step": 230,
515
+ "valid_targets_mean": 3649.8,
516
+ "valid_targets_min": 675
517
+ },
518
+ {
519
+ "epoch": 4.707070707070707,
520
+ "grad_norm": 0.24999205236156857,
521
+ "learning_rate": 4.970129138887347e-07,
522
+ "loss": 0.4078,
523
+ "loss_nan_ranks": 0,
524
+ "loss_rank_avg": 0.09448540210723877,
525
+ "step": 235,
526
+ "valid_targets_mean": 3779.3,
527
+ "valid_targets_min": 613
528
+ },
529
+ {
530
+ "epoch": 4.808080808080808,
531
+ "grad_norm": 0.2533525398749002,
532
+ "learning_rate": 2.3543237106894434e-07,
533
+ "loss": 0.4138,
534
+ "loss_nan_ranks": 0,
535
+ "loss_rank_avg": 0.10717539489269257,
536
+ "step": 240,
537
+ "valid_targets_mean": 3630.1,
538
+ "valid_targets_min": 1185
539
+ },
540
+ {
541
+ "epoch": 4.909090909090909,
542
+ "grad_norm": 0.2324383918083103,
543
+ "learning_rate": 7.01428150099126e-08,
544
+ "loss": 0.4099,
545
+ "loss_nan_ranks": 0,
546
+ "loss_rank_avg": 0.09807487577199936,
547
+ "step": 245,
548
+ "valid_targets_mean": 4431.1,
549
+ "valid_targets_min": 1588
550
+ },
551
+ {
552
+ "epoch": 5.0,
553
+ "grad_norm": 0.3564637724019629,
554
+ "learning_rate": 1.949519813915224e-09,
555
+ "loss": 0.4103,
556
+ "loss_nan_ranks": 0,
557
+ "loss_rank_avg": 0.20247647166252136,
558
+ "step": 250,
559
+ "valid_targets_mean": 3850.3,
560
+ "valid_targets_min": 771
561
+ },
562
+ {
563
+ "epoch": 5.0,
564
+ "loss_nan_ranks": 0,
565
+ "loss_rank_avg": 0.20247647166252136,
566
+ "step": 250,
567
+ "total_flos": 4.268418036190413e+17,
568
+ "train_loss": 0.4559480676651001,
569
+ "train_runtime": 5383.7809,
570
+ "train_samples_per_second": 2.935,
571
+ "train_steps_per_second": 0.046,
572
+ "valid_targets_mean": 3850.3,
573
+ "valid_targets_min": 771
574
+ }
575
+ ],
576
+ "logging_steps": 5,
577
+ "max_steps": 250,
578
+ "num_input_tokens_seen": 0,
579
+ "num_train_epochs": 5,
580
+ "save_steps": 500,
581
+ "stateful_callbacks": {
582
+ "TrainerControl": {
583
+ "args": {
584
+ "should_epoch_stop": false,
585
+ "should_evaluate": false,
586
+ "should_log": false,
587
+ "should_save": false,
588
+ "should_training_stop": false
589
+ },
590
+ "attributes": {}
591
+ }
592
+ },
593
+ "total_flos": 4.268418036190413e+17,
594
+ "train_batch_size": 1,
595
+ "trial_name": null,
596
+ "trial_params": null
597
+ }
training_loss.png ADDED