shuheng committed on
Commit
cfecde8
verified
1 Parent(s): f7eb380

End of training

Browse files
README.md CHANGED
@@ -4,6 +4,8 @@ license: apache-2.0
4
  base_model: albert/albert-base-v2
5
  tags:
6
  - generated_from_trainer
 
 
7
  model-index:
8
  - name: squad_albert_finetuned
9
  results: []
@@ -14,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
14
 
15
  # squad_albert_finetuned
16
 
17
- This model is a fine-tuned version of [albert/albert-base-v2](https://huggingface.co/albert/albert-base-v2) on an unknown dataset.
18
 
19
  ## Model description
20
 
 
4
  base_model: albert/albert-base-v2
5
  tags:
6
  - generated_from_trainer
7
+ datasets:
8
+ - squad
9
  model-index:
10
  - name: squad_albert_finetuned
11
  results: []
 
16
 
17
  # squad_albert_finetuned
18
 
19
+ This model is a fine-tuned version of [albert/albert-base-v2](https://huggingface.co/albert/albert-base-v2) on the squad dataset.
20
 
21
  ## Model description
22
 
all_results.json CHANGED
@@ -1,15 +1,15 @@
1
  {
2
- "epoch": 6.0,
3
- "eval_exact_match": 79.30936613055819,
4
- "eval_f1": 88.03839918996134,
5
- "eval_runtime": 37.1649,
6
  "eval_samples": 10808,
7
- "eval_samples_per_second": 290.812,
8
- "eval_steps_per_second": 36.351,
9
- "total_flos": 8808588640659456.0,
10
- "train_loss": 0.4993116462560624,
11
- "train_runtime": 4288.5384,
12
  "train_samples": 88638,
13
- "train_samples_per_second": 124.011,
14
- "train_steps_per_second": 3.875
15
  }
 
1
  {
2
+ "epoch": 10.0,
3
+ "eval_exact_match": 78.23084200567644,
4
+ "eval_f1": 87.49820647381922,
5
+ "eval_runtime": 36.1971,
6
  "eval_samples": 10808,
7
+ "eval_samples_per_second": 298.588,
8
+ "eval_steps_per_second": 37.323,
9
+ "total_flos": 1.468098106776576e+16,
10
+ "train_loss": 0.3405133172148832,
11
+ "train_runtime": 6971.8302,
12
  "train_samples": 88638,
13
+ "train_samples_per_second": 127.137,
14
+ "train_steps_per_second": 3.973
15
  }
eval_nbest_predictions.json CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:503e0e42fd89214862dba2efd2fd5bed48232796cb1ef8fb1f702b3933cf1300
3
- size 48528698
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9418b1b4eadc20d7bbd888a9ce103ab9967bcf7a2029a72ec4196614fd782925
3
+ size 49124229
eval_predictions.json CHANGED
The diff for this file is too large to render. See raw diff
 
eval_results.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
- "epoch": 6.0,
3
- "eval_exact_match": 79.30936613055819,
4
- "eval_f1": 88.03839918996134,
5
- "eval_runtime": 37.1649,
6
  "eval_samples": 10808,
7
- "eval_samples_per_second": 290.812,
8
- "eval_steps_per_second": 36.351
9
  }
 
1
  {
2
+ "epoch": 10.0,
3
+ "eval_exact_match": 78.23084200567644,
4
+ "eval_f1": 87.49820647381922,
5
+ "eval_runtime": 36.1971,
6
  "eval_samples": 10808,
7
+ "eval_samples_per_second": 298.588,
8
+ "eval_steps_per_second": 37.323
9
  }
runs/Feb07_00-35-09_xgpi5/events.out.tfevents.1738866795.xgpi5.32501.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12f3fa65d1eadffdc85e71c2f9b05d990cfc42a63727827b12e74f07ce8e6ba7
3
+ size 418
train_results.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
- "epoch": 6.0,
3
- "total_flos": 8808588640659456.0,
4
- "train_loss": 0.4993116462560624,
5
- "train_runtime": 4288.5384,
6
  "train_samples": 88638,
7
- "train_samples_per_second": 124.011,
8
- "train_steps_per_second": 3.875
9
  }
 
1
  {
2
+ "epoch": 10.0,
3
+ "total_flos": 1.468098106776576e+16,
4
+ "train_loss": 0.3405133172148832,
5
+ "train_runtime": 6971.8302,
6
  "train_samples": 88638,
7
+ "train_samples_per_second": 127.137,
8
+ "train_steps_per_second": 3.973
9
  }
trainer_state.json CHANGED
@@ -1,258 +1,412 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 6.0,
5
  "eval_steps": 500,
6
- "global_step": 16620,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.18050541516245489,
13
- "grad_norm": 31.118467330932617,
14
- "learning_rate": 2.909747292418773e-05,
15
- "loss": 1.3055,
16
  "step": 500
17
  },
18
  {
19
  "epoch": 0.36101083032490977,
20
- "grad_norm": 16.015565872192383,
21
- "learning_rate": 2.819494584837545e-05,
22
- "loss": 1.0185,
23
  "step": 1000
24
  },
25
  {
26
  "epoch": 0.5415162454873647,
27
- "grad_norm": 16.465599060058594,
28
- "learning_rate": 2.729241877256318e-05,
29
- "loss": 0.9546,
30
  "step": 1500
31
  },
32
  {
33
  "epoch": 0.7220216606498195,
34
- "grad_norm": 11.709087371826172,
35
- "learning_rate": 2.6389891696750903e-05,
36
- "loss": 0.9259,
37
  "step": 2000
38
  },
39
  {
40
  "epoch": 0.9025270758122743,
41
- "grad_norm": 22.127365112304688,
42
- "learning_rate": 2.548736462093863e-05,
43
- "loss": 0.8966,
44
  "step": 2500
45
  },
46
  {
47
  "epoch": 1.0830324909747293,
48
- "grad_norm": 14.972929954528809,
49
- "learning_rate": 2.4584837545126353e-05,
50
- "loss": 0.7928,
51
  "step": 3000
52
  },
53
  {
54
  "epoch": 1.263537906137184,
55
- "grad_norm": 16.17904281616211,
56
- "learning_rate": 2.368231046931408e-05,
57
- "loss": 0.6899,
58
  "step": 3500
59
  },
60
  {
61
  "epoch": 1.444043321299639,
62
- "grad_norm": 48.85356521606445,
63
- "learning_rate": 2.2779783393501805e-05,
64
- "loss": 0.7077,
65
  "step": 4000
66
  },
67
  {
68
  "epoch": 1.6245487364620939,
69
- "grad_norm": 26.525949478149414,
70
- "learning_rate": 2.1877256317689534e-05,
71
- "loss": 0.6929,
72
  "step": 4500
73
  },
74
  {
75
  "epoch": 1.8050541516245486,
76
- "grad_norm": 17.478593826293945,
77
- "learning_rate": 2.0974729241877255e-05,
78
- "loss": 0.6838,
79
  "step": 5000
80
  },
81
  {
82
  "epoch": 1.9855595667870036,
83
- "grad_norm": 28.06412124633789,
84
- "learning_rate": 2.0072202166064983e-05,
85
- "loss": 0.7085,
86
  "step": 5500
87
  },
88
  {
89
  "epoch": 2.1660649819494586,
90
- "grad_norm": 13.594704627990723,
91
- "learning_rate": 1.9169675090252708e-05,
92
- "loss": 0.5315,
93
  "step": 6000
94
  },
95
  {
96
  "epoch": 2.3465703971119134,
97
- "grad_norm": 18.916522979736328,
98
- "learning_rate": 1.8267148014440436e-05,
99
- "loss": 0.5234,
100
  "step": 6500
101
  },
102
  {
103
  "epoch": 2.527075812274368,
104
- "grad_norm": 27.207935333251953,
105
- "learning_rate": 1.7364620938628157e-05,
106
- "loss": 0.5258,
107
  "step": 7000
108
  },
109
  {
110
  "epoch": 2.707581227436823,
111
- "grad_norm": 14.564919471740723,
112
- "learning_rate": 1.6462093862815885e-05,
113
- "loss": 0.5304,
114
  "step": 7500
115
  },
116
  {
117
  "epoch": 2.888086642599278,
118
- "grad_norm": 26.674354553222656,
119
- "learning_rate": 1.555956678700361e-05,
120
- "loss": 0.5306,
121
  "step": 8000
122
  },
123
  {
124
  "epoch": 3.068592057761733,
125
- "grad_norm": 17.092002868652344,
126
- "learning_rate": 1.4657039711191336e-05,
127
- "loss": 0.4566,
128
  "step": 8500
129
  },
130
  {
131
  "epoch": 3.2490974729241877,
132
- "grad_norm": 26.201404571533203,
133
- "learning_rate": 1.3754512635379063e-05,
134
- "loss": 0.3703,
135
  "step": 9000
136
  },
137
  {
138
  "epoch": 3.4296028880866425,
139
- "grad_norm": 7.88193416595459,
140
- "learning_rate": 1.2851985559566788e-05,
141
- "loss": 0.3834,
142
  "step": 9500
143
  },
144
  {
145
  "epoch": 3.6101083032490973,
146
- "grad_norm": 17.27417755126953,
147
- "learning_rate": 1.1949458483754514e-05,
148
- "loss": 0.3878,
149
  "step": 10000
150
  },
151
  {
152
  "epoch": 3.7906137184115525,
153
- "grad_norm": 19.228748321533203,
154
- "learning_rate": 1.1046931407942239e-05,
155
- "loss": 0.3887,
156
  "step": 10500
157
  },
158
  {
159
  "epoch": 3.9711191335740073,
160
- "grad_norm": 25.48954963684082,
161
- "learning_rate": 1.0144404332129965e-05,
162
- "loss": 0.3818,
163
  "step": 11000
164
  },
165
  {
166
  "epoch": 4.1516245487364625,
167
- "grad_norm": 27.043731689453125,
168
- "learning_rate": 9.24187725631769e-06,
169
- "loss": 0.2671,
170
  "step": 11500
171
  },
172
  {
173
  "epoch": 4.332129963898917,
174
- "grad_norm": 19.578210830688477,
175
- "learning_rate": 8.339350180505416e-06,
176
- "loss": 0.2483,
177
  "step": 12000
178
  },
179
  {
180
  "epoch": 4.512635379061372,
181
- "grad_norm": 21.307836532592773,
182
- "learning_rate": 7.436823104693141e-06,
183
- "loss": 0.2513,
184
  "step": 12500
185
  },
186
  {
187
  "epoch": 4.693140794223827,
188
- "grad_norm": 21.601924896240234,
189
- "learning_rate": 6.534296028880867e-06,
190
- "loss": 0.2517,
191
  "step": 13000
192
  },
193
  {
194
  "epoch": 4.873646209386282,
195
- "grad_norm": 20.635147094726562,
196
- "learning_rate": 5.631768953068592e-06,
197
- "loss": 0.2527,
198
  "step": 13500
199
  },
200
  {
201
  "epoch": 5.054151624548736,
202
- "grad_norm": 21.017452239990234,
203
- "learning_rate": 4.729241877256318e-06,
204
- "loss": 0.2183,
205
  "step": 14000
206
  },
207
  {
208
  "epoch": 5.234657039711191,
209
- "grad_norm": 18.656475067138672,
210
- "learning_rate": 3.826714801444043e-06,
211
- "loss": 0.1441,
212
  "step": 14500
213
  },
214
  {
215
  "epoch": 5.415162454873646,
216
- "grad_norm": 16.917116165161133,
217
- "learning_rate": 2.924187725631769e-06,
218
- "loss": 0.1417,
219
  "step": 15000
220
  },
221
  {
222
  "epoch": 5.595667870036101,
223
- "grad_norm": 25.11797332763672,
224
- "learning_rate": 2.0216606498194946e-06,
225
- "loss": 0.1363,
226
  "step": 15500
227
  },
228
  {
229
  "epoch": 5.776173285198556,
230
- "grad_norm": 25.358491897583008,
231
- "learning_rate": 1.1191335740072204e-06,
232
- "loss": 0.1341,
233
  "step": 16000
234
  },
235
  {
236
  "epoch": 5.956678700361011,
237
- "grad_norm": 24.253881454467773,
238
- "learning_rate": 2.1660649819494586e-07,
239
- "loss": 0.1334,
240
  "step": 16500
241
  },
242
  {
243
- "epoch": 6.0,
244
- "step": 16620,
245
- "total_flos": 8808588640659456.0,
246
- "train_loss": 0.4993116462560624,
247
- "train_runtime": 4288.5384,
248
- "train_samples_per_second": 124.011,
249
- "train_steps_per_second": 3.875
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
250
  }
251
  ],
252
  "logging_steps": 500,
253
- "max_steps": 16620,
254
  "num_input_tokens_seen": 0,
255
- "num_train_epochs": 6,
256
  "save_steps": 500,
257
  "stateful_callbacks": {
258
  "TrainerControl": {
@@ -266,7 +420,7 @@
266
  "attributes": {}
267
  }
268
  },
269
- "total_flos": 8808588640659456.0,
270
  "train_batch_size": 32,
271
  "trial_name": null,
272
  "trial_params": null
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 10.0,
5
  "eval_steps": 500,
6
+ "global_step": 27700,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.18050541516245489,
13
+ "grad_norm": 31.159393310546875,
14
+ "learning_rate": 2.9458483754512636e-05,
15
+ "loss": 1.3045,
16
  "step": 500
17
  },
18
  {
19
  "epoch": 0.36101083032490977,
20
+ "grad_norm": 20.769277572631836,
21
+ "learning_rate": 2.8916967509025274e-05,
22
+ "loss": 1.0186,
23
  "step": 1000
24
  },
25
  {
26
  "epoch": 0.5415162454873647,
27
+ "grad_norm": 16.63530731201172,
28
+ "learning_rate": 2.837545126353791e-05,
29
+ "loss": 0.9621,
30
  "step": 1500
31
  },
32
  {
33
  "epoch": 0.7220216606498195,
34
+ "grad_norm": 13.377410888671875,
35
+ "learning_rate": 2.783393501805054e-05,
36
+ "loss": 0.9288,
37
  "step": 2000
38
  },
39
  {
40
  "epoch": 0.9025270758122743,
41
+ "grad_norm": 20.618640899658203,
42
+ "learning_rate": 2.729241877256318e-05,
43
+ "loss": 0.9028,
44
  "step": 2500
45
  },
46
  {
47
  "epoch": 1.0830324909747293,
48
+ "grad_norm": 14.885836601257324,
49
+ "learning_rate": 2.6750902527075813e-05,
50
+ "loss": 0.796,
51
  "step": 3000
52
  },
53
  {
54
  "epoch": 1.263537906137184,
55
+ "grad_norm": 14.349594116210938,
56
+ "learning_rate": 2.6209386281588448e-05,
57
+ "loss": 0.6996,
58
  "step": 3500
59
  },
60
  {
61
  "epoch": 1.444043321299639,
62
+ "grad_norm": 19.31614875793457,
63
+ "learning_rate": 2.5667870036101083e-05,
64
+ "loss": 0.7184,
65
  "step": 4000
66
  },
67
  {
68
  "epoch": 1.6245487364620939,
69
+ "grad_norm": 40.80540466308594,
70
+ "learning_rate": 2.512635379061372e-05,
71
+ "loss": 0.7076,
72
  "step": 4500
73
  },
74
  {
75
  "epoch": 1.8050541516245486,
76
+ "grad_norm": 13.077240943908691,
77
+ "learning_rate": 2.4584837545126353e-05,
78
+ "loss": 0.6985,
79
  "step": 5000
80
  },
81
  {
82
  "epoch": 1.9855595667870036,
83
+ "grad_norm": 15.288841247558594,
84
+ "learning_rate": 2.4043321299638987e-05,
85
+ "loss": 0.7239,
86
  "step": 5500
87
  },
88
  {
89
  "epoch": 2.1660649819494586,
90
+ "grad_norm": 13.094775199890137,
91
+ "learning_rate": 2.3501805054151626e-05,
92
+ "loss": 0.5435,
93
  "step": 6000
94
  },
95
  {
96
  "epoch": 2.3465703971119134,
97
+ "grad_norm": 15.337054252624512,
98
+ "learning_rate": 2.296028880866426e-05,
99
+ "loss": 0.5383,
100
  "step": 6500
101
  },
102
  {
103
  "epoch": 2.527075812274368,
104
+ "grad_norm": 18.61672019958496,
105
+ "learning_rate": 2.2418772563176895e-05,
106
+ "loss": 0.5502,
107
  "step": 7000
108
  },
109
  {
110
  "epoch": 2.707581227436823,
111
+ "grad_norm": 30.570707321166992,
112
+ "learning_rate": 2.1877256317689534e-05,
113
+ "loss": 0.5557,
114
  "step": 7500
115
  },
116
  {
117
  "epoch": 2.888086642599278,
118
+ "grad_norm": 23.822866439819336,
119
+ "learning_rate": 2.133574007220217e-05,
120
+ "loss": 0.5791,
121
  "step": 8000
122
  },
123
  {
124
  "epoch": 3.068592057761733,
125
+ "grad_norm": 22.35167121887207,
126
+ "learning_rate": 2.07942238267148e-05,
127
+ "loss": 0.5003,
128
  "step": 8500
129
  },
130
  {
131
  "epoch": 3.2490974729241877,
132
+ "grad_norm": 27.621000289916992,
133
+ "learning_rate": 2.0252707581227438e-05,
134
+ "loss": 0.4026,
135
  "step": 9000
136
  },
137
  {
138
  "epoch": 3.4296028880866425,
139
+ "grad_norm": 9.574864387512207,
140
+ "learning_rate": 1.9711191335740073e-05,
141
+ "loss": 0.4173,
142
  "step": 9500
143
  },
144
  {
145
  "epoch": 3.6101083032490973,
146
+ "grad_norm": 15.56059741973877,
147
+ "learning_rate": 1.9169675090252708e-05,
148
+ "loss": 0.4248,
149
  "step": 10000
150
  },
151
  {
152
  "epoch": 3.7906137184115525,
153
+ "grad_norm": 24.032983779907227,
154
+ "learning_rate": 1.8628158844765346e-05,
155
+ "loss": 0.4293,
156
  "step": 10500
157
  },
158
  {
159
  "epoch": 3.9711191335740073,
160
+ "grad_norm": 22.849510192871094,
161
+ "learning_rate": 1.808664259927798e-05,
162
+ "loss": 0.4242,
163
  "step": 11000
164
  },
165
  {
166
  "epoch": 4.1516245487364625,
167
+ "grad_norm": 20.892309188842773,
168
+ "learning_rate": 1.7545126353790612e-05,
169
+ "loss": 0.3113,
170
  "step": 11500
171
  },
172
  {
173
  "epoch": 4.332129963898917,
174
+ "grad_norm": 30.581384658813477,
175
+ "learning_rate": 1.7003610108303247e-05,
176
+ "loss": 0.2946,
177
  "step": 12000
178
  },
179
  {
180
  "epoch": 4.512635379061372,
181
+ "grad_norm": 21.06529426574707,
182
+ "learning_rate": 1.6462093862815885e-05,
183
+ "loss": 0.3022,
184
  "step": 12500
185
  },
186
  {
187
  "epoch": 4.693140794223827,
188
+ "grad_norm": 28.848838806152344,
189
+ "learning_rate": 1.592057761732852e-05,
190
+ "loss": 0.3097,
191
  "step": 13000
192
  },
193
  {
194
  "epoch": 4.873646209386282,
195
+ "grad_norm": 22.211469650268555,
196
+ "learning_rate": 1.5379061371841155e-05,
197
+ "loss": 0.3149,
198
  "step": 13500
199
  },
200
  {
201
  "epoch": 5.054151624548736,
202
+ "grad_norm": 15.2411470413208,
203
+ "learning_rate": 1.483754512635379e-05,
204
+ "loss": 0.2761,
205
  "step": 14000
206
  },
207
  {
208
  "epoch": 5.234657039711191,
209
+ "grad_norm": 22.11151885986328,
210
+ "learning_rate": 1.4296028880866426e-05,
211
+ "loss": 0.1936,
212
  "step": 14500
213
  },
214
  {
215
  "epoch": 5.415162454873646,
216
+ "grad_norm": 19.266048431396484,
217
+ "learning_rate": 1.3754512635379063e-05,
218
+ "loss": 0.1974,
219
  "step": 15000
220
  },
221
  {
222
  "epoch": 5.595667870036101,
223
+ "grad_norm": 31.92010498046875,
224
+ "learning_rate": 1.3212996389891696e-05,
225
+ "loss": 0.2037,
226
  "step": 15500
227
  },
228
  {
229
  "epoch": 5.776173285198556,
230
+ "grad_norm": 23.033552169799805,
231
+ "learning_rate": 1.2671480144404333e-05,
232
+ "loss": 0.1987,
233
  "step": 16000
234
  },
235
  {
236
  "epoch": 5.956678700361011,
237
+ "grad_norm": 26.96350860595703,
238
+ "learning_rate": 1.2129963898916967e-05,
239
+ "loss": 0.2043,
240
  "step": 16500
241
  },
242
  {
243
+ "epoch": 6.137184115523466,
244
+ "grad_norm": 7.384120941162109,
245
+ "learning_rate": 1.1588447653429602e-05,
246
+ "loss": 0.141,
247
+ "step": 17000
248
+ },
249
+ {
250
+ "epoch": 6.317689530685921,
251
+ "grad_norm": 9.360136985778809,
252
+ "learning_rate": 1.1046931407942239e-05,
253
+ "loss": 0.1172,
254
+ "step": 17500
255
+ },
256
+ {
257
+ "epoch": 6.498194945848375,
258
+ "grad_norm": 12.749518394470215,
259
+ "learning_rate": 1.0505415162454874e-05,
260
+ "loss": 0.1122,
261
+ "step": 18000
262
+ },
263
+ {
264
+ "epoch": 6.67870036101083,
265
+ "grad_norm": 22.48105812072754,
266
+ "learning_rate": 9.96389891696751e-06,
267
+ "loss": 0.115,
268
+ "step": 18500
269
+ },
270
+ {
271
+ "epoch": 6.859205776173285,
272
+ "grad_norm": 17.317697525024414,
273
+ "learning_rate": 9.422382671480145e-06,
274
+ "loss": 0.1235,
275
+ "step": 19000
276
+ },
277
+ {
278
+ "epoch": 7.03971119133574,
279
+ "grad_norm": 14.928522109985352,
280
+ "learning_rate": 8.88086642599278e-06,
281
+ "loss": 0.1057,
282
+ "step": 19500
283
+ },
284
+ {
285
+ "epoch": 7.2202166064981945,
286
+ "grad_norm": 32.07418441772461,
287
+ "learning_rate": 8.339350180505416e-06,
288
+ "loss": 0.0537,
289
+ "step": 20000
290
+ },
291
+ {
292
+ "epoch": 7.40072202166065,
293
+ "grad_norm": 27.14509391784668,
294
+ "learning_rate": 7.79783393501805e-06,
295
+ "loss": 0.0562,
296
+ "step": 20500
297
+ },
298
+ {
299
+ "epoch": 7.581227436823105,
300
+ "grad_norm": 8.308027267456055,
301
+ "learning_rate": 7.256317689530686e-06,
302
+ "loss": 0.0635,
303
+ "step": 21000
304
+ },
305
+ {
306
+ "epoch": 7.76173285198556,
307
+ "grad_norm": 33.829307556152344,
308
+ "learning_rate": 6.714801444043321e-06,
309
+ "loss": 0.0589,
310
+ "step": 21500
311
+ },
312
+ {
313
+ "epoch": 7.9422382671480145,
314
+ "grad_norm": 38.503910064697266,
315
+ "learning_rate": 6.173285198555957e-06,
316
+ "loss": 0.0612,
317
+ "step": 22000
318
+ },
319
+ {
320
+ "epoch": 8.12274368231047,
321
+ "grad_norm": 55.769901275634766,
322
+ "learning_rate": 5.631768953068592e-06,
323
+ "loss": 0.0355,
324
+ "step": 22500
325
+ },
326
+ {
327
+ "epoch": 8.303249097472925,
328
+ "grad_norm": 35.5570182800293,
329
+ "learning_rate": 5.090252707581227e-06,
330
+ "loss": 0.0256,
331
+ "step": 23000
332
+ },
333
+ {
334
+ "epoch": 8.483754512635379,
335
+ "grad_norm": 2.204684019088745,
336
+ "learning_rate": 4.548736462093863e-06,
337
+ "loss": 0.028,
338
+ "step": 23500
339
+ },
340
+ {
341
+ "epoch": 8.664259927797834,
342
+ "grad_norm": 0.09754960983991623,
343
+ "learning_rate": 4.0072202166064985e-06,
344
+ "loss": 0.0287,
345
+ "step": 24000
346
+ },
347
+ {
348
+ "epoch": 8.844765342960288,
349
+ "grad_norm": 5.562902927398682,
350
+ "learning_rate": 3.4657039711191337e-06,
351
+ "loss": 0.0284,
352
+ "step": 24500
353
+ },
354
+ {
355
+ "epoch": 9.025270758122744,
356
+ "grad_norm": 0.08126406371593475,
357
+ "learning_rate": 2.924187725631769e-06,
358
+ "loss": 0.0247,
359
+ "step": 25000
360
+ },
361
+ {
362
+ "epoch": 9.205776173285198,
363
+ "grad_norm": 1.6277568340301514,
364
+ "learning_rate": 2.3826714801444047e-06,
365
+ "loss": 0.0104,
366
+ "step": 25500
367
+ },
368
+ {
369
+ "epoch": 9.386281588447654,
370
+ "grad_norm": 0.16050289571285248,
371
+ "learning_rate": 1.8411552346570397e-06,
372
+ "loss": 0.0105,
373
+ "step": 26000
374
+ },
375
+ {
376
+ "epoch": 9.566787003610107,
377
+ "grad_norm": 1.6261656284332275,
378
+ "learning_rate": 1.299638989169675e-06,
379
+ "loss": 0.0108,
380
+ "step": 26500
381
+ },
382
+ {
383
+ "epoch": 9.747292418772563,
384
+ "grad_norm": 0.2381311058998108,
385
+ "learning_rate": 7.581227436823105e-07,
386
+ "loss": 0.0093,
387
+ "step": 27000
388
+ },
389
+ {
390
+ "epoch": 9.927797833935019,
391
+ "grad_norm": 1.3903249502182007,
392
+ "learning_rate": 2.1660649819494586e-07,
393
+ "loss": 0.0086,
394
+ "step": 27500
395
+ },
396
+ {
397
+ "epoch": 10.0,
398
+ "step": 27700,
399
+ "total_flos": 1.468098106776576e+16,
400
+ "train_loss": 0.3405133172148832,
401
+ "train_runtime": 6971.8302,
402
+ "train_samples_per_second": 127.137,
403
+ "train_steps_per_second": 3.973
404
  }
405
  ],
406
  "logging_steps": 500,
407
+ "max_steps": 27700,
408
  "num_input_tokens_seen": 0,
409
+ "num_train_epochs": 10,
410
  "save_steps": 500,
411
  "stateful_callbacks": {
412
  "TrainerControl": {
 
420
  "attributes": {}
421
  }
422
  },
423
+ "total_flos": 1.468098106776576e+16,
424
  "train_batch_size": 32,
425
  "trial_name": null,
426
  "trial_params": null