JennnDexter committed on
Commit
739e079
·
1 Parent(s): afb430f

End of training

Browse files
Files changed (5) hide show
  1. README.md +5 -5
  2. all_results.json +16 -0
  3. eval_results.json +10 -0
  4. train_results.json +9 -0
  5. trainer_state.json +325 -0
README.md CHANGED
@@ -14,7 +14,7 @@ model-index:
14
  name: Masked Language Modeling
15
  type: fill-mask
16
  dataset:
17
- name: wikitext
18
  type: wikitext
19
  config: wikitext-2-raw-v1
20
  split: validation
@@ -22,7 +22,7 @@ model-index:
22
  metrics:
23
  - name: Accuracy
24
  type: accuracy
25
- value: 0.7288328898061153
26
  ---
27
 
28
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -30,10 +30,10 @@ should probably proofread and complete it, then remove this comment. -->
30
 
31
  # mlm
32
 
33
- This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the wikitext dataset.
34
  It achieves the following results on the evaluation set:
35
- - Loss: 1.2628
36
- - Accuracy: 0.7288
37
 
38
  ## Model description
39
 
 
14
  name: Masked Language Modeling
15
  type: fill-mask
16
  dataset:
17
+ name: wikitext wikitext-2-raw-v1
18
  type: wikitext
19
  config: wikitext-2-raw-v1
20
  split: validation
 
22
  metrics:
23
  - name: Accuracy
24
  type: accuracy
25
+ value: 0.7255275697753574
26
  ---
27
 
28
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 
30
 
31
  # mlm
32
 
33
+ This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the wikitext wikitext-2-raw-v1 dataset.
34
  It achieves the following results on the evaluation set:
35
+ - Loss: 1.2799
36
+ - Accuracy: 0.7255
37
 
38
  ## Model description
39
 
all_results.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 3.0,
3
+ "eval_accuracy": 0.7255275697753574,
4
+ "eval_loss": 1.2798649072647095,
5
+ "eval_runtime": 13.3316,
6
+ "eval_samples": 496,
7
+ "eval_samples_per_second": 37.205,
8
+ "eval_steps_per_second": 4.651,
9
+ "perplexity": 3.596153878488844,
10
+ "total_flos": 3789443078682624.0,
11
+ "train_loss": 1.420832945505778,
12
+ "train_runtime": 1064.4338,
13
+ "train_samples": 4798,
14
+ "train_samples_per_second": 13.523,
15
+ "train_steps_per_second": 0.423
16
+ }
eval_results.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 3.0,
3
+ "eval_accuracy": 0.7255275697753574,
4
+ "eval_loss": 1.2798649072647095,
5
+ "eval_runtime": 13.3316,
6
+ "eval_samples": 496,
7
+ "eval_samples_per_second": 37.205,
8
+ "eval_steps_per_second": 4.651,
9
+ "perplexity": 3.596153878488844
10
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 3.0,
3
+ "total_flos": 3789443078682624.0,
4
+ "train_loss": 1.420832945505778,
5
+ "train_runtime": 1064.4338,
6
+ "train_samples": 4798,
7
+ "train_samples_per_second": 13.523,
8
+ "train_steps_per_second": 0.423
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,325 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.7289605637620273,
3
+ "best_model_checkpoint": "D:/1_SyscoPY_D/NLP/Data/Transformers_Hug/checkpoint/roberta-base\\checkpoint-150",
4
+ "epoch": 3.0,
5
+ "eval_steps": 500,
6
+ "global_step": 450,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.07,
13
+ "learning_rate": 6.666666666666667e-06,
14
+ "loss": 1.8568,
15
+ "step": 10
16
+ },
17
+ {
18
+ "epoch": 0.13,
19
+ "learning_rate": 1.3333333333333333e-05,
20
+ "loss": 1.7794,
21
+ "step": 20
22
+ },
23
+ {
24
+ "epoch": 0.2,
25
+ "learning_rate": 1.9999999999999998e-05,
26
+ "loss": 1.5937,
27
+ "step": 30
28
+ },
29
+ {
30
+ "epoch": 0.27,
31
+ "learning_rate": 2.6666666666666667e-05,
32
+ "loss": 1.557,
33
+ "step": 40
34
+ },
35
+ {
36
+ "epoch": 0.33,
37
+ "learning_rate": 2.962962962962963e-05,
38
+ "loss": 1.5075,
39
+ "step": 50
40
+ },
41
+ {
42
+ "epoch": 0.4,
43
+ "learning_rate": 2.8888888888888888e-05,
44
+ "loss": 1.4945,
45
+ "step": 60
46
+ },
47
+ {
48
+ "epoch": 0.47,
49
+ "learning_rate": 2.8148148148148147e-05,
50
+ "loss": 1.4764,
51
+ "step": 70
52
+ },
53
+ {
54
+ "epoch": 0.53,
55
+ "learning_rate": 2.7481481481481482e-05,
56
+ "loss": 1.4568,
57
+ "step": 80
58
+ },
59
+ {
60
+ "epoch": 0.6,
61
+ "learning_rate": 2.6740740740740743e-05,
62
+ "loss": 1.4276,
63
+ "step": 90
64
+ },
65
+ {
66
+ "epoch": 0.67,
67
+ "learning_rate": 2.6000000000000002e-05,
68
+ "loss": 1.4126,
69
+ "step": 100
70
+ },
71
+ {
72
+ "epoch": 0.73,
73
+ "learning_rate": 2.525925925925926e-05,
74
+ "loss": 1.3712,
75
+ "step": 110
76
+ },
77
+ {
78
+ "epoch": 0.8,
79
+ "learning_rate": 2.451851851851852e-05,
80
+ "loss": 1.4503,
81
+ "step": 120
82
+ },
83
+ {
84
+ "epoch": 0.87,
85
+ "learning_rate": 2.377777777777778e-05,
86
+ "loss": 1.4199,
87
+ "step": 130
88
+ },
89
+ {
90
+ "epoch": 0.93,
91
+ "learning_rate": 2.303703703703704e-05,
92
+ "loss": 1.469,
93
+ "step": 140
94
+ },
95
+ {
96
+ "epoch": 1.0,
97
+ "learning_rate": 2.2296296296296297e-05,
98
+ "loss": 1.3784,
99
+ "step": 150
100
+ },
101
+ {
102
+ "epoch": 1.0,
103
+ "eval_accuracy": 0.7289605637620273,
104
+ "eval_loss": 1.282206416130066,
105
+ "eval_runtime": 13.5027,
106
+ "eval_samples_per_second": 36.733,
107
+ "eval_steps_per_second": 4.592,
108
+ "step": 150
109
+ },
110
+ {
111
+ "epoch": 1.07,
112
+ "learning_rate": 2.155555555555556e-05,
113
+ "loss": 1.3744,
114
+ "step": 160
115
+ },
116
+ {
117
+ "epoch": 1.13,
118
+ "learning_rate": 2.0814814814814817e-05,
119
+ "loss": 1.4279,
120
+ "step": 170
121
+ },
122
+ {
123
+ "epoch": 1.2,
124
+ "learning_rate": 2.014814814814815e-05,
125
+ "loss": 1.4016,
126
+ "step": 180
127
+ },
128
+ {
129
+ "epoch": 1.27,
130
+ "learning_rate": 1.9407407407407407e-05,
131
+ "loss": 1.3812,
132
+ "step": 190
133
+ },
134
+ {
135
+ "epoch": 1.33,
136
+ "learning_rate": 1.866666666666667e-05,
137
+ "loss": 1.415,
138
+ "step": 200
139
+ },
140
+ {
141
+ "epoch": 1.4,
142
+ "learning_rate": 1.7925925925925927e-05,
143
+ "loss": 1.3856,
144
+ "step": 210
145
+ },
146
+ {
147
+ "epoch": 1.47,
148
+ "learning_rate": 1.7185185185185185e-05,
149
+ "loss": 1.3874,
150
+ "step": 220
151
+ },
152
+ {
153
+ "epoch": 1.53,
154
+ "learning_rate": 1.6444444444444444e-05,
155
+ "loss": 1.3965,
156
+ "step": 230
157
+ },
158
+ {
159
+ "epoch": 1.6,
160
+ "learning_rate": 1.5703703703703705e-05,
161
+ "loss": 1.4211,
162
+ "step": 240
163
+ },
164
+ {
165
+ "epoch": 1.67,
166
+ "learning_rate": 1.4962962962962964e-05,
167
+ "loss": 1.3866,
168
+ "step": 250
169
+ },
170
+ {
171
+ "epoch": 1.73,
172
+ "learning_rate": 1.4222222222222224e-05,
173
+ "loss": 1.3759,
174
+ "step": 260
175
+ },
176
+ {
177
+ "epoch": 1.8,
178
+ "learning_rate": 1.3481481481481482e-05,
179
+ "loss": 1.4049,
180
+ "step": 270
181
+ },
182
+ {
183
+ "epoch": 1.87,
184
+ "learning_rate": 1.2740740740740742e-05,
185
+ "loss": 1.3919,
186
+ "step": 280
187
+ },
188
+ {
189
+ "epoch": 1.93,
190
+ "learning_rate": 1.2e-05,
191
+ "loss": 1.3781,
192
+ "step": 290
193
+ },
194
+ {
195
+ "epoch": 2.0,
196
+ "learning_rate": 1.125925925925926e-05,
197
+ "loss": 1.3804,
198
+ "step": 300
199
+ },
200
+ {
201
+ "epoch": 2.0,
202
+ "eval_accuracy": 0.7273364801078894,
203
+ "eval_loss": 1.275496482849121,
204
+ "eval_runtime": 13.2216,
205
+ "eval_samples_per_second": 37.514,
206
+ "eval_steps_per_second": 4.689,
207
+ "step": 300
208
+ },
209
+ {
210
+ "epoch": 2.07,
211
+ "learning_rate": 1.051851851851852e-05,
212
+ "loss": 1.3812,
213
+ "step": 310
214
+ },
215
+ {
216
+ "epoch": 2.13,
217
+ "learning_rate": 9.777777777777779e-06,
218
+ "loss": 1.4056,
219
+ "step": 320
220
+ },
221
+ {
222
+ "epoch": 2.2,
223
+ "learning_rate": 9.037037037037039e-06,
224
+ "loss": 1.3889,
225
+ "step": 330
226
+ },
227
+ {
228
+ "epoch": 2.27,
229
+ "learning_rate": 8.296296296296295e-06,
230
+ "loss": 1.3811,
231
+ "step": 340
232
+ },
233
+ {
234
+ "epoch": 2.33,
235
+ "learning_rate": 7.555555555555555e-06,
236
+ "loss": 1.3451,
237
+ "step": 350
238
+ },
239
+ {
240
+ "epoch": 2.4,
241
+ "learning_rate": 6.814814814814815e-06,
242
+ "loss": 1.3535,
243
+ "step": 360
244
+ },
245
+ {
246
+ "epoch": 2.47,
247
+ "learning_rate": 6.0740740740740745e-06,
248
+ "loss": 1.3702,
249
+ "step": 370
250
+ },
251
+ {
252
+ "epoch": 2.53,
253
+ "learning_rate": 5.333333333333334e-06,
254
+ "loss": 1.3499,
255
+ "step": 380
256
+ },
257
+ {
258
+ "epoch": 2.6,
259
+ "learning_rate": 4.592592592592593e-06,
260
+ "loss": 1.3318,
261
+ "step": 390
262
+ },
263
+ {
264
+ "epoch": 2.67,
265
+ "learning_rate": 3.851851851851852e-06,
266
+ "loss": 1.3391,
267
+ "step": 400
268
+ },
269
+ {
270
+ "epoch": 2.73,
271
+ "learning_rate": 3.111111111111111e-06,
272
+ "loss": 1.3648,
273
+ "step": 410
274
+ },
275
+ {
276
+ "epoch": 2.8,
277
+ "learning_rate": 2.3703703703703703e-06,
278
+ "loss": 1.3147,
279
+ "step": 420
280
+ },
281
+ {
282
+ "epoch": 2.87,
283
+ "learning_rate": 1.6296296296296297e-06,
284
+ "loss": 1.3406,
285
+ "step": 430
286
+ },
287
+ {
288
+ "epoch": 2.93,
289
+ "learning_rate": 8.88888888888889e-07,
290
+ "loss": 1.3526,
291
+ "step": 440
292
+ },
293
+ {
294
+ "epoch": 3.0,
295
+ "learning_rate": 1.4814814814814815e-07,
296
+ "loss": 1.3586,
297
+ "step": 450
298
+ },
299
+ {
300
+ "epoch": 3.0,
301
+ "eval_accuracy": 0.7288328898061153,
302
+ "eval_loss": 1.2627531290054321,
303
+ "eval_runtime": 13.2416,
304
+ "eval_samples_per_second": 37.458,
305
+ "eval_steps_per_second": 4.682,
306
+ "step": 450
307
+ },
308
+ {
309
+ "epoch": 3.0,
310
+ "step": 450,
311
+ "total_flos": 3789443078682624.0,
312
+ "train_loss": 1.420832945505778,
313
+ "train_runtime": 1064.4338,
314
+ "train_samples_per_second": 13.523,
315
+ "train_steps_per_second": 0.423
316
+ }
317
+ ],
318
+ "logging_steps": 10,
319
+ "max_steps": 450,
320
+ "num_train_epochs": 3,
321
+ "save_steps": 100,
322
+ "total_flos": 3789443078682624.0,
323
+ "trial_name": null,
324
+ "trial_params": null
325
+ }