gokulsrinivasagan committed on
Commit
aba0551
·
verified ·
1 Parent(s): 392806f

End of training

Browse files
Files changed (5) hide show
  1. README.md +14 -2
  2. all_results.json +16 -0
  3. eval_results.json +10 -0
  4. train_results.json +9 -0
  5. trainer_state.json +460 -0
README.md CHANGED
@@ -4,11 +4,23 @@ license: apache-2.0
4
  base_model: distilbert-base-uncased
5
  tags:
6
  - generated_from_trainer
 
 
7
  metrics:
8
  - accuracy
9
  model-index:
10
  - name: tinybert_book_ent_15p_b
11
- results: []
 
 
 
 
 
 
 
 
 
 
12
  ---
13
 
14
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,7 +28,7 @@ should probably proofread and complete it, then remove this comment. -->
16
 
17
  # tinybert_book_ent_15p_b
18
 
19
- This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
20
  It achieves the following results on the evaluation set:
21
  - Loss: 2.3713
22
  - Accuracy: 0.6418
 
4
  base_model: distilbert-base-uncased
5
  tags:
6
  - generated_from_trainer
7
+ datasets:
8
+ - gokulsrinivasagan/processed_wikitext-103-raw-v1-ld
9
  metrics:
10
  - accuracy
11
  model-index:
12
  - name: tinybert_book_ent_15p_b
13
+ results:
14
+ - task:
15
+ name: Masked Language Modeling
16
+ type: fill-mask
17
+ dataset:
18
+ name: gokulsrinivasagan/processed_wikitext-103-raw-v1-ld
19
+ type: gokulsrinivasagan/processed_wikitext-103-raw-v1-ld
20
+ metrics:
21
+ - name: Accuracy
22
+ type: accuracy
23
+ value: 0.6417701351499835
24
  ---
25
 
26
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 
28
 
29
  # tinybert_book_ent_15p_b
30
 
31
+ This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the gokulsrinivasagan/processed_wikitext-103-raw-v1-ld dataset.
32
  It achieves the following results on the evaluation set:
33
  - Loss: 2.3713
34
  - Accuracy: 0.6418
all_results.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 25.0,
3
+ "eval_accuracy": 0.6417701351499835,
4
+ "eval_loss": 2.371307849884033,
5
+ "eval_runtime": 6.438,
6
+ "eval_samples": 479,
7
+ "eval_samples_per_second": 74.403,
8
+ "eval_steps_per_second": 0.466,
9
+ "perplexity": 10.71139202015035,
10
+ "total_flos": 3.003225489805824e+17,
11
+ "train_loss": 3.287461715378128,
12
+ "train_runtime": 79951.4195,
13
+ "train_samples": 228639,
14
+ "train_samples_per_second": 71.493,
15
+ "train_steps_per_second": 0.358
16
+ }
eval_results.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 25.0,
3
+ "eval_accuracy": 0.6417701351499835,
4
+ "eval_loss": 2.371307849884033,
5
+ "eval_runtime": 6.438,
6
+ "eval_samples": 479,
7
+ "eval_samples_per_second": 74.403,
8
+ "eval_steps_per_second": 0.466,
9
+ "perplexity": 10.71139202015035
10
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 25.0,
3
+ "total_flos": 3.003225489805824e+17,
4
+ "train_loss": 3.287461715378128,
5
+ "train_runtime": 79951.4195,
6
+ "train_samples": 228639,
7
+ "train_samples_per_second": 71.493,
8
+ "train_steps_per_second": 0.358
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,460 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 20000,
3
+ "best_metric": 2.371307849884033,
4
+ "best_model_checkpoint": "tinybert_book_ent_15p_b/checkpoint-20000",
5
+ "epoch": 25.0,
6
+ "eval_steps": 10000,
7
+ "global_step": 28600,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.4370629370629371,
14
+ "grad_norm": 4.4531779289245605,
15
+ "learning_rate": 4.9900000000000005e-06,
16
+ "loss": 8.9785,
17
+ "step": 500
18
+ },
19
+ {
20
+ "epoch": 0.8741258741258742,
21
+ "grad_norm": 1.7696887254714966,
22
+ "learning_rate": 9.990000000000001e-06,
23
+ "loss": 6.6226,
24
+ "step": 1000
25
+ },
26
+ {
27
+ "epoch": 1.3111888111888113,
28
+ "grad_norm": 0.5621947050094604,
29
+ "learning_rate": 1.499e-05,
30
+ "loss": 5.5121,
31
+ "step": 1500
32
+ },
33
+ {
34
+ "epoch": 1.7482517482517483,
35
+ "grad_norm": 0.48923590779304504,
36
+ "learning_rate": 1.999e-05,
37
+ "loss": 5.4059,
38
+ "step": 2000
39
+ },
40
+ {
41
+ "epoch": 2.1853146853146854,
42
+ "grad_norm": 0.8969066143035889,
43
+ "learning_rate": 2.4990000000000003e-05,
44
+ "loss": 5.3694,
45
+ "step": 2500
46
+ },
47
+ {
48
+ "epoch": 2.6223776223776225,
49
+ "grad_norm": 0.5264173150062561,
50
+ "learning_rate": 2.9990000000000003e-05,
51
+ "loss": 5.3372,
52
+ "step": 3000
53
+ },
54
+ {
55
+ "epoch": 3.0594405594405596,
56
+ "grad_norm": 0.8283560276031494,
57
+ "learning_rate": 3.499e-05,
58
+ "loss": 5.2569,
59
+ "step": 3500
60
+ },
61
+ {
62
+ "epoch": 3.4965034965034967,
63
+ "grad_norm": 0.8435366153717041,
64
+ "learning_rate": 3.999e-05,
65
+ "loss": 5.1864,
66
+ "step": 4000
67
+ },
68
+ {
69
+ "epoch": 3.9335664335664333,
70
+ "grad_norm": 1.0385633707046509,
71
+ "learning_rate": 4.499e-05,
72
+ "loss": 5.0573,
73
+ "step": 4500
74
+ },
75
+ {
76
+ "epoch": 4.370629370629371,
77
+ "grad_norm": 1.3719793558120728,
78
+ "learning_rate": 4.999e-05,
79
+ "loss": 4.7748,
80
+ "step": 5000
81
+ },
82
+ {
83
+ "epoch": 4.8076923076923075,
84
+ "grad_norm": 1.8106296062469482,
85
+ "learning_rate": 5.499000000000001e-05,
86
+ "loss": 4.532,
87
+ "step": 5500
88
+ },
89
+ {
90
+ "epoch": 5.244755244755245,
91
+ "grad_norm": 1.7491868734359741,
92
+ "learning_rate": 5.999e-05,
93
+ "loss": 4.3431,
94
+ "step": 6000
95
+ },
96
+ {
97
+ "epoch": 5.681818181818182,
98
+ "grad_norm": 1.5972195863723755,
99
+ "learning_rate": 6.499000000000001e-05,
100
+ "loss": 4.1977,
101
+ "step": 6500
102
+ },
103
+ {
104
+ "epoch": 6.118881118881119,
105
+ "grad_norm": 1.8990917205810547,
106
+ "learning_rate": 6.999e-05,
107
+ "loss": 4.0613,
108
+ "step": 7000
109
+ },
110
+ {
111
+ "epoch": 6.555944055944056,
112
+ "grad_norm": 1.9757366180419922,
113
+ "learning_rate": 7.499e-05,
114
+ "loss": 3.8787,
115
+ "step": 7500
116
+ },
117
+ {
118
+ "epoch": 6.993006993006993,
119
+ "grad_norm": 1.5962578058242798,
120
+ "learning_rate": 7.999000000000001e-05,
121
+ "loss": 3.6969,
122
+ "step": 8000
123
+ },
124
+ {
125
+ "epoch": 7.43006993006993,
126
+ "grad_norm": 1.664330244064331,
127
+ "learning_rate": 8.499e-05,
128
+ "loss": 3.5414,
129
+ "step": 8500
130
+ },
131
+ {
132
+ "epoch": 7.867132867132867,
133
+ "grad_norm": 1.4954487085342407,
134
+ "learning_rate": 8.999000000000001e-05,
135
+ "loss": 3.4353,
136
+ "step": 9000
137
+ },
138
+ {
139
+ "epoch": 8.304195804195805,
140
+ "grad_norm": 1.5357409715652466,
141
+ "learning_rate": 9.499e-05,
142
+ "loss": 3.3237,
143
+ "step": 9500
144
+ },
145
+ {
146
+ "epoch": 8.741258741258742,
147
+ "grad_norm": 1.411277174949646,
148
+ "learning_rate": 9.999000000000001e-05,
149
+ "loss": 3.2253,
150
+ "step": 10000
151
+ },
152
+ {
153
+ "epoch": 8.741258741258742,
154
+ "eval_accuracy": 0.5513130425227997,
155
+ "eval_loss": 3.0387790203094482,
156
+ "eval_runtime": 6.6329,
157
+ "eval_samples_per_second": 72.216,
158
+ "eval_steps_per_second": 0.452,
159
+ "step": 10000
160
+ },
161
+ {
162
+ "epoch": 9.178321678321678,
163
+ "grad_norm": 1.1632554531097412,
164
+ "learning_rate": 9.731720430107527e-05,
165
+ "loss": 3.1309,
166
+ "step": 10500
167
+ },
168
+ {
169
+ "epoch": 9.615384615384615,
170
+ "grad_norm": 1.1414564847946167,
171
+ "learning_rate": 9.462903225806452e-05,
172
+ "loss": 3.044,
173
+ "step": 11000
174
+ },
175
+ {
176
+ "epoch": 10.052447552447552,
177
+ "grad_norm": 1.1085693836212158,
178
+ "learning_rate": 9.194086021505377e-05,
179
+ "loss": 2.9744,
180
+ "step": 11500
181
+ },
182
+ {
183
+ "epoch": 10.48951048951049,
184
+ "grad_norm": 0.956007719039917,
185
+ "learning_rate": 8.925268817204302e-05,
186
+ "loss": 2.8925,
187
+ "step": 12000
188
+ },
189
+ {
190
+ "epoch": 10.926573426573427,
191
+ "grad_norm": 0.9377016425132751,
192
+ "learning_rate": 8.656451612903226e-05,
193
+ "loss": 2.8498,
194
+ "step": 12500
195
+ },
196
+ {
197
+ "epoch": 11.363636363636363,
198
+ "grad_norm": 1.0721501111984253,
199
+ "learning_rate": 8.387634408602151e-05,
200
+ "loss": 2.78,
201
+ "step": 13000
202
+ },
203
+ {
204
+ "epoch": 11.8006993006993,
205
+ "grad_norm": 0.906058132648468,
206
+ "learning_rate": 8.118817204301076e-05,
207
+ "loss": 2.7461,
208
+ "step": 13500
209
+ },
210
+ {
211
+ "epoch": 12.237762237762238,
212
+ "grad_norm": 0.9045028686523438,
213
+ "learning_rate": 7.850000000000001e-05,
214
+ "loss": 2.693,
215
+ "step": 14000
216
+ },
217
+ {
218
+ "epoch": 12.674825174825175,
219
+ "grad_norm": 0.8730345368385315,
220
+ "learning_rate": 7.581182795698926e-05,
221
+ "loss": 2.66,
222
+ "step": 14500
223
+ },
224
+ {
225
+ "epoch": 13.111888111888112,
226
+ "grad_norm": 0.8663029670715332,
227
+ "learning_rate": 7.31236559139785e-05,
228
+ "loss": 2.6265,
229
+ "step": 15000
230
+ },
231
+ {
232
+ "epoch": 13.548951048951048,
233
+ "grad_norm": 0.9472974538803101,
234
+ "learning_rate": 7.043548387096775e-05,
235
+ "loss": 2.5889,
236
+ "step": 15500
237
+ },
238
+ {
239
+ "epoch": 13.986013986013987,
240
+ "grad_norm": 0.7939944863319397,
241
+ "learning_rate": 6.7747311827957e-05,
242
+ "loss": 2.568,
243
+ "step": 16000
244
+ },
245
+ {
246
+ "epoch": 14.423076923076923,
247
+ "grad_norm": 0.8526738286018372,
248
+ "learning_rate": 6.505913978494625e-05,
249
+ "loss": 2.5224,
250
+ "step": 16500
251
+ },
252
+ {
253
+ "epoch": 14.86013986013986,
254
+ "grad_norm": 0.7846645712852478,
255
+ "learning_rate": 6.237096774193548e-05,
256
+ "loss": 2.5134,
257
+ "step": 17000
258
+ },
259
+ {
260
+ "epoch": 15.297202797202797,
261
+ "grad_norm": 0.7795770764350891,
262
+ "learning_rate": 5.9682795698924734e-05,
263
+ "loss": 2.4795,
264
+ "step": 17500
265
+ },
266
+ {
267
+ "epoch": 15.734265734265735,
268
+ "grad_norm": 0.8610605001449585,
269
+ "learning_rate": 5.6994623655913984e-05,
270
+ "loss": 2.4604,
271
+ "step": 18000
272
+ },
273
+ {
274
+ "epoch": 16.17132867132867,
275
+ "grad_norm": 0.8211326003074646,
276
+ "learning_rate": 5.4306451612903234e-05,
277
+ "loss": 2.4408,
278
+ "step": 18500
279
+ },
280
+ {
281
+ "epoch": 16.60839160839161,
282
+ "grad_norm": 0.8932536840438843,
283
+ "learning_rate": 5.161827956989248e-05,
284
+ "loss": 2.4163,
285
+ "step": 19000
286
+ },
287
+ {
288
+ "epoch": 17.045454545454547,
289
+ "grad_norm": 0.8186784982681274,
290
+ "learning_rate": 4.893010752688173e-05,
291
+ "loss": 2.4056,
292
+ "step": 19500
293
+ },
294
+ {
295
+ "epoch": 17.482517482517483,
296
+ "grad_norm": 0.8498294353485107,
297
+ "learning_rate": 4.624193548387097e-05,
298
+ "loss": 2.3768,
299
+ "step": 20000
300
+ },
301
+ {
302
+ "epoch": 17.482517482517483,
303
+ "eval_accuracy": 0.6417701351499835,
304
+ "eval_loss": 2.371307849884033,
305
+ "eval_runtime": 6.6242,
306
+ "eval_samples_per_second": 72.311,
307
+ "eval_steps_per_second": 0.453,
308
+ "step": 20000
309
+ },
310
+ {
311
+ "epoch": 17.91958041958042,
312
+ "grad_norm": 0.8237965106964111,
313
+ "learning_rate": 4.355376344086022e-05,
314
+ "loss": 2.3705,
315
+ "step": 20500
316
+ },
317
+ {
318
+ "epoch": 18.356643356643357,
319
+ "grad_norm": 0.8978115916252136,
320
+ "learning_rate": 4.0865591397849464e-05,
321
+ "loss": 2.3441,
322
+ "step": 21000
323
+ },
324
+ {
325
+ "epoch": 18.793706293706293,
326
+ "grad_norm": 0.8014247417449951,
327
+ "learning_rate": 3.8177419354838714e-05,
328
+ "loss": 2.3378,
329
+ "step": 21500
330
+ },
331
+ {
332
+ "epoch": 19.23076923076923,
333
+ "grad_norm": 0.8307129144668579,
334
+ "learning_rate": 3.5489247311827964e-05,
335
+ "loss": 2.3192,
336
+ "step": 22000
337
+ },
338
+ {
339
+ "epoch": 19.667832167832167,
340
+ "grad_norm": 0.8164204359054565,
341
+ "learning_rate": 3.280107526881721e-05,
342
+ "loss": 2.3075,
343
+ "step": 22500
344
+ },
345
+ {
346
+ "epoch": 20.104895104895103,
347
+ "grad_norm": 0.8482570648193359,
348
+ "learning_rate": 3.0112903225806454e-05,
349
+ "loss": 2.3018,
350
+ "step": 23000
351
+ },
352
+ {
353
+ "epoch": 20.541958041958043,
354
+ "grad_norm": 0.7965030670166016,
355
+ "learning_rate": 2.74247311827957e-05,
356
+ "loss": 2.282,
357
+ "step": 23500
358
+ },
359
+ {
360
+ "epoch": 20.97902097902098,
361
+ "grad_norm": 0.8354547023773193,
362
+ "learning_rate": 2.4736559139784947e-05,
363
+ "loss": 2.2817,
364
+ "step": 24000
365
+ },
366
+ {
367
+ "epoch": 21.416083916083917,
368
+ "grad_norm": 0.8033874034881592,
369
+ "learning_rate": 2.2048387096774193e-05,
370
+ "loss": 2.2635,
371
+ "step": 24500
372
+ },
373
+ {
374
+ "epoch": 21.853146853146853,
375
+ "grad_norm": 0.8140653967857361,
376
+ "learning_rate": 1.9360215053763444e-05,
377
+ "loss": 2.2591,
378
+ "step": 25000
379
+ },
380
+ {
381
+ "epoch": 22.29020979020979,
382
+ "grad_norm": 0.8408477306365967,
383
+ "learning_rate": 1.667204301075269e-05,
384
+ "loss": 2.2466,
385
+ "step": 25500
386
+ },
387
+ {
388
+ "epoch": 22.727272727272727,
389
+ "grad_norm": 0.7920801639556885,
390
+ "learning_rate": 1.3983870967741935e-05,
391
+ "loss": 2.244,
392
+ "step": 26000
393
+ },
394
+ {
395
+ "epoch": 23.164335664335663,
396
+ "grad_norm": 0.8369796872138977,
397
+ "learning_rate": 1.1295698924731183e-05,
398
+ "loss": 2.2394,
399
+ "step": 26500
400
+ },
401
+ {
402
+ "epoch": 23.6013986013986,
403
+ "grad_norm": 0.7803018689155579,
404
+ "learning_rate": 8.60752688172043e-06,
405
+ "loss": 2.2262,
406
+ "step": 27000
407
+ },
408
+ {
409
+ "epoch": 24.03846153846154,
410
+ "grad_norm": 0.8060789108276367,
411
+ "learning_rate": 5.9193548387096776e-06,
412
+ "loss": 2.2298,
413
+ "step": 27500
414
+ },
415
+ {
416
+ "epoch": 24.475524475524477,
417
+ "grad_norm": 0.7823363542556763,
418
+ "learning_rate": 3.2311827956989246e-06,
419
+ "loss": 2.2209,
420
+ "step": 28000
421
+ },
422
+ {
423
+ "epoch": 24.912587412587413,
424
+ "grad_norm": 0.8148164749145508,
425
+ "learning_rate": 5.430107526881721e-07,
426
+ "loss": 2.2191,
427
+ "step": 28500
428
+ },
429
+ {
430
+ "epoch": 25.0,
431
+ "step": 28600,
432
+ "total_flos": 3.003225489805824e+17,
433
+ "train_loss": 3.287461715378128,
434
+ "train_runtime": 79951.4195,
435
+ "train_samples_per_second": 71.493,
436
+ "train_steps_per_second": 0.358
437
+ }
438
+ ],
439
+ "logging_steps": 500,
440
+ "max_steps": 28600,
441
+ "num_input_tokens_seen": 0,
442
+ "num_train_epochs": 25,
443
+ "save_steps": 10000,
444
+ "stateful_callbacks": {
445
+ "TrainerControl": {
446
+ "args": {
447
+ "should_epoch_stop": false,
448
+ "should_evaluate": false,
449
+ "should_log": false,
450
+ "should_save": true,
451
+ "should_training_stop": true
452
+ },
453
+ "attributes": {}
454
+ }
455
+ },
456
+ "total_flos": 3.003225489805824e+17,
457
+ "train_batch_size": 200,
458
+ "trial_name": null,
459
+ "trial_params": null
460
+ }