UMCU committed on
Commit
f9bccef
·
verified ·
1 Parent(s): 48071b0

Upload 10 files

Browse files
config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "RobertaForSequenceClassification"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "bos_token_id": 0,
7
+ "classifier_dropout": null,
8
+ "eos_token_id": 2,
9
+ "gradient_checkpointing": false,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 3072,
15
+ "layer_norm_eps": 1e-12,
16
+ "max_position_embeddings": 514,
17
+ "model_type": "roberta",
18
+ "num_attention_heads": 12,
19
+ "num_hidden_layers": 12,
20
+ "pad_token_id": 1,
21
+ "position_embedding_type": "absolute",
22
+ "problem_type": "single_label_classification",
23
+ "torch_dtype": "bfloat16",
24
+ "transformers_version": "4.53.3",
25
+ "type_vocab_size": 1,
26
+ "use_cache": true,
27
+ "vocab_size": 52000
28
+ }
label_mappings.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "label_to_id": {
3
+ "0": 0,
4
+ "1": 1
5
+ },
6
+ "id_to_label": {
7
+ "0": 0,
8
+ "1": 1
9
+ },
10
+ "num_labels": 2
11
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:765858bc8d2b85435aba93a0212b2c498e194ab3fe38b431942387f4986acf27
3
+ size 251983588
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "cls_token": {
10
+ "content": "<s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "eos_token": {
17
+ "content": "</s>",
18
+ "lstrip": false,
19
+ "normalized": true,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "mask_token": {
24
+ "content": "<mask>",
25
+ "lstrip": true,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "pad_token": {
31
+ "content": "<pad>",
32
+ "lstrip": false,
33
+ "normalized": true,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ },
37
+ "sep_token": {
38
+ "content": "</s>",
39
+ "lstrip": false,
40
+ "normalized": true,
41
+ "rstrip": false,
42
+ "single_word": false
43
+ },
44
+ "unk_token": {
45
+ "content": "<unk>",
46
+ "lstrip": false,
47
+ "normalized": true,
48
+ "rstrip": false,
49
+ "single_word": false
50
+ }
51
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "<s>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "<pad>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": "<unk>",
30
+ "lstrip": false,
31
+ "normalized": true,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "4": {
37
+ "content": "<mask>",
38
+ "lstrip": true,
39
+ "normalized": true,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ }
44
+ },
45
+ "bos_token": "<s>",
46
+ "clean_up_tokenization_spaces": false,
47
+ "cls_token": "<s>",
48
+ "eos_token": "</s>",
49
+ "errors": "replace",
50
+ "extra_special_tokens": {},
51
+ "mask_token": "<mask>",
52
+ "model_max_length": 1000000000000000019884624838656,
53
+ "pad_token": "<pad>",
54
+ "sep_token": "</s>",
55
+ "tokenizer_class": "RobertaTokenizer",
56
+ "trim_offsets": true,
57
+ "unk_token": "<unk>"
58
+ }
trainer_state.json ADDED
@@ -0,0 +1,472 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 344,
3
+ "best_metric": 0.8545918367346939,
4
+ "best_model_checkpoint": "../results\\checkpoint-344",
5
+ "epoch": 6.0,
6
+ "eval_steps": 500,
7
+ "global_step": 516,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.11627906976744186,
14
+ "grad_norm": 3.748704671859741,
15
+ "learning_rate": 1.2000000000000002e-06,
16
+ "loss": 0.7308,
17
+ "step": 10
18
+ },
19
+ {
20
+ "epoch": 0.23255813953488372,
21
+ "grad_norm": 4.270473957061768,
22
+ "learning_rate": 2.5333333333333338e-06,
23
+ "loss": 0.711,
24
+ "step": 20
25
+ },
26
+ {
27
+ "epoch": 0.3488372093023256,
28
+ "grad_norm": 3.427567481994629,
29
+ "learning_rate": 3.866666666666667e-06,
30
+ "loss": 0.6948,
31
+ "step": 30
32
+ },
33
+ {
34
+ "epoch": 0.46511627906976744,
35
+ "grad_norm": 2.623924732208252,
36
+ "learning_rate": 5.2e-06,
37
+ "loss": 0.6786,
38
+ "step": 40
39
+ },
40
+ {
41
+ "epoch": 0.5813953488372093,
42
+ "grad_norm": 3.8385608196258545,
43
+ "learning_rate": 6.533333333333334e-06,
44
+ "loss": 0.6393,
45
+ "step": 50
46
+ },
47
+ {
48
+ "epoch": 0.6976744186046512,
49
+ "grad_norm": 6.035278797149658,
50
+ "learning_rate": 7.866666666666667e-06,
51
+ "loss": 0.6001,
52
+ "step": 60
53
+ },
54
+ {
55
+ "epoch": 0.813953488372093,
56
+ "grad_norm": 12.787239074707031,
57
+ "learning_rate": 9.200000000000002e-06,
58
+ "loss": 0.5629,
59
+ "step": 70
60
+ },
61
+ {
62
+ "epoch": 0.9302325581395349,
63
+ "grad_norm": 5.702328681945801,
64
+ "learning_rate": 1.0533333333333333e-05,
65
+ "loss": 0.5313,
66
+ "step": 80
67
+ },
68
+ {
69
+ "epoch": 1.0,
70
+ "eval_accuracy": 0.7,
71
+ "eval_f1": 0.7009200572480064,
72
+ "eval_loss": 0.5837136507034302,
73
+ "eval_precision": 0.7089869281045751,
74
+ "eval_recall": 0.7,
75
+ "eval_runtime": 41.9795,
76
+ "eval_samples_per_second": 1.667,
77
+ "eval_steps_per_second": 0.071,
78
+ "step": 86
79
+ },
80
+ {
81
+ "epoch": 1.0465116279069768,
82
+ "grad_norm": 9.588857650756836,
83
+ "learning_rate": 1.186666666666667e-05,
84
+ "loss": 0.5636,
85
+ "step": 90
86
+ },
87
+ {
88
+ "epoch": 1.1627906976744187,
89
+ "grad_norm": 11.674090385437012,
90
+ "learning_rate": 1.3200000000000002e-05,
91
+ "loss": 0.5236,
92
+ "step": 100
93
+ },
94
+ {
95
+ "epoch": 1.2790697674418605,
96
+ "grad_norm": 6.487068176269531,
97
+ "learning_rate": 1.4533333333333335e-05,
98
+ "loss": 0.4575,
99
+ "step": 110
100
+ },
101
+ {
102
+ "epoch": 1.3953488372093024,
103
+ "grad_norm": 7.364016532897949,
104
+ "learning_rate": 1.586666666666667e-05,
105
+ "loss": 0.436,
106
+ "step": 120
107
+ },
108
+ {
109
+ "epoch": 1.5116279069767442,
110
+ "grad_norm": 3.313822031021118,
111
+ "learning_rate": 1.72e-05,
112
+ "loss": 0.4582,
113
+ "step": 130
114
+ },
115
+ {
116
+ "epoch": 1.627906976744186,
117
+ "grad_norm": 9.990741729736328,
118
+ "learning_rate": 1.8533333333333334e-05,
119
+ "loss": 0.4913,
120
+ "step": 140
121
+ },
122
+ {
123
+ "epoch": 1.744186046511628,
124
+ "grad_norm": 3.886936664581299,
125
+ "learning_rate": 1.9866666666666667e-05,
126
+ "loss": 0.4572,
127
+ "step": 150
128
+ },
129
+ {
130
+ "epoch": 1.8604651162790697,
131
+ "grad_norm": 20.496986389160156,
132
+ "learning_rate": 1.9601769911504427e-05,
133
+ "loss": 0.4328,
134
+ "step": 160
135
+ },
136
+ {
137
+ "epoch": 1.9767441860465116,
138
+ "grad_norm": 4.413027286529541,
139
+ "learning_rate": 1.915929203539823e-05,
140
+ "loss": 0.4989,
141
+ "step": 170
142
+ },
143
+ {
144
+ "epoch": 2.0,
145
+ "eval_accuracy": 0.7,
146
+ "eval_f1": 0.6979758740543855,
147
+ "eval_loss": 0.5208796858787537,
148
+ "eval_precision": 0.7318027210884354,
149
+ "eval_recall": 0.7,
150
+ "eval_runtime": 40.632,
151
+ "eval_samples_per_second": 1.723,
152
+ "eval_steps_per_second": 0.074,
153
+ "step": 172
154
+ },
155
+ {
156
+ "epoch": 2.0930232558139537,
157
+ "grad_norm": 4.327018737792969,
158
+ "learning_rate": 1.8716814159292035e-05,
159
+ "loss": 0.4098,
160
+ "step": 180
161
+ },
162
+ {
163
+ "epoch": 2.2093023255813953,
164
+ "grad_norm": 3.8148539066314697,
165
+ "learning_rate": 1.8274336283185843e-05,
166
+ "loss": 0.4117,
167
+ "step": 190
168
+ },
169
+ {
170
+ "epoch": 2.3255813953488373,
171
+ "grad_norm": 10.718646049499512,
172
+ "learning_rate": 1.7831858407079646e-05,
173
+ "loss": 0.4328,
174
+ "step": 200
175
+ },
176
+ {
177
+ "epoch": 2.441860465116279,
178
+ "grad_norm": 4.208929538726807,
179
+ "learning_rate": 1.7389380530973454e-05,
180
+ "loss": 0.3792,
181
+ "step": 210
182
+ },
183
+ {
184
+ "epoch": 2.558139534883721,
185
+ "grad_norm": 7.7923479080200195,
186
+ "learning_rate": 1.6946902654867258e-05,
187
+ "loss": 0.3999,
188
+ "step": 220
189
+ },
190
+ {
191
+ "epoch": 2.6744186046511627,
192
+ "grad_norm": 4.283344745635986,
193
+ "learning_rate": 1.6504424778761065e-05,
194
+ "loss": 0.3568,
195
+ "step": 230
196
+ },
197
+ {
198
+ "epoch": 2.7906976744186047,
199
+ "grad_norm": 5.532216548919678,
200
+ "learning_rate": 1.606194690265487e-05,
201
+ "loss": 0.4055,
202
+ "step": 240
203
+ },
204
+ {
205
+ "epoch": 2.9069767441860463,
206
+ "grad_norm": 3.663770914077759,
207
+ "learning_rate": 1.5619469026548676e-05,
208
+ "loss": 0.3422,
209
+ "step": 250
210
+ },
211
+ {
212
+ "epoch": 3.0,
213
+ "eval_accuracy": 0.7571428571428571,
214
+ "eval_f1": 0.7574933003504432,
215
+ "eval_loss": 0.4062376022338867,
216
+ "eval_precision": 0.7581531954887218,
217
+ "eval_recall": 0.7571428571428571,
218
+ "eval_runtime": 39.0639,
219
+ "eval_samples_per_second": 1.792,
220
+ "eval_steps_per_second": 0.077,
221
+ "step": 258
222
+ },
223
+ {
224
+ "epoch": 3.0232558139534884,
225
+ "grad_norm": 4.565627098083496,
226
+ "learning_rate": 1.517699115044248e-05,
227
+ "loss": 0.3082,
228
+ "step": 260
229
+ },
230
+ {
231
+ "epoch": 3.13953488372093,
232
+ "grad_norm": 14.768342971801758,
233
+ "learning_rate": 1.4734513274336284e-05,
234
+ "loss": 0.3585,
235
+ "step": 270
236
+ },
237
+ {
238
+ "epoch": 3.255813953488372,
239
+ "grad_norm": 6.304065704345703,
240
+ "learning_rate": 1.429203539823009e-05,
241
+ "loss": 0.3781,
242
+ "step": 280
243
+ },
244
+ {
245
+ "epoch": 3.3720930232558137,
246
+ "grad_norm": 8.55212688446045,
247
+ "learning_rate": 1.3849557522123895e-05,
248
+ "loss": 0.3061,
249
+ "step": 290
250
+ },
251
+ {
252
+ "epoch": 3.488372093023256,
253
+ "grad_norm": 10.989974975585938,
254
+ "learning_rate": 1.34070796460177e-05,
255
+ "loss": 0.3507,
256
+ "step": 300
257
+ },
258
+ {
259
+ "epoch": 3.604651162790698,
260
+ "grad_norm": 8.045317649841309,
261
+ "learning_rate": 1.2964601769911504e-05,
262
+ "loss": 0.3536,
263
+ "step": 310
264
+ },
265
+ {
266
+ "epoch": 3.7209302325581395,
267
+ "grad_norm": 4.802742958068848,
268
+ "learning_rate": 1.2522123893805312e-05,
269
+ "loss": 0.3107,
270
+ "step": 320
271
+ },
272
+ {
273
+ "epoch": 3.8372093023255816,
274
+ "grad_norm": 2.4994394779205322,
275
+ "learning_rate": 1.2079646017699115e-05,
276
+ "loss": 0.3118,
277
+ "step": 330
278
+ },
279
+ {
280
+ "epoch": 3.953488372093023,
281
+ "grad_norm": 7.95784854888916,
282
+ "learning_rate": 1.1637168141592921e-05,
283
+ "loss": 0.305,
284
+ "step": 340
285
+ },
286
+ {
287
+ "epoch": 4.0,
288
+ "eval_accuracy": 0.8571428571428571,
289
+ "eval_f1": 0.8545918367346939,
290
+ "eval_loss": 0.34065258502960205,
291
+ "eval_precision": 0.8655238095238094,
292
+ "eval_recall": 0.8571428571428571,
293
+ "eval_runtime": 39.7031,
294
+ "eval_samples_per_second": 1.763,
295
+ "eval_steps_per_second": 0.076,
296
+ "step": 344
297
+ },
298
+ {
299
+ "epoch": 4.069767441860465,
300
+ "grad_norm": 8.617964744567871,
301
+ "learning_rate": 1.1194690265486727e-05,
302
+ "loss": 0.3376,
303
+ "step": 350
304
+ },
305
+ {
306
+ "epoch": 4.186046511627907,
307
+ "grad_norm": 13.136701583862305,
308
+ "learning_rate": 1.0752212389380532e-05,
309
+ "loss": 0.3829,
310
+ "step": 360
311
+ },
312
+ {
313
+ "epoch": 4.3023255813953485,
314
+ "grad_norm": 3.352994203567505,
315
+ "learning_rate": 1.0309734513274336e-05,
316
+ "loss": 0.3315,
317
+ "step": 370
318
+ },
319
+ {
320
+ "epoch": 4.4186046511627906,
321
+ "grad_norm": 4.276778697967529,
322
+ "learning_rate": 9.867256637168142e-06,
323
+ "loss": 0.2992,
324
+ "step": 380
325
+ },
326
+ {
327
+ "epoch": 4.534883720930233,
328
+ "grad_norm": 4.883634090423584,
329
+ "learning_rate": 9.424778761061947e-06,
330
+ "loss": 0.2683,
331
+ "step": 390
332
+ },
333
+ {
334
+ "epoch": 4.651162790697675,
335
+ "grad_norm": 7.341738700866699,
336
+ "learning_rate": 8.982300884955753e-06,
337
+ "loss": 0.262,
338
+ "step": 400
339
+ },
340
+ {
341
+ "epoch": 4.767441860465116,
342
+ "grad_norm": 8.630583763122559,
343
+ "learning_rate": 8.539823008849558e-06,
344
+ "loss": 0.2658,
345
+ "step": 410
346
+ },
347
+ {
348
+ "epoch": 4.883720930232558,
349
+ "grad_norm": 11.391676902770996,
350
+ "learning_rate": 8.097345132743364e-06,
351
+ "loss": 0.3288,
352
+ "step": 420
353
+ },
354
+ {
355
+ "epoch": 5.0,
356
+ "grad_norm": 44.6805419921875,
357
+ "learning_rate": 7.654867256637168e-06,
358
+ "loss": 0.314,
359
+ "step": 430
360
+ },
361
+ {
362
+ "epoch": 5.0,
363
+ "eval_accuracy": 0.7714285714285715,
364
+ "eval_f1": 0.7714285714285715,
365
+ "eval_loss": 0.43682464957237244,
366
+ "eval_precision": 0.7714285714285715,
367
+ "eval_recall": 0.7714285714285715,
368
+ "eval_runtime": 38.7605,
369
+ "eval_samples_per_second": 1.806,
370
+ "eval_steps_per_second": 0.077,
371
+ "step": 430
372
+ },
373
+ {
374
+ "epoch": 5.116279069767442,
375
+ "grad_norm": 3.780876398086548,
376
+ "learning_rate": 7.212389380530974e-06,
377
+ "loss": 0.3121,
378
+ "step": 440
379
+ },
380
+ {
381
+ "epoch": 5.232558139534884,
382
+ "grad_norm": 4.6331000328063965,
383
+ "learning_rate": 6.76991150442478e-06,
384
+ "loss": 0.3032,
385
+ "step": 450
386
+ },
387
+ {
388
+ "epoch": 5.348837209302325,
389
+ "grad_norm": 3.838736057281494,
390
+ "learning_rate": 6.3274336283185845e-06,
391
+ "loss": 0.1971,
392
+ "step": 460
393
+ },
394
+ {
395
+ "epoch": 5.465116279069767,
396
+ "grad_norm": 7.0051469802856445,
397
+ "learning_rate": 5.88495575221239e-06,
398
+ "loss": 0.2727,
399
+ "step": 470
400
+ },
401
+ {
402
+ "epoch": 5.5813953488372094,
403
+ "grad_norm": 6.933923721313477,
404
+ "learning_rate": 5.442477876106195e-06,
405
+ "loss": 0.2669,
406
+ "step": 480
407
+ },
408
+ {
409
+ "epoch": 5.6976744186046515,
410
+ "grad_norm": 4.192832946777344,
411
+ "learning_rate": 5e-06,
412
+ "loss": 0.2535,
413
+ "step": 490
414
+ },
415
+ {
416
+ "epoch": 5.813953488372093,
417
+ "grad_norm": 4.221975803375244,
418
+ "learning_rate": 4.557522123893805e-06,
419
+ "loss": 0.2963,
420
+ "step": 500
421
+ },
422
+ {
423
+ "epoch": 5.930232558139535,
424
+ "grad_norm": 7.972716331481934,
425
+ "learning_rate": 4.115044247787611e-06,
426
+ "loss": 0.2709,
427
+ "step": 510
428
+ },
429
+ {
430
+ "epoch": 6.0,
431
+ "eval_accuracy": 0.8142857142857143,
432
+ "eval_f1": 0.814553700267986,
433
+ "eval_loss": 0.3767072856426239,
434
+ "eval_precision": 0.8151550751879698,
435
+ "eval_recall": 0.8142857142857143,
436
+ "eval_runtime": 40.2015,
437
+ "eval_samples_per_second": 1.741,
438
+ "eval_steps_per_second": 0.075,
439
+ "step": 516
440
+ }
441
+ ],
442
+ "logging_steps": 10,
443
+ "max_steps": 602,
444
+ "num_input_tokens_seen": 0,
445
+ "num_train_epochs": 7,
446
+ "save_steps": 500,
447
+ "stateful_callbacks": {
448
+ "EarlyStoppingCallback": {
449
+ "args": {
450
+ "early_stopping_patience": 2,
451
+ "early_stopping_threshold": 0.0
452
+ },
453
+ "attributes": {
454
+ "early_stopping_patience_counter": 2
455
+ }
456
+ },
457
+ "TrainerControl": {
458
+ "args": {
459
+ "should_epoch_stop": false,
460
+ "should_evaluate": false,
461
+ "should_log": false,
462
+ "should_save": true,
463
+ "should_training_stop": true
464
+ },
465
+ "attributes": {}
466
+ }
467
+ },
468
+ "total_flos": 4137555984396360.0,
469
+ "train_batch_size": 32,
470
+ "trial_name": null,
471
+ "trial_params": null
472
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cb5061143adc12fc81a253248f802d4776e5e609c983ee6538cb8492632e8444
3
+ size 5304
vocab.json ADDED
The diff for this file is too large to render. See raw diff