BeketML commited on
Commit
a305e62
·
verified ·
1 Parent(s): c98eac0

Upload folder using huggingface_hub

Browse files
Files changed (40) hide show
  1. checkpoint-7000/config.json +28 -0
  2. checkpoint-7000/model.safetensors +3 -0
  3. checkpoint-7000/optimizer.pt +3 -0
  4. checkpoint-7000/rng_state.pth +3 -0
  5. checkpoint-7000/scaler.pt +3 -0
  6. checkpoint-7000/scheduler.pt +3 -0
  7. checkpoint-7000/sentencepiece.bpe.model +3 -0
  8. checkpoint-7000/special_tokens_map.json +15 -0
  9. checkpoint-7000/tokenizer_config.json +56 -0
  10. checkpoint-7000/trainer_state.json +701 -0
  11. checkpoint-7000/training_args.bin +3 -0
  12. checkpoint-8000/config.json +28 -0
  13. checkpoint-8000/model.safetensors +3 -0
  14. checkpoint-8000/optimizer.pt +3 -0
  15. checkpoint-8000/rng_state.pth +3 -0
  16. checkpoint-8000/scaler.pt +3 -0
  17. checkpoint-8000/scheduler.pt +3 -0
  18. checkpoint-8000/sentencepiece.bpe.model +3 -0
  19. checkpoint-8000/special_tokens_map.json +15 -0
  20. checkpoint-8000/tokenizer_config.json +56 -0
  21. checkpoint-8000/trainer_state.json +795 -0
  22. checkpoint-8000/training_args.bin +3 -0
  23. checkpoint-8500/config.json +28 -0
  24. checkpoint-8500/model.safetensors +3 -0
  25. checkpoint-8500/optimizer.pt +3 -0
  26. checkpoint-8500/rng_state.pth +3 -0
  27. checkpoint-8500/scaler.pt +3 -0
  28. checkpoint-8500/scheduler.pt +3 -0
  29. checkpoint-8500/sentencepiece.bpe.model +3 -0
  30. checkpoint-8500/special_tokens_map.json +15 -0
  31. checkpoint-8500/tokenizer_config.json +56 -0
  32. checkpoint-8500/trainer_state.json +842 -0
  33. checkpoint-8500/training_args.bin +3 -0
  34. config.json +28 -0
  35. model.safetensors +3 -0
  36. sentencepiece.bpe.model +3 -0
  37. special_tokens_map.json +15 -0
  38. test.py +32 -0
  39. tokenizer_config.json +56 -0
  40. training_args.bin +3 -0
checkpoint-7000/config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "XLMRobertaForSequenceClassification"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "bos_token_id": 0,
7
+ "classifier_dropout": null,
8
+ "dtype": "float32",
9
+ "eos_token_id": 2,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 3072,
15
+ "layer_norm_eps": 1e-05,
16
+ "max_position_embeddings": 514,
17
+ "model_type": "xlm-roberta",
18
+ "num_attention_heads": 12,
19
+ "num_hidden_layers": 12,
20
+ "output_past": true,
21
+ "pad_token_id": 1,
22
+ "position_embedding_type": "absolute",
23
+ "problem_type": "single_label_classification",
24
+ "transformers_version": "4.57.3",
25
+ "type_vocab_size": 1,
26
+ "use_cache": true,
27
+ "vocab_size": 250002
28
+ }
checkpoint-7000/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11581fcf59ed6effc51225813116b13565a436317e8381e00444ac3ce13d695e
3
+ size 1112205008
checkpoint-7000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbb0fcda173d1ae2f94dda534c96ed9a99e55c74784223db39e40e717eb61036
3
+ size 2224532875
checkpoint-7000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6dae84c904d708c130984bf940365e51dce8b1de42aca9c5e004b79c2964a51
3
+ size 14645
checkpoint-7000/scaler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67a4ac7aeda2e5e7d6451b732beb252e7b8b9aabde03ae3d1c084770435aed1b
3
+ size 1383
checkpoint-7000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4eaf7c4b5bbb5cc75cb44eda3f89bce1142fde81a196f090dfe5a1d7e6253341
3
+ size 1465
checkpoint-7000/sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
3
+ size 5069051
checkpoint-7000/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "<unk>"
15
+ }
checkpoint-7000/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<s>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<pad>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "<unk>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "250001": {
36
+ "content": "<mask>",
37
+ "lstrip": true,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "bos_token": "<s>",
45
+ "clean_up_tokenization_spaces": false,
46
+ "cls_token": "<s>",
47
+ "eos_token": "</s>",
48
+ "extra_special_tokens": {},
49
+ "mask_token": "<mask>",
50
+ "model_max_length": 512,
51
+ "pad_token": "<pad>",
52
+ "sep_token": "</s>",
53
+ "sp_model_kwargs": {},
54
+ "tokenizer_class": "XLMRobertaTokenizer",
55
+ "unk_token": "<unk>"
56
+ }
checkpoint-7000/trainer_state.json ADDED
@@ -0,0 +1,701 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 7000,
3
+ "best_metric": 0.9318026741133476,
4
+ "best_model_checkpoint": "./results/checkpoint-7000",
5
+ "epoch": 0.18054265965129476,
6
+ "eval_steps": 500,
7
+ "global_step": 7000,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.0025791808521613536,
14
+ "grad_norm": 8.55178451538086,
15
+ "learning_rate": 3.96e-06,
16
+ "loss": 0.6948,
17
+ "step": 100
18
+ },
19
+ {
20
+ "epoch": 0.005158361704322707,
21
+ "grad_norm": 6.15341329574585,
22
+ "learning_rate": 7.960000000000002e-06,
23
+ "loss": 0.6256,
24
+ "step": 200
25
+ },
26
+ {
27
+ "epoch": 0.00773754255648406,
28
+ "grad_norm": 7.876453876495361,
29
+ "learning_rate": 1.196e-05,
30
+ "loss": 0.4996,
31
+ "step": 300
32
+ },
33
+ {
34
+ "epoch": 0.010316723408645414,
35
+ "grad_norm": 25.46845817565918,
36
+ "learning_rate": 1.5960000000000003e-05,
37
+ "loss": 0.4267,
38
+ "step": 400
39
+ },
40
+ {
41
+ "epoch": 0.012895904260806768,
42
+ "grad_norm": 12.137995719909668,
43
+ "learning_rate": 1.9960000000000002e-05,
44
+ "loss": 0.3932,
45
+ "step": 500
46
+ },
47
+ {
48
+ "epoch": 0.012895904260806768,
49
+ "eval_accuracy": 0.8497975290810141,
50
+ "eval_f1": 0.8492073290894184,
51
+ "eval_loss": 0.36694180965423584,
52
+ "eval_precision": 0.8566847895515343,
53
+ "eval_recall": 0.8497975290810141,
54
+ "eval_runtime": 81.3998,
55
+ "eval_samples_per_second": 952.607,
56
+ "eval_steps_per_second": 59.546,
57
+ "step": 500
58
+ },
59
+ {
60
+ "epoch": 0.01547508511296812,
61
+ "grad_norm": 9.444395065307617,
62
+ "learning_rate": 1.99829039165573e-05,
63
+ "loss": 0.3438,
64
+ "step": 600
65
+ },
66
+ {
67
+ "epoch": 0.018054265965129476,
68
+ "grad_norm": 12.698025703430176,
69
+ "learning_rate": 1.9965635145403055e-05,
70
+ "loss": 0.3897,
71
+ "step": 700
72
+ },
73
+ {
74
+ "epoch": 0.02063344681729083,
75
+ "grad_norm": 7.648699760437012,
76
+ "learning_rate": 1.994836637424881e-05,
77
+ "loss": 0.3197,
78
+ "step": 800
79
+ },
80
+ {
81
+ "epoch": 0.02321262766945218,
82
+ "grad_norm": 11.770590782165527,
83
+ "learning_rate": 1.9931097603094565e-05,
84
+ "loss": 0.2641,
85
+ "step": 900
86
+ },
87
+ {
88
+ "epoch": 0.025791808521613536,
89
+ "grad_norm": 1.2174805402755737,
90
+ "learning_rate": 1.991382883194032e-05,
91
+ "loss": 0.3093,
92
+ "step": 1000
93
+ },
94
+ {
95
+ "epoch": 0.025791808521613536,
96
+ "eval_accuracy": 0.8805163653245983,
97
+ "eval_f1": 0.8798423409034712,
98
+ "eval_loss": 0.29810118675231934,
99
+ "eval_precision": 0.8908826043373803,
100
+ "eval_recall": 0.8805163653245983,
101
+ "eval_runtime": 82.3661,
102
+ "eval_samples_per_second": 941.431,
103
+ "eval_steps_per_second": 58.847,
104
+ "step": 1000
105
+ },
106
+ {
107
+ "epoch": 0.02837098937377489,
108
+ "grad_norm": 35.91494369506836,
109
+ "learning_rate": 1.9896560060786075e-05,
110
+ "loss": 0.2701,
111
+ "step": 1100
112
+ },
113
+ {
114
+ "epoch": 0.03095017022593624,
115
+ "grad_norm": 10.724688529968262,
116
+ "learning_rate": 1.9879291289631834e-05,
117
+ "loss": 0.2723,
118
+ "step": 1200
119
+ },
120
+ {
121
+ "epoch": 0.0335293510780976,
122
+ "grad_norm": 11.826756477355957,
123
+ "learning_rate": 1.9862022518477585e-05,
124
+ "loss": 0.2669,
125
+ "step": 1300
126
+ },
127
+ {
128
+ "epoch": 0.03610853193025895,
129
+ "grad_norm": 13.958097457885742,
130
+ "learning_rate": 1.984475374732334e-05,
131
+ "loss": 0.2563,
132
+ "step": 1400
133
+ },
134
+ {
135
+ "epoch": 0.0386877127824203,
136
+ "grad_norm": 32.344303131103516,
137
+ "learning_rate": 1.98274849761691e-05,
138
+ "loss": 0.2717,
139
+ "step": 1500
140
+ },
141
+ {
142
+ "epoch": 0.0386877127824203,
143
+ "eval_accuracy": 0.9098424079853499,
144
+ "eval_f1": 0.9098439407901873,
145
+ "eval_loss": 0.2351153939962387,
146
+ "eval_precision": 0.9100045564108226,
147
+ "eval_recall": 0.9098424079853499,
148
+ "eval_runtime": 80.9696,
149
+ "eval_samples_per_second": 957.669,
150
+ "eval_steps_per_second": 59.862,
151
+ "step": 1500
152
+ },
153
+ {
154
+ "epoch": 0.04126689363458166,
155
+ "grad_norm": 41.87895584106445,
156
+ "learning_rate": 1.9810216205014854e-05,
157
+ "loss": 0.2775,
158
+ "step": 1600
159
+ },
160
+ {
161
+ "epoch": 0.04384607448674301,
162
+ "grad_norm": 9.029143333435059,
163
+ "learning_rate": 1.9792947433860606e-05,
164
+ "loss": 0.2747,
165
+ "step": 1700
166
+ },
167
+ {
168
+ "epoch": 0.04642525533890436,
169
+ "grad_norm": 0.5453509092330933,
170
+ "learning_rate": 1.9775678662706365e-05,
171
+ "loss": 0.2333,
172
+ "step": 1800
173
+ },
174
+ {
175
+ "epoch": 0.04900443619106572,
176
+ "grad_norm": 14.877126693725586,
177
+ "learning_rate": 1.975840989155212e-05,
178
+ "loss": 0.2335,
179
+ "step": 1900
180
+ },
181
+ {
182
+ "epoch": 0.05158361704322707,
183
+ "grad_norm": 9.152324676513672,
184
+ "learning_rate": 1.974114112039787e-05,
185
+ "loss": 0.2501,
186
+ "step": 2000
187
+ },
188
+ {
189
+ "epoch": 0.05158361704322707,
190
+ "eval_accuracy": 0.915400686079802,
191
+ "eval_f1": 0.9153786857019529,
192
+ "eval_loss": 0.24714471399784088,
193
+ "eval_precision": 0.9155981210913413,
194
+ "eval_recall": 0.915400686079802,
195
+ "eval_runtime": 80.7839,
196
+ "eval_samples_per_second": 959.869,
197
+ "eval_steps_per_second": 60.0,
198
+ "step": 2000
199
+ },
200
+ {
201
+ "epoch": 0.05416279789538842,
202
+ "grad_norm": 3.9494690895080566,
203
+ "learning_rate": 1.972387234924363e-05,
204
+ "loss": 0.2432,
205
+ "step": 2100
206
+ },
207
+ {
208
+ "epoch": 0.05674197874754978,
209
+ "grad_norm": 7.518190383911133,
210
+ "learning_rate": 1.9706603578089385e-05,
211
+ "loss": 0.2179,
212
+ "step": 2200
213
+ },
214
+ {
215
+ "epoch": 0.05932115959971113,
216
+ "grad_norm": 55.069461822509766,
217
+ "learning_rate": 1.968933480693514e-05,
218
+ "loss": 0.2603,
219
+ "step": 2300
220
+ },
221
+ {
222
+ "epoch": 0.06190034045187248,
223
+ "grad_norm": 21.235607147216797,
224
+ "learning_rate": 1.9672066035780896e-05,
225
+ "loss": 0.2363,
226
+ "step": 2400
227
+ },
228
+ {
229
+ "epoch": 0.06447952130403384,
230
+ "grad_norm": 30.956710815429688,
231
+ "learning_rate": 1.965479726462665e-05,
232
+ "loss": 0.264,
233
+ "step": 2500
234
+ },
235
+ {
236
+ "epoch": 0.06447952130403384,
237
+ "eval_accuracy": 0.9174769802171726,
238
+ "eval_f1": 0.9174732815674826,
239
+ "eval_loss": 0.22409705817699432,
240
+ "eval_precision": 0.9178450257324412,
241
+ "eval_recall": 0.9174769802171726,
242
+ "eval_runtime": 82.4939,
243
+ "eval_samples_per_second": 939.972,
244
+ "eval_steps_per_second": 58.756,
245
+ "step": 2500
246
+ },
247
+ {
248
+ "epoch": 0.0670587021561952,
249
+ "grad_norm": 3.0290513038635254,
250
+ "learning_rate": 1.9637528493472406e-05,
251
+ "loss": 0.2388,
252
+ "step": 2600
253
+ },
254
+ {
255
+ "epoch": 0.06963788300835655,
256
+ "grad_norm": 4.662931442260742,
257
+ "learning_rate": 1.962025972231816e-05,
258
+ "loss": 0.2132,
259
+ "step": 2700
260
+ },
261
+ {
262
+ "epoch": 0.0722170638605179,
263
+ "grad_norm": 31.94866180419922,
264
+ "learning_rate": 1.9602990951163916e-05,
265
+ "loss": 0.2576,
266
+ "step": 2800
267
+ },
268
+ {
269
+ "epoch": 0.07479624471267925,
270
+ "grad_norm": 3.5957560539245605,
271
+ "learning_rate": 1.958572218000967e-05,
272
+ "loss": 0.2404,
273
+ "step": 2900
274
+ },
275
+ {
276
+ "epoch": 0.0773754255648406,
277
+ "grad_norm": 6.48631477355957,
278
+ "learning_rate": 1.9568453408855427e-05,
279
+ "loss": 0.2514,
280
+ "step": 3000
281
+ },
282
+ {
283
+ "epoch": 0.0773754255648406,
284
+ "eval_accuracy": 0.9169998194526837,
285
+ "eval_f1": 0.9170017211060641,
286
+ "eval_loss": 0.21086551249027252,
287
+ "eval_precision": 0.9171348873594787,
288
+ "eval_recall": 0.9169998194526837,
289
+ "eval_runtime": 80.1992,
290
+ "eval_samples_per_second": 966.867,
291
+ "eval_steps_per_second": 60.437,
292
+ "step": 3000
293
+ },
294
+ {
295
+ "epoch": 0.07995460641700196,
296
+ "grad_norm": 5.789911270141602,
297
+ "learning_rate": 1.9551184637701185e-05,
298
+ "loss": 0.2138,
299
+ "step": 3100
300
+ },
301
+ {
302
+ "epoch": 0.08253378726916331,
303
+ "grad_norm": 8.613458633422852,
304
+ "learning_rate": 1.9533915866546937e-05,
305
+ "loss": 0.2323,
306
+ "step": 3200
307
+ },
308
+ {
309
+ "epoch": 0.08511296812132467,
310
+ "grad_norm": 5.5360822677612305,
311
+ "learning_rate": 1.9516647095392692e-05,
312
+ "loss": 0.2474,
313
+ "step": 3300
314
+ },
315
+ {
316
+ "epoch": 0.08769214897348603,
317
+ "grad_norm": 15.3199462890625,
318
+ "learning_rate": 1.949937832423845e-05,
319
+ "loss": 0.1964,
320
+ "step": 3400
321
+ },
322
+ {
323
+ "epoch": 0.09027132982564738,
324
+ "grad_norm": 0.23756052553653717,
325
+ "learning_rate": 1.9482109553084202e-05,
326
+ "loss": 0.2048,
327
+ "step": 3500
328
+ },
329
+ {
330
+ "epoch": 0.09027132982564738,
331
+ "eval_accuracy": 0.9212555776224498,
332
+ "eval_f1": 0.9212388888583593,
333
+ "eval_loss": 0.217011496424675,
334
+ "eval_precision": 0.9220491698721403,
335
+ "eval_recall": 0.9212555776224498,
336
+ "eval_runtime": 82.2451,
337
+ "eval_samples_per_second": 942.816,
338
+ "eval_steps_per_second": 58.934,
339
+ "step": 3500
340
+ },
341
+ {
342
+ "epoch": 0.09285051067780872,
343
+ "grad_norm": 3.4589858055114746,
344
+ "learning_rate": 1.9464840781929957e-05,
345
+ "loss": 0.2335,
346
+ "step": 3600
347
+ },
348
+ {
349
+ "epoch": 0.09542969152997008,
350
+ "grad_norm": 6.5932111740112305,
351
+ "learning_rate": 1.9447572010775716e-05,
352
+ "loss": 0.2239,
353
+ "step": 3700
354
+ },
355
+ {
356
+ "epoch": 0.09800887238213143,
357
+ "grad_norm": 2.6831107139587402,
358
+ "learning_rate": 1.943030323962147e-05,
359
+ "loss": 0.2049,
360
+ "step": 3800
361
+ },
362
+ {
363
+ "epoch": 0.10058805323429279,
364
+ "grad_norm": 26.725677490234375,
365
+ "learning_rate": 1.9413034468467223e-05,
366
+ "loss": 0.2404,
367
+ "step": 3900
368
+ },
369
+ {
370
+ "epoch": 0.10316723408645415,
371
+ "grad_norm": 26.966718673706055,
372
+ "learning_rate": 1.939576569731298e-05,
373
+ "loss": 0.241,
374
+ "step": 4000
375
+ },
376
+ {
377
+ "epoch": 0.10316723408645415,
378
+ "eval_accuracy": 0.9104227386448634,
379
+ "eval_f1": 0.9102303033501772,
380
+ "eval_loss": 0.22579680383205414,
381
+ "eval_precision": 0.9150459252068764,
382
+ "eval_recall": 0.9104227386448634,
383
+ "eval_runtime": 81.0753,
384
+ "eval_samples_per_second": 956.419,
385
+ "eval_steps_per_second": 59.784,
386
+ "step": 4000
387
+ },
388
+ {
389
+ "epoch": 0.1057464149386155,
390
+ "grad_norm": 17.290939331054688,
391
+ "learning_rate": 1.9378496926158737e-05,
392
+ "loss": 0.2298,
393
+ "step": 4100
394
+ },
395
+ {
396
+ "epoch": 0.10832559579077684,
397
+ "grad_norm": 8.844673156738281,
398
+ "learning_rate": 1.9361228155004492e-05,
399
+ "loss": 0.224,
400
+ "step": 4200
401
+ },
402
+ {
403
+ "epoch": 0.1109047766429382,
404
+ "grad_norm": 2.0837674140930176,
405
+ "learning_rate": 1.9343959383850247e-05,
406
+ "loss": 0.2367,
407
+ "step": 4300
408
+ },
409
+ {
410
+ "epoch": 0.11348395749509955,
411
+ "grad_norm": 7.639578819274902,
412
+ "learning_rate": 1.9326690612696002e-05,
413
+ "loss": 0.2194,
414
+ "step": 4400
415
+ },
416
+ {
417
+ "epoch": 0.11606313834726091,
418
+ "grad_norm": 9.39929485321045,
419
+ "learning_rate": 1.9309421841541757e-05,
420
+ "loss": 0.2297,
421
+ "step": 4500
422
+ },
423
+ {
424
+ "epoch": 0.11606313834726091,
425
+ "eval_accuracy": 0.9185473678780531,
426
+ "eval_f1": 0.918488362019573,
427
+ "eval_loss": 0.24766957759857178,
428
+ "eval_precision": 0.9204437423697496,
429
+ "eval_recall": 0.9185473678780531,
430
+ "eval_runtime": 80.6546,
431
+ "eval_samples_per_second": 961.408,
432
+ "eval_steps_per_second": 60.096,
433
+ "step": 4500
434
+ },
435
+ {
436
+ "epoch": 0.11864231919942227,
437
+ "grad_norm": 63.414676666259766,
438
+ "learning_rate": 1.9292153070387512e-05,
439
+ "loss": 0.2067,
440
+ "step": 4600
441
+ },
442
+ {
443
+ "epoch": 0.12122150005158362,
444
+ "grad_norm": 27.895687103271484,
445
+ "learning_rate": 1.9274884299233268e-05,
446
+ "loss": 0.1975,
447
+ "step": 4700
448
+ },
449
+ {
450
+ "epoch": 0.12380068090374496,
451
+ "grad_norm": 5.536584377288818,
452
+ "learning_rate": 1.9257615528079023e-05,
453
+ "loss": 0.2141,
454
+ "step": 4800
455
+ },
456
+ {
457
+ "epoch": 0.12637986175590632,
458
+ "grad_norm": 18.6892147064209,
459
+ "learning_rate": 1.9240346756924778e-05,
460
+ "loss": 0.2549,
461
+ "step": 4900
462
+ },
463
+ {
464
+ "epoch": 0.12895904260806768,
465
+ "grad_norm": 3.705787420272827,
466
+ "learning_rate": 1.9223077985770536e-05,
467
+ "loss": 0.2187,
468
+ "step": 5000
469
+ },
470
+ {
471
+ "epoch": 0.12895904260806768,
472
+ "eval_accuracy": 0.9244409481313353,
473
+ "eval_f1": 0.9243951823390982,
474
+ "eval_loss": 0.22254011034965515,
475
+ "eval_precision": 0.9250643288957023,
476
+ "eval_recall": 0.9244409481313353,
477
+ "eval_runtime": 84.6134,
478
+ "eval_samples_per_second": 916.427,
479
+ "eval_steps_per_second": 57.284,
480
+ "step": 5000
481
+ },
482
+ {
483
+ "epoch": 0.13153822346022903,
484
+ "grad_norm": 10.738173484802246,
485
+ "learning_rate": 1.9205809214616288e-05,
486
+ "loss": 0.2143,
487
+ "step": 5100
488
+ },
489
+ {
490
+ "epoch": 0.1341174043123904,
491
+ "grad_norm": 7.39179801940918,
492
+ "learning_rate": 1.9188540443462043e-05,
493
+ "loss": 0.205,
494
+ "step": 5200
495
+ },
496
+ {
497
+ "epoch": 0.13669658516455174,
498
+ "grad_norm": 14.402508735656738,
499
+ "learning_rate": 1.9171271672307802e-05,
500
+ "loss": 0.2027,
501
+ "step": 5300
502
+ },
503
+ {
504
+ "epoch": 0.1392757660167131,
505
+ "grad_norm": 8.077102661132812,
506
+ "learning_rate": 1.9154002901153554e-05,
507
+ "loss": 0.1909,
508
+ "step": 5400
509
+ },
510
+ {
511
+ "epoch": 0.14185494686887445,
512
+ "grad_norm": 2.036444664001465,
513
+ "learning_rate": 1.913673412999931e-05,
514
+ "loss": 0.2285,
515
+ "step": 5500
516
+ },
517
+ {
518
+ "epoch": 0.14185494686887445,
519
+ "eval_accuracy": 0.9220164555982564,
520
+ "eval_f1": 0.9220066809051832,
521
+ "eval_loss": 0.2105225920677185,
522
+ "eval_precision": 0.9220853946451525,
523
+ "eval_recall": 0.9220164555982564,
524
+ "eval_runtime": 85.408,
525
+ "eval_samples_per_second": 907.901,
526
+ "eval_steps_per_second": 56.751,
527
+ "step": 5500
528
+ },
529
+ {
530
+ "epoch": 0.1444341277210358,
531
+ "grad_norm": 3.172539472579956,
532
+ "learning_rate": 1.9119465358845067e-05,
533
+ "loss": 0.1839,
534
+ "step": 5600
535
+ },
536
+ {
537
+ "epoch": 0.14701330857319717,
538
+ "grad_norm": 4.246190547943115,
539
+ "learning_rate": 1.9102196587690823e-05,
540
+ "loss": 0.2248,
541
+ "step": 5700
542
+ },
543
+ {
544
+ "epoch": 0.1495924894253585,
545
+ "grad_norm": 9.676543235778809,
546
+ "learning_rate": 1.9084927816536574e-05,
547
+ "loss": 0.2003,
548
+ "step": 5800
549
+ },
550
+ {
551
+ "epoch": 0.15217167027751985,
552
+ "grad_norm": 14.093317031860352,
553
+ "learning_rate": 1.9067659045382333e-05,
554
+ "loss": 0.2283,
555
+ "step": 5900
556
+ },
557
+ {
558
+ "epoch": 0.1547508511296812,
559
+ "grad_norm": 6.108678817749023,
560
+ "learning_rate": 1.9050390274228088e-05,
561
+ "loss": 0.2001,
562
+ "step": 6000
563
+ },
564
+ {
565
+ "epoch": 0.1547508511296812,
566
+ "eval_accuracy": 0.9288514611436383,
567
+ "eval_f1": 0.9288285810713337,
568
+ "eval_loss": 0.19547687470912933,
569
+ "eval_precision": 0.9291294558172835,
570
+ "eval_recall": 0.9288514611436383,
571
+ "eval_runtime": 85.1832,
572
+ "eval_samples_per_second": 910.297,
573
+ "eval_steps_per_second": 56.901,
574
+ "step": 6000
575
+ },
576
+ {
577
+ "epoch": 0.15733003198184256,
578
+ "grad_norm": 25.980791091918945,
579
+ "learning_rate": 1.9033121503073843e-05,
580
+ "loss": 0.2045,
581
+ "step": 6100
582
+ },
583
+ {
584
+ "epoch": 0.15990921283400392,
585
+ "grad_norm": 3.697967767715454,
586
+ "learning_rate": 1.90158527319196e-05,
587
+ "loss": 0.1921,
588
+ "step": 6200
589
+ },
590
+ {
591
+ "epoch": 0.16248839368616527,
592
+ "grad_norm": 6.734536647796631,
593
+ "learning_rate": 1.8998583960765353e-05,
594
+ "loss": 0.2398,
595
+ "step": 6300
596
+ },
597
+ {
598
+ "epoch": 0.16506757453832663,
599
+ "grad_norm": 4.171863555908203,
600
+ "learning_rate": 1.898131518961111e-05,
601
+ "loss": 0.1851,
602
+ "step": 6400
603
+ },
604
+ {
605
+ "epoch": 0.16764675539048798,
606
+ "grad_norm": 8.22290325164795,
607
+ "learning_rate": 1.8964046418456864e-05,
608
+ "loss": 0.2048,
609
+ "step": 6500
610
+ },
611
+ {
612
+ "epoch": 0.16764675539048798,
613
+ "eval_accuracy": 0.9237316551030409,
614
+ "eval_f1": 0.9237108101584136,
615
+ "eval_loss": 0.21942158043384552,
616
+ "eval_precision": 0.9246712025171661,
617
+ "eval_recall": 0.9237316551030409,
618
+ "eval_runtime": 87.4195,
619
+ "eval_samples_per_second": 887.01,
620
+ "eval_steps_per_second": 55.445,
621
+ "step": 6500
622
+ },
623
+ {
624
+ "epoch": 0.17022593624264934,
625
+ "grad_norm": 1.4064490795135498,
626
+ "learning_rate": 1.894677764730262e-05,
627
+ "loss": 0.2192,
628
+ "step": 6600
629
+ },
630
+ {
631
+ "epoch": 0.1728051170948107,
632
+ "grad_norm": 0.8423879146575928,
633
+ "learning_rate": 1.8929508876148374e-05,
634
+ "loss": 0.2296,
635
+ "step": 6700
636
+ },
637
+ {
638
+ "epoch": 0.17538429794697205,
639
+ "grad_norm": 2.1388964653015137,
640
+ "learning_rate": 1.891224010499413e-05,
641
+ "loss": 0.2397,
642
+ "step": 6800
643
+ },
644
+ {
645
+ "epoch": 0.1779634787991334,
646
+ "grad_norm": 6.909358501434326,
647
+ "learning_rate": 1.8894971333839888e-05,
648
+ "loss": 0.1902,
649
+ "step": 6900
650
+ },
651
+ {
652
+ "epoch": 0.18054265965129476,
653
+ "grad_norm": 1.044767141342163,
654
+ "learning_rate": 1.887770256268564e-05,
655
+ "loss": 0.1931,
656
+ "step": 7000
657
+ },
658
+ {
659
+ "epoch": 0.18054265965129476,
660
+ "eval_accuracy": 0.9318046993887183,
661
+ "eval_f1": 0.9318026741133476,
662
+ "eval_loss": 0.18849702179431915,
663
+ "eval_precision": 0.9318098201446341,
664
+ "eval_recall": 0.9318046993887183,
665
+ "eval_runtime": 85.7797,
666
+ "eval_samples_per_second": 903.967,
667
+ "eval_steps_per_second": 56.505,
668
+ "step": 7000
669
+ }
670
+ ],
671
+ "logging_steps": 100,
672
+ "max_steps": 116316,
673
+ "num_input_tokens_seen": 0,
674
+ "num_train_epochs": 3,
675
+ "save_steps": 500,
676
+ "stateful_callbacks": {
677
+ "EarlyStoppingCallback": {
678
+ "args": {
679
+ "early_stopping_patience": 3,
680
+ "early_stopping_threshold": 0.0
681
+ },
682
+ "attributes": {
683
+ "early_stopping_patience_counter": 0
684
+ }
685
+ },
686
+ "TrainerControl": {
687
+ "args": {
688
+ "should_epoch_stop": false,
689
+ "should_evaluate": false,
690
+ "should_log": false,
691
+ "should_save": true,
692
+ "should_training_stop": false
693
+ },
694
+ "attributes": {}
695
+ }
696
+ },
697
+ "total_flos": 2.534886729544608e+16,
698
+ "train_batch_size": 16,
699
+ "trial_name": null,
700
+ "trial_params": null
701
+ }
checkpoint-7000/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce8abd265ced130b3a60da4969f0ccef6e504e9f0ee20e35110ba851d1b32cb3
3
+ size 5777
checkpoint-8000/config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "XLMRobertaForSequenceClassification"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "bos_token_id": 0,
7
+ "classifier_dropout": null,
8
+ "dtype": "float32",
9
+ "eos_token_id": 2,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 3072,
15
+ "layer_norm_eps": 1e-05,
16
+ "max_position_embeddings": 514,
17
+ "model_type": "xlm-roberta",
18
+ "num_attention_heads": 12,
19
+ "num_hidden_layers": 12,
20
+ "output_past": true,
21
+ "pad_token_id": 1,
22
+ "position_embedding_type": "absolute",
23
+ "problem_type": "single_label_classification",
24
+ "transformers_version": "4.57.3",
25
+ "type_vocab_size": 1,
26
+ "use_cache": true,
27
+ "vocab_size": 250002
28
+ }
checkpoint-8000/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1bf7bf3f6d9df4b677428e8787a2e9c23a1eed357393a2749bd1b331d815c577
3
+ size 1112205008
checkpoint-8000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b16044d5b6739847bb763d6e7b4df2ceebc97beff514912aa55dd7766cb9491c
3
+ size 2224532875
checkpoint-8000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7111253787a9df18bde957fc589d600eff7f44793b5e3bf64a2e32b4057938f9
3
+ size 14645
checkpoint-8000/scaler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:922ff24c213c3bda10e2eb4a4d8d5cc817d8cf7cca674392f4fd327654134e8c
3
+ size 1383
checkpoint-8000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d028fa8da82e9494cd9148b8fe1607d2f58300c39ad900a04937564f7c4a6fc6
3
+ size 1465
checkpoint-8000/sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
3
+ size 5069051
checkpoint-8000/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "<unk>"
15
+ }
checkpoint-8000/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<s>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<pad>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "<unk>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "250001": {
36
+ "content": "<mask>",
37
+ "lstrip": true,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "bos_token": "<s>",
45
+ "clean_up_tokenization_spaces": false,
46
+ "cls_token": "<s>",
47
+ "eos_token": "</s>",
48
+ "extra_special_tokens": {},
49
+ "mask_token": "<mask>",
50
+ "model_max_length": 512,
51
+ "pad_token": "<pad>",
52
+ "sep_token": "</s>",
53
+ "sp_model_kwargs": {},
54
+ "tokenizer_class": "XLMRobertaTokenizer",
55
+ "unk_token": "<unk>"
56
+ }
checkpoint-8000/trainer_state.json ADDED
@@ -0,0 +1,795 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 7000,
3
+ "best_metric": 0.9318026741133476,
4
+ "best_model_checkpoint": "./results/checkpoint-7000",
5
+ "epoch": 0.2063344681729083,
6
+ "eval_steps": 500,
7
+ "global_step": 8000,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.0025791808521613536,
14
+ "grad_norm": 8.55178451538086,
15
+ "learning_rate": 3.96e-06,
16
+ "loss": 0.6948,
17
+ "step": 100
18
+ },
19
+ {
20
+ "epoch": 0.005158361704322707,
21
+ "grad_norm": 6.15341329574585,
22
+ "learning_rate": 7.960000000000002e-06,
23
+ "loss": 0.6256,
24
+ "step": 200
25
+ },
26
+ {
27
+ "epoch": 0.00773754255648406,
28
+ "grad_norm": 7.876453876495361,
29
+ "learning_rate": 1.196e-05,
30
+ "loss": 0.4996,
31
+ "step": 300
32
+ },
33
+ {
34
+ "epoch": 0.010316723408645414,
35
+ "grad_norm": 25.46845817565918,
36
+ "learning_rate": 1.5960000000000003e-05,
37
+ "loss": 0.4267,
38
+ "step": 400
39
+ },
40
+ {
41
+ "epoch": 0.012895904260806768,
42
+ "grad_norm": 12.137995719909668,
43
+ "learning_rate": 1.9960000000000002e-05,
44
+ "loss": 0.3932,
45
+ "step": 500
46
+ },
47
+ {
48
+ "epoch": 0.012895904260806768,
49
+ "eval_accuracy": 0.8497975290810141,
50
+ "eval_f1": 0.8492073290894184,
51
+ "eval_loss": 0.36694180965423584,
52
+ "eval_precision": 0.8566847895515343,
53
+ "eval_recall": 0.8497975290810141,
54
+ "eval_runtime": 81.3998,
55
+ "eval_samples_per_second": 952.607,
56
+ "eval_steps_per_second": 59.546,
57
+ "step": 500
58
+ },
59
+ {
60
+ "epoch": 0.01547508511296812,
61
+ "grad_norm": 9.444395065307617,
62
+ "learning_rate": 1.99829039165573e-05,
63
+ "loss": 0.3438,
64
+ "step": 600
65
+ },
66
+ {
67
+ "epoch": 0.018054265965129476,
68
+ "grad_norm": 12.698025703430176,
69
+ "learning_rate": 1.9965635145403055e-05,
70
+ "loss": 0.3897,
71
+ "step": 700
72
+ },
73
+ {
74
+ "epoch": 0.02063344681729083,
75
+ "grad_norm": 7.648699760437012,
76
+ "learning_rate": 1.994836637424881e-05,
77
+ "loss": 0.3197,
78
+ "step": 800
79
+ },
80
+ {
81
+ "epoch": 0.02321262766945218,
82
+ "grad_norm": 11.770590782165527,
83
+ "learning_rate": 1.9931097603094565e-05,
84
+ "loss": 0.2641,
85
+ "step": 900
86
+ },
87
+ {
88
+ "epoch": 0.025791808521613536,
89
+ "grad_norm": 1.2174805402755737,
90
+ "learning_rate": 1.991382883194032e-05,
91
+ "loss": 0.3093,
92
+ "step": 1000
93
+ },
94
+ {
95
+ "epoch": 0.025791808521613536,
96
+ "eval_accuracy": 0.8805163653245983,
97
+ "eval_f1": 0.8798423409034712,
98
+ "eval_loss": 0.29810118675231934,
99
+ "eval_precision": 0.8908826043373803,
100
+ "eval_recall": 0.8805163653245983,
101
+ "eval_runtime": 82.3661,
102
+ "eval_samples_per_second": 941.431,
103
+ "eval_steps_per_second": 58.847,
104
+ "step": 1000
105
+ },
106
+ {
107
+ "epoch": 0.02837098937377489,
108
+ "grad_norm": 35.91494369506836,
109
+ "learning_rate": 1.9896560060786075e-05,
110
+ "loss": 0.2701,
111
+ "step": 1100
112
+ },
113
+ {
114
+ "epoch": 0.03095017022593624,
115
+ "grad_norm": 10.724688529968262,
116
+ "learning_rate": 1.9879291289631834e-05,
117
+ "loss": 0.2723,
118
+ "step": 1200
119
+ },
120
+ {
121
+ "epoch": 0.0335293510780976,
122
+ "grad_norm": 11.826756477355957,
123
+ "learning_rate": 1.9862022518477585e-05,
124
+ "loss": 0.2669,
125
+ "step": 1300
126
+ },
127
+ {
128
+ "epoch": 0.03610853193025895,
129
+ "grad_norm": 13.958097457885742,
130
+ "learning_rate": 1.984475374732334e-05,
131
+ "loss": 0.2563,
132
+ "step": 1400
133
+ },
134
+ {
135
+ "epoch": 0.0386877127824203,
136
+ "grad_norm": 32.344303131103516,
137
+ "learning_rate": 1.98274849761691e-05,
138
+ "loss": 0.2717,
139
+ "step": 1500
140
+ },
141
+ {
142
+ "epoch": 0.0386877127824203,
143
+ "eval_accuracy": 0.9098424079853499,
144
+ "eval_f1": 0.9098439407901873,
145
+ "eval_loss": 0.2351153939962387,
146
+ "eval_precision": 0.9100045564108226,
147
+ "eval_recall": 0.9098424079853499,
148
+ "eval_runtime": 80.9696,
149
+ "eval_samples_per_second": 957.669,
150
+ "eval_steps_per_second": 59.862,
151
+ "step": 1500
152
+ },
153
+ {
154
+ "epoch": 0.04126689363458166,
155
+ "grad_norm": 41.87895584106445,
156
+ "learning_rate": 1.9810216205014854e-05,
157
+ "loss": 0.2775,
158
+ "step": 1600
159
+ },
160
+ {
161
+ "epoch": 0.04384607448674301,
162
+ "grad_norm": 9.029143333435059,
163
+ "learning_rate": 1.9792947433860606e-05,
164
+ "loss": 0.2747,
165
+ "step": 1700
166
+ },
167
+ {
168
+ "epoch": 0.04642525533890436,
169
+ "grad_norm": 0.5453509092330933,
170
+ "learning_rate": 1.9775678662706365e-05,
171
+ "loss": 0.2333,
172
+ "step": 1800
173
+ },
174
+ {
175
+ "epoch": 0.04900443619106572,
176
+ "grad_norm": 14.877126693725586,
177
+ "learning_rate": 1.975840989155212e-05,
178
+ "loss": 0.2335,
179
+ "step": 1900
180
+ },
181
+ {
182
+ "epoch": 0.05158361704322707,
183
+ "grad_norm": 9.152324676513672,
184
+ "learning_rate": 1.974114112039787e-05,
185
+ "loss": 0.2501,
186
+ "step": 2000
187
+ },
188
+ {
189
+ "epoch": 0.05158361704322707,
190
+ "eval_accuracy": 0.915400686079802,
191
+ "eval_f1": 0.9153786857019529,
192
+ "eval_loss": 0.24714471399784088,
193
+ "eval_precision": 0.9155981210913413,
194
+ "eval_recall": 0.915400686079802,
195
+ "eval_runtime": 80.7839,
196
+ "eval_samples_per_second": 959.869,
197
+ "eval_steps_per_second": 60.0,
198
+ "step": 2000
199
+ },
200
+ {
201
+ "epoch": 0.05416279789538842,
202
+ "grad_norm": 3.9494690895080566,
203
+ "learning_rate": 1.972387234924363e-05,
204
+ "loss": 0.2432,
205
+ "step": 2100
206
+ },
207
+ {
208
+ "epoch": 0.05674197874754978,
209
+ "grad_norm": 7.518190383911133,
210
+ "learning_rate": 1.9706603578089385e-05,
211
+ "loss": 0.2179,
212
+ "step": 2200
213
+ },
214
+ {
215
+ "epoch": 0.05932115959971113,
216
+ "grad_norm": 55.069461822509766,
217
+ "learning_rate": 1.968933480693514e-05,
218
+ "loss": 0.2603,
219
+ "step": 2300
220
+ },
221
+ {
222
+ "epoch": 0.06190034045187248,
223
+ "grad_norm": 21.235607147216797,
224
+ "learning_rate": 1.9672066035780896e-05,
225
+ "loss": 0.2363,
226
+ "step": 2400
227
+ },
228
+ {
229
+ "epoch": 0.06447952130403384,
230
+ "grad_norm": 30.956710815429688,
231
+ "learning_rate": 1.965479726462665e-05,
232
+ "loss": 0.264,
233
+ "step": 2500
234
+ },
235
+ {
236
+ "epoch": 0.06447952130403384,
237
+ "eval_accuracy": 0.9174769802171726,
238
+ "eval_f1": 0.9174732815674826,
239
+ "eval_loss": 0.22409705817699432,
240
+ "eval_precision": 0.9178450257324412,
241
+ "eval_recall": 0.9174769802171726,
242
+ "eval_runtime": 82.4939,
243
+ "eval_samples_per_second": 939.972,
244
+ "eval_steps_per_second": 58.756,
245
+ "step": 2500
246
+ },
247
+ {
248
+ "epoch": 0.0670587021561952,
249
+ "grad_norm": 3.0290513038635254,
250
+ "learning_rate": 1.9637528493472406e-05,
251
+ "loss": 0.2388,
252
+ "step": 2600
253
+ },
254
+ {
255
+ "epoch": 0.06963788300835655,
256
+ "grad_norm": 4.662931442260742,
257
+ "learning_rate": 1.962025972231816e-05,
258
+ "loss": 0.2132,
259
+ "step": 2700
260
+ },
261
+ {
262
+ "epoch": 0.0722170638605179,
263
+ "grad_norm": 31.94866180419922,
264
+ "learning_rate": 1.9602990951163916e-05,
265
+ "loss": 0.2576,
266
+ "step": 2800
267
+ },
268
+ {
269
+ "epoch": 0.07479624471267925,
270
+ "grad_norm": 3.5957560539245605,
271
+ "learning_rate": 1.958572218000967e-05,
272
+ "loss": 0.2404,
273
+ "step": 2900
274
+ },
275
+ {
276
+ "epoch": 0.0773754255648406,
277
+ "grad_norm": 6.48631477355957,
278
+ "learning_rate": 1.9568453408855427e-05,
279
+ "loss": 0.2514,
280
+ "step": 3000
281
+ },
282
+ {
283
+ "epoch": 0.0773754255648406,
284
+ "eval_accuracy": 0.9169998194526837,
285
+ "eval_f1": 0.9170017211060641,
286
+ "eval_loss": 0.21086551249027252,
287
+ "eval_precision": 0.9171348873594787,
288
+ "eval_recall": 0.9169998194526837,
289
+ "eval_runtime": 80.1992,
290
+ "eval_samples_per_second": 966.867,
291
+ "eval_steps_per_second": 60.437,
292
+ "step": 3000
293
+ },
294
+ {
295
+ "epoch": 0.07995460641700196,
296
+ "grad_norm": 5.789911270141602,
297
+ "learning_rate": 1.9551184637701185e-05,
298
+ "loss": 0.2138,
299
+ "step": 3100
300
+ },
301
+ {
302
+ "epoch": 0.08253378726916331,
303
+ "grad_norm": 8.613458633422852,
304
+ "learning_rate": 1.9533915866546937e-05,
305
+ "loss": 0.2323,
306
+ "step": 3200
307
+ },
308
+ {
309
+ "epoch": 0.08511296812132467,
310
+ "grad_norm": 5.5360822677612305,
311
+ "learning_rate": 1.9516647095392692e-05,
312
+ "loss": 0.2474,
313
+ "step": 3300
314
+ },
315
+ {
316
+ "epoch": 0.08769214897348603,
317
+ "grad_norm": 15.3199462890625,
318
+ "learning_rate": 1.949937832423845e-05,
319
+ "loss": 0.1964,
320
+ "step": 3400
321
+ },
322
+ {
323
+ "epoch": 0.09027132982564738,
324
+ "grad_norm": 0.23756052553653717,
325
+ "learning_rate": 1.9482109553084202e-05,
326
+ "loss": 0.2048,
327
+ "step": 3500
328
+ },
329
+ {
330
+ "epoch": 0.09027132982564738,
331
+ "eval_accuracy": 0.9212555776224498,
332
+ "eval_f1": 0.9212388888583593,
333
+ "eval_loss": 0.217011496424675,
334
+ "eval_precision": 0.9220491698721403,
335
+ "eval_recall": 0.9212555776224498,
336
+ "eval_runtime": 82.2451,
337
+ "eval_samples_per_second": 942.816,
338
+ "eval_steps_per_second": 58.934,
339
+ "step": 3500
340
+ },
341
+ {
342
+ "epoch": 0.09285051067780872,
343
+ "grad_norm": 3.4589858055114746,
344
+ "learning_rate": 1.9464840781929957e-05,
345
+ "loss": 0.2335,
346
+ "step": 3600
347
+ },
348
+ {
349
+ "epoch": 0.09542969152997008,
350
+ "grad_norm": 6.5932111740112305,
351
+ "learning_rate": 1.9447572010775716e-05,
352
+ "loss": 0.2239,
353
+ "step": 3700
354
+ },
355
+ {
356
+ "epoch": 0.09800887238213143,
357
+ "grad_norm": 2.6831107139587402,
358
+ "learning_rate": 1.943030323962147e-05,
359
+ "loss": 0.2049,
360
+ "step": 3800
361
+ },
362
+ {
363
+ "epoch": 0.10058805323429279,
364
+ "grad_norm": 26.725677490234375,
365
+ "learning_rate": 1.9413034468467223e-05,
366
+ "loss": 0.2404,
367
+ "step": 3900
368
+ },
369
+ {
370
+ "epoch": 0.10316723408645415,
371
+ "grad_norm": 26.966718673706055,
372
+ "learning_rate": 1.939576569731298e-05,
373
+ "loss": 0.241,
374
+ "step": 4000
375
+ },
376
+ {
377
+ "epoch": 0.10316723408645415,
378
+ "eval_accuracy": 0.9104227386448634,
379
+ "eval_f1": 0.9102303033501772,
380
+ "eval_loss": 0.22579680383205414,
381
+ "eval_precision": 0.9150459252068764,
382
+ "eval_recall": 0.9104227386448634,
383
+ "eval_runtime": 81.0753,
384
+ "eval_samples_per_second": 956.419,
385
+ "eval_steps_per_second": 59.784,
386
+ "step": 4000
387
+ },
388
+ {
389
+ "epoch": 0.1057464149386155,
390
+ "grad_norm": 17.290939331054688,
391
+ "learning_rate": 1.9378496926158737e-05,
392
+ "loss": 0.2298,
393
+ "step": 4100
394
+ },
395
+ {
396
+ "epoch": 0.10832559579077684,
397
+ "grad_norm": 8.844673156738281,
398
+ "learning_rate": 1.9361228155004492e-05,
399
+ "loss": 0.224,
400
+ "step": 4200
401
+ },
402
+ {
403
+ "epoch": 0.1109047766429382,
404
+ "grad_norm": 2.0837674140930176,
405
+ "learning_rate": 1.9343959383850247e-05,
406
+ "loss": 0.2367,
407
+ "step": 4300
408
+ },
409
+ {
410
+ "epoch": 0.11348395749509955,
411
+ "grad_norm": 7.639578819274902,
412
+ "learning_rate": 1.9326690612696002e-05,
413
+ "loss": 0.2194,
414
+ "step": 4400
415
+ },
416
+ {
417
+ "epoch": 0.11606313834726091,
418
+ "grad_norm": 9.39929485321045,
419
+ "learning_rate": 1.9309421841541757e-05,
420
+ "loss": 0.2297,
421
+ "step": 4500
422
+ },
423
+ {
424
+ "epoch": 0.11606313834726091,
425
+ "eval_accuracy": 0.9185473678780531,
426
+ "eval_f1": 0.918488362019573,
427
+ "eval_loss": 0.24766957759857178,
428
+ "eval_precision": 0.9204437423697496,
429
+ "eval_recall": 0.9185473678780531,
430
+ "eval_runtime": 80.6546,
431
+ "eval_samples_per_second": 961.408,
432
+ "eval_steps_per_second": 60.096,
433
+ "step": 4500
434
+ },
435
+ {
436
+ "epoch": 0.11864231919942227,
437
+ "grad_norm": 63.414676666259766,
438
+ "learning_rate": 1.9292153070387512e-05,
439
+ "loss": 0.2067,
440
+ "step": 4600
441
+ },
442
+ {
443
+ "epoch": 0.12122150005158362,
444
+ "grad_norm": 27.895687103271484,
445
+ "learning_rate": 1.9274884299233268e-05,
446
+ "loss": 0.1975,
447
+ "step": 4700
448
+ },
449
+ {
450
+ "epoch": 0.12380068090374496,
451
+ "grad_norm": 5.536584377288818,
452
+ "learning_rate": 1.9257615528079023e-05,
453
+ "loss": 0.2141,
454
+ "step": 4800
455
+ },
456
+ {
457
+ "epoch": 0.12637986175590632,
458
+ "grad_norm": 18.6892147064209,
459
+ "learning_rate": 1.9240346756924778e-05,
460
+ "loss": 0.2549,
461
+ "step": 4900
462
+ },
463
+ {
464
+ "epoch": 0.12895904260806768,
465
+ "grad_norm": 3.705787420272827,
466
+ "learning_rate": 1.9223077985770536e-05,
467
+ "loss": 0.2187,
468
+ "step": 5000
469
+ },
470
+ {
471
+ "epoch": 0.12895904260806768,
472
+ "eval_accuracy": 0.9244409481313353,
473
+ "eval_f1": 0.9243951823390982,
474
+ "eval_loss": 0.22254011034965515,
475
+ "eval_precision": 0.9250643288957023,
476
+ "eval_recall": 0.9244409481313353,
477
+ "eval_runtime": 84.6134,
478
+ "eval_samples_per_second": 916.427,
479
+ "eval_steps_per_second": 57.284,
480
+ "step": 5000
481
+ },
482
+ {
483
+ "epoch": 0.13153822346022903,
484
+ "grad_norm": 10.738173484802246,
485
+ "learning_rate": 1.9205809214616288e-05,
486
+ "loss": 0.2143,
487
+ "step": 5100
488
+ },
489
+ {
490
+ "epoch": 0.1341174043123904,
491
+ "grad_norm": 7.39179801940918,
492
+ "learning_rate": 1.9188540443462043e-05,
493
+ "loss": 0.205,
494
+ "step": 5200
495
+ },
496
+ {
497
+ "epoch": 0.13669658516455174,
498
+ "grad_norm": 14.402508735656738,
499
+ "learning_rate": 1.9171271672307802e-05,
500
+ "loss": 0.2027,
501
+ "step": 5300
502
+ },
503
+ {
504
+ "epoch": 0.1392757660167131,
505
+ "grad_norm": 8.077102661132812,
506
+ "learning_rate": 1.9154002901153554e-05,
507
+ "loss": 0.1909,
508
+ "step": 5400
509
+ },
510
+ {
511
+ "epoch": 0.14185494686887445,
512
+ "grad_norm": 2.036444664001465,
513
+ "learning_rate": 1.913673412999931e-05,
514
+ "loss": 0.2285,
515
+ "step": 5500
516
+ },
517
+ {
518
+ "epoch": 0.14185494686887445,
519
+ "eval_accuracy": 0.9220164555982564,
520
+ "eval_f1": 0.9220066809051832,
521
+ "eval_loss": 0.2105225920677185,
522
+ "eval_precision": 0.9220853946451525,
523
+ "eval_recall": 0.9220164555982564,
524
+ "eval_runtime": 85.408,
525
+ "eval_samples_per_second": 907.901,
526
+ "eval_steps_per_second": 56.751,
527
+ "step": 5500
528
+ },
529
+ {
530
+ "epoch": 0.1444341277210358,
531
+ "grad_norm": 3.172539472579956,
532
+ "learning_rate": 1.9119465358845067e-05,
533
+ "loss": 0.1839,
534
+ "step": 5600
535
+ },
536
+ {
537
+ "epoch": 0.14701330857319717,
538
+ "grad_norm": 4.246190547943115,
539
+ "learning_rate": 1.9102196587690823e-05,
540
+ "loss": 0.2248,
541
+ "step": 5700
542
+ },
543
+ {
544
+ "epoch": 0.1495924894253585,
545
+ "grad_norm": 9.676543235778809,
546
+ "learning_rate": 1.9084927816536574e-05,
547
+ "loss": 0.2003,
548
+ "step": 5800
549
+ },
550
+ {
551
+ "epoch": 0.15217167027751985,
552
+ "grad_norm": 14.093317031860352,
553
+ "learning_rate": 1.9067659045382333e-05,
554
+ "loss": 0.2283,
555
+ "step": 5900
556
+ },
557
+ {
558
+ "epoch": 0.1547508511296812,
559
+ "grad_norm": 6.108678817749023,
560
+ "learning_rate": 1.9050390274228088e-05,
561
+ "loss": 0.2001,
562
+ "step": 6000
563
+ },
564
+ {
565
+ "epoch": 0.1547508511296812,
566
+ "eval_accuracy": 0.9288514611436383,
567
+ "eval_f1": 0.9288285810713337,
568
+ "eval_loss": 0.19547687470912933,
569
+ "eval_precision": 0.9291294558172835,
570
+ "eval_recall": 0.9288514611436383,
571
+ "eval_runtime": 85.1832,
572
+ "eval_samples_per_second": 910.297,
573
+ "eval_steps_per_second": 56.901,
574
+ "step": 6000
575
+ },
576
+ {
577
+ "epoch": 0.15733003198184256,
578
+ "grad_norm": 25.980791091918945,
579
+ "learning_rate": 1.9033121503073843e-05,
580
+ "loss": 0.2045,
581
+ "step": 6100
582
+ },
583
+ {
584
+ "epoch": 0.15990921283400392,
585
+ "grad_norm": 3.697967767715454,
586
+ "learning_rate": 1.90158527319196e-05,
587
+ "loss": 0.1921,
588
+ "step": 6200
589
+ },
590
+ {
591
+ "epoch": 0.16248839368616527,
592
+ "grad_norm": 6.734536647796631,
593
+ "learning_rate": 1.8998583960765353e-05,
594
+ "loss": 0.2398,
595
+ "step": 6300
596
+ },
597
+ {
598
+ "epoch": 0.16506757453832663,
599
+ "grad_norm": 4.171863555908203,
600
+ "learning_rate": 1.898131518961111e-05,
601
+ "loss": 0.1851,
602
+ "step": 6400
603
+ },
604
+ {
605
+ "epoch": 0.16764675539048798,
606
+ "grad_norm": 8.22290325164795,
607
+ "learning_rate": 1.8964046418456864e-05,
608
+ "loss": 0.2048,
609
+ "step": 6500
610
+ },
611
+ {
612
+ "epoch": 0.16764675539048798,
613
+ "eval_accuracy": 0.9237316551030409,
614
+ "eval_f1": 0.9237108101584136,
615
+ "eval_loss": 0.21942158043384552,
616
+ "eval_precision": 0.9246712025171661,
617
+ "eval_recall": 0.9237316551030409,
618
+ "eval_runtime": 87.4195,
619
+ "eval_samples_per_second": 887.01,
620
+ "eval_steps_per_second": 55.445,
621
+ "step": 6500
622
+ },
623
+ {
624
+ "epoch": 0.17022593624264934,
625
+ "grad_norm": 1.4064490795135498,
626
+ "learning_rate": 1.894677764730262e-05,
627
+ "loss": 0.2192,
628
+ "step": 6600
629
+ },
630
+ {
631
+ "epoch": 0.1728051170948107,
632
+ "grad_norm": 0.8423879146575928,
633
+ "learning_rate": 1.8929508876148374e-05,
634
+ "loss": 0.2296,
635
+ "step": 6700
636
+ },
637
+ {
638
+ "epoch": 0.17538429794697205,
639
+ "grad_norm": 2.1388964653015137,
640
+ "learning_rate": 1.891224010499413e-05,
641
+ "loss": 0.2397,
642
+ "step": 6800
643
+ },
644
+ {
645
+ "epoch": 0.1779634787991334,
646
+ "grad_norm": 6.909358501434326,
647
+ "learning_rate": 1.8894971333839888e-05,
648
+ "loss": 0.1902,
649
+ "step": 6900
650
+ },
651
+ {
652
+ "epoch": 0.18054265965129476,
653
+ "grad_norm": 1.044767141342163,
654
+ "learning_rate": 1.887770256268564e-05,
655
+ "loss": 0.1931,
656
+ "step": 7000
657
+ },
658
+ {
659
+ "epoch": 0.18054265965129476,
660
+ "eval_accuracy": 0.9318046993887183,
661
+ "eval_f1": 0.9318026741133476,
662
+ "eval_loss": 0.18849702179431915,
663
+ "eval_precision": 0.9318098201446341,
664
+ "eval_recall": 0.9318046993887183,
665
+ "eval_runtime": 85.7797,
666
+ "eval_samples_per_second": 903.967,
667
+ "eval_steps_per_second": 56.505,
668
+ "step": 7000
669
+ },
670
+ {
671
+ "epoch": 0.1831218405034561,
672
+ "grad_norm": 9.8370943069458,
673
+ "learning_rate": 1.8860433791531395e-05,
674
+ "loss": 0.2179,
675
+ "step": 7100
676
+ },
677
+ {
678
+ "epoch": 0.18570102135561745,
679
+ "grad_norm": 3.937364101409912,
680
+ "learning_rate": 1.8843165020377153e-05,
681
+ "loss": 0.2017,
682
+ "step": 7200
683
+ },
684
+ {
685
+ "epoch": 0.1882802022077788,
686
+ "grad_norm": 1.7348313331604004,
687
+ "learning_rate": 1.8825896249222905e-05,
688
+ "loss": 0.2098,
689
+ "step": 7300
690
+ },
691
+ {
692
+ "epoch": 0.19085938305994016,
693
+ "grad_norm": 13.738706588745117,
694
+ "learning_rate": 1.880862747806866e-05,
695
+ "loss": 0.2176,
696
+ "step": 7400
697
+ },
698
+ {
699
+ "epoch": 0.1934385639121015,
700
+ "grad_norm": 7.888184547424316,
701
+ "learning_rate": 1.879135870691442e-05,
702
+ "loss": 0.2052,
703
+ "step": 7500
704
+ },
705
+ {
706
+ "epoch": 0.1934385639121015,
707
+ "eval_accuracy": 0.9288256686698821,
708
+ "eval_f1": 0.9287744301894623,
709
+ "eval_loss": 0.19062775373458862,
710
+ "eval_precision": 0.9296090951021123,
711
+ "eval_recall": 0.9288256686698821,
712
+ "eval_runtime": 84.5191,
713
+ "eval_samples_per_second": 917.45,
714
+ "eval_steps_per_second": 57.348,
715
+ "step": 7500
716
+ },
717
+ {
718
+ "epoch": 0.19601774476426287,
719
+ "grad_norm": 8.46711254119873,
720
+ "learning_rate": 1.8774089935760174e-05,
721
+ "loss": 0.2069,
722
+ "step": 7600
723
+ },
724
+ {
725
+ "epoch": 0.19859692561642422,
726
+ "grad_norm": 0.5938565731048584,
727
+ "learning_rate": 1.8756821164605926e-05,
728
+ "loss": 0.1842,
729
+ "step": 7700
730
+ },
731
+ {
732
+ "epoch": 0.20117610646858558,
733
+ "grad_norm": 14.665600776672363,
734
+ "learning_rate": 1.8739552393451684e-05,
735
+ "loss": 0.2097,
736
+ "step": 7800
737
+ },
738
+ {
739
+ "epoch": 0.20375528732074694,
740
+ "grad_norm": 11.763516426086426,
741
+ "learning_rate": 1.872228362229744e-05,
742
+ "loss": 0.221,
743
+ "step": 7900
744
+ },
745
+ {
746
+ "epoch": 0.2063344681729083,
747
+ "grad_norm": 3.6280648708343506,
748
+ "learning_rate": 1.8705014851143195e-05,
749
+ "loss": 0.1904,
750
+ "step": 8000
751
+ },
752
+ {
753
+ "epoch": 0.2063344681729083,
754
+ "eval_accuracy": 0.929612339119445,
755
+ "eval_f1": 0.9296143948889304,
756
+ "eval_loss": 0.18757739663124084,
757
+ "eval_precision": 0.9297072870129078,
758
+ "eval_recall": 0.929612339119445,
759
+ "eval_runtime": 85.1458,
760
+ "eval_samples_per_second": 910.696,
761
+ "eval_steps_per_second": 56.926,
762
+ "step": 8000
763
+ }
764
+ ],
765
+ "logging_steps": 100,
766
+ "max_steps": 116316,
767
+ "num_input_tokens_seen": 0,
768
+ "num_train_epochs": 3,
769
+ "save_steps": 500,
770
+ "stateful_callbacks": {
771
+ "EarlyStoppingCallback": {
772
+ "args": {
773
+ "early_stopping_patience": 3,
774
+ "early_stopping_threshold": 0.0
775
+ },
776
+ "attributes": {
777
+ "early_stopping_patience_counter": 2
778
+ }
779
+ },
780
+ "TrainerControl": {
781
+ "args": {
782
+ "should_epoch_stop": false,
783
+ "should_evaluate": false,
784
+ "should_log": false,
785
+ "should_save": true,
786
+ "should_training_stop": false
787
+ },
788
+ "attributes": {}
789
+ }
790
+ },
791
+ "total_flos": 2.894541453336672e+16,
792
+ "train_batch_size": 16,
793
+ "trial_name": null,
794
+ "trial_params": null
795
+ }
checkpoint-8000/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce8abd265ced130b3a60da4969f0ccef6e504e9f0ee20e35110ba851d1b32cb3
3
+ size 5777
checkpoint-8500/config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "XLMRobertaForSequenceClassification"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "bos_token_id": 0,
7
+ "classifier_dropout": null,
8
+ "dtype": "float32",
9
+ "eos_token_id": 2,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 3072,
15
+ "layer_norm_eps": 1e-05,
16
+ "max_position_embeddings": 514,
17
+ "model_type": "xlm-roberta",
18
+ "num_attention_heads": 12,
19
+ "num_hidden_layers": 12,
20
+ "output_past": true,
21
+ "pad_token_id": 1,
22
+ "position_embedding_type": "absolute",
23
+ "problem_type": "single_label_classification",
24
+ "transformers_version": "4.57.3",
25
+ "type_vocab_size": 1,
26
+ "use_cache": true,
27
+ "vocab_size": 250002
28
+ }
checkpoint-8500/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:630287452f619f9b63d672a2de963e2387815a61e169ce3e8c4e38eaf3443f8c
3
+ size 1112205008
checkpoint-8500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:76fb241df89cbf9a7ef9352c71ea051668d3f466ab6b31ddfe1d6affd2eed6ab
3
+ size 2224532875
checkpoint-8500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69835862f1cc5fe7a491a3efa7ffa0138b3b8637580ba7909e3f8c41ee34228a
3
+ size 14645
checkpoint-8500/scaler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6ed9cd0c6034218e5c6ffcb7beab99f8695dfbf2fff5bfc25f34cdf042e26d2
3
+ size 1383
checkpoint-8500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6280d3b52579c204059f19675b839e105c8667272a05af43b8e228df65db087f
3
+ size 1465
checkpoint-8500/sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
3
+ size 5069051
checkpoint-8500/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "<unk>"
15
+ }
checkpoint-8500/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<s>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<pad>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "<unk>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "250001": {
36
+ "content": "<mask>",
37
+ "lstrip": true,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "bos_token": "<s>",
45
+ "clean_up_tokenization_spaces": false,
46
+ "cls_token": "<s>",
47
+ "eos_token": "</s>",
48
+ "extra_special_tokens": {},
49
+ "mask_token": "<mask>",
50
+ "model_max_length": 512,
51
+ "pad_token": "<pad>",
52
+ "sep_token": "</s>",
53
+ "sp_model_kwargs": {},
54
+ "tokenizer_class": "XLMRobertaTokenizer",
55
+ "unk_token": "<unk>"
56
+ }
checkpoint-8500/trainer_state.json ADDED
@@ -0,0 +1,842 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 7000,
3
+ "best_metric": 0.9318026741133476,
4
+ "best_model_checkpoint": "./results/checkpoint-7000",
5
+ "epoch": 0.21923037243371504,
6
+ "eval_steps": 500,
7
+ "global_step": 8500,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.0025791808521613536,
14
+ "grad_norm": 8.55178451538086,
15
+ "learning_rate": 3.96e-06,
16
+ "loss": 0.6948,
17
+ "step": 100
18
+ },
19
+ {
20
+ "epoch": 0.005158361704322707,
21
+ "grad_norm": 6.15341329574585,
22
+ "learning_rate": 7.960000000000002e-06,
23
+ "loss": 0.6256,
24
+ "step": 200
25
+ },
26
+ {
27
+ "epoch": 0.00773754255648406,
28
+ "grad_norm": 7.876453876495361,
29
+ "learning_rate": 1.196e-05,
30
+ "loss": 0.4996,
31
+ "step": 300
32
+ },
33
+ {
34
+ "epoch": 0.010316723408645414,
35
+ "grad_norm": 25.46845817565918,
36
+ "learning_rate": 1.5960000000000003e-05,
37
+ "loss": 0.4267,
38
+ "step": 400
39
+ },
40
+ {
41
+ "epoch": 0.012895904260806768,
42
+ "grad_norm": 12.137995719909668,
43
+ "learning_rate": 1.9960000000000002e-05,
44
+ "loss": 0.3932,
45
+ "step": 500
46
+ },
47
+ {
48
+ "epoch": 0.012895904260806768,
49
+ "eval_accuracy": 0.8497975290810141,
50
+ "eval_f1": 0.8492073290894184,
51
+ "eval_loss": 0.36694180965423584,
52
+ "eval_precision": 0.8566847895515343,
53
+ "eval_recall": 0.8497975290810141,
54
+ "eval_runtime": 81.3998,
55
+ "eval_samples_per_second": 952.607,
56
+ "eval_steps_per_second": 59.546,
57
+ "step": 500
58
+ },
59
+ {
60
+ "epoch": 0.01547508511296812,
61
+ "grad_norm": 9.444395065307617,
62
+ "learning_rate": 1.99829039165573e-05,
63
+ "loss": 0.3438,
64
+ "step": 600
65
+ },
66
+ {
67
+ "epoch": 0.018054265965129476,
68
+ "grad_norm": 12.698025703430176,
69
+ "learning_rate": 1.9965635145403055e-05,
70
+ "loss": 0.3897,
71
+ "step": 700
72
+ },
73
+ {
74
+ "epoch": 0.02063344681729083,
75
+ "grad_norm": 7.648699760437012,
76
+ "learning_rate": 1.994836637424881e-05,
77
+ "loss": 0.3197,
78
+ "step": 800
79
+ },
80
+ {
81
+ "epoch": 0.02321262766945218,
82
+ "grad_norm": 11.770590782165527,
83
+ "learning_rate": 1.9931097603094565e-05,
84
+ "loss": 0.2641,
85
+ "step": 900
86
+ },
87
+ {
88
+ "epoch": 0.025791808521613536,
89
+ "grad_norm": 1.2174805402755737,
90
+ "learning_rate": 1.991382883194032e-05,
91
+ "loss": 0.3093,
92
+ "step": 1000
93
+ },
94
+ {
95
+ "epoch": 0.025791808521613536,
96
+ "eval_accuracy": 0.8805163653245983,
97
+ "eval_f1": 0.8798423409034712,
98
+ "eval_loss": 0.29810118675231934,
99
+ "eval_precision": 0.8908826043373803,
100
+ "eval_recall": 0.8805163653245983,
101
+ "eval_runtime": 82.3661,
102
+ "eval_samples_per_second": 941.431,
103
+ "eval_steps_per_second": 58.847,
104
+ "step": 1000
105
+ },
106
+ {
107
+ "epoch": 0.02837098937377489,
108
+ "grad_norm": 35.91494369506836,
109
+ "learning_rate": 1.9896560060786075e-05,
110
+ "loss": 0.2701,
111
+ "step": 1100
112
+ },
113
+ {
114
+ "epoch": 0.03095017022593624,
115
+ "grad_norm": 10.724688529968262,
116
+ "learning_rate": 1.9879291289631834e-05,
117
+ "loss": 0.2723,
118
+ "step": 1200
119
+ },
120
+ {
121
+ "epoch": 0.0335293510780976,
122
+ "grad_norm": 11.826756477355957,
123
+ "learning_rate": 1.9862022518477585e-05,
124
+ "loss": 0.2669,
125
+ "step": 1300
126
+ },
127
+ {
128
+ "epoch": 0.03610853193025895,
129
+ "grad_norm": 13.958097457885742,
130
+ "learning_rate": 1.984475374732334e-05,
131
+ "loss": 0.2563,
132
+ "step": 1400
133
+ },
134
+ {
135
+ "epoch": 0.0386877127824203,
136
+ "grad_norm": 32.344303131103516,
137
+ "learning_rate": 1.98274849761691e-05,
138
+ "loss": 0.2717,
139
+ "step": 1500
140
+ },
141
+ {
142
+ "epoch": 0.0386877127824203,
143
+ "eval_accuracy": 0.9098424079853499,
144
+ "eval_f1": 0.9098439407901873,
145
+ "eval_loss": 0.2351153939962387,
146
+ "eval_precision": 0.9100045564108226,
147
+ "eval_recall": 0.9098424079853499,
148
+ "eval_runtime": 80.9696,
149
+ "eval_samples_per_second": 957.669,
150
+ "eval_steps_per_second": 59.862,
151
+ "step": 1500
152
+ },
153
+ {
154
+ "epoch": 0.04126689363458166,
155
+ "grad_norm": 41.87895584106445,
156
+ "learning_rate": 1.9810216205014854e-05,
157
+ "loss": 0.2775,
158
+ "step": 1600
159
+ },
160
+ {
161
+ "epoch": 0.04384607448674301,
162
+ "grad_norm": 9.029143333435059,
163
+ "learning_rate": 1.9792947433860606e-05,
164
+ "loss": 0.2747,
165
+ "step": 1700
166
+ },
167
+ {
168
+ "epoch": 0.04642525533890436,
169
+ "grad_norm": 0.5453509092330933,
170
+ "learning_rate": 1.9775678662706365e-05,
171
+ "loss": 0.2333,
172
+ "step": 1800
173
+ },
174
+ {
175
+ "epoch": 0.04900443619106572,
176
+ "grad_norm": 14.877126693725586,
177
+ "learning_rate": 1.975840989155212e-05,
178
+ "loss": 0.2335,
179
+ "step": 1900
180
+ },
181
+ {
182
+ "epoch": 0.05158361704322707,
183
+ "grad_norm": 9.152324676513672,
184
+ "learning_rate": 1.974114112039787e-05,
185
+ "loss": 0.2501,
186
+ "step": 2000
187
+ },
188
+ {
189
+ "epoch": 0.05158361704322707,
190
+ "eval_accuracy": 0.915400686079802,
191
+ "eval_f1": 0.9153786857019529,
192
+ "eval_loss": 0.24714471399784088,
193
+ "eval_precision": 0.9155981210913413,
194
+ "eval_recall": 0.915400686079802,
195
+ "eval_runtime": 80.7839,
196
+ "eval_samples_per_second": 959.869,
197
+ "eval_steps_per_second": 60.0,
198
+ "step": 2000
199
+ },
200
+ {
201
+ "epoch": 0.05416279789538842,
202
+ "grad_norm": 3.9494690895080566,
203
+ "learning_rate": 1.972387234924363e-05,
204
+ "loss": 0.2432,
205
+ "step": 2100
206
+ },
207
+ {
208
+ "epoch": 0.05674197874754978,
209
+ "grad_norm": 7.518190383911133,
210
+ "learning_rate": 1.9706603578089385e-05,
211
+ "loss": 0.2179,
212
+ "step": 2200
213
+ },
214
+ {
215
+ "epoch": 0.05932115959971113,
216
+ "grad_norm": 55.069461822509766,
217
+ "learning_rate": 1.968933480693514e-05,
218
+ "loss": 0.2603,
219
+ "step": 2300
220
+ },
221
+ {
222
+ "epoch": 0.06190034045187248,
223
+ "grad_norm": 21.235607147216797,
224
+ "learning_rate": 1.9672066035780896e-05,
225
+ "loss": 0.2363,
226
+ "step": 2400
227
+ },
228
+ {
229
+ "epoch": 0.06447952130403384,
230
+ "grad_norm": 30.956710815429688,
231
+ "learning_rate": 1.965479726462665e-05,
232
+ "loss": 0.264,
233
+ "step": 2500
234
+ },
235
+ {
236
+ "epoch": 0.06447952130403384,
237
+ "eval_accuracy": 0.9174769802171726,
238
+ "eval_f1": 0.9174732815674826,
239
+ "eval_loss": 0.22409705817699432,
240
+ "eval_precision": 0.9178450257324412,
241
+ "eval_recall": 0.9174769802171726,
242
+ "eval_runtime": 82.4939,
243
+ "eval_samples_per_second": 939.972,
244
+ "eval_steps_per_second": 58.756,
245
+ "step": 2500
246
+ },
247
+ {
248
+ "epoch": 0.0670587021561952,
249
+ "grad_norm": 3.0290513038635254,
250
+ "learning_rate": 1.9637528493472406e-05,
251
+ "loss": 0.2388,
252
+ "step": 2600
253
+ },
254
+ {
255
+ "epoch": 0.06963788300835655,
256
+ "grad_norm": 4.662931442260742,
257
+ "learning_rate": 1.962025972231816e-05,
258
+ "loss": 0.2132,
259
+ "step": 2700
260
+ },
261
+ {
262
+ "epoch": 0.0722170638605179,
263
+ "grad_norm": 31.94866180419922,
264
+ "learning_rate": 1.9602990951163916e-05,
265
+ "loss": 0.2576,
266
+ "step": 2800
267
+ },
268
+ {
269
+ "epoch": 0.07479624471267925,
270
+ "grad_norm": 3.5957560539245605,
271
+ "learning_rate": 1.958572218000967e-05,
272
+ "loss": 0.2404,
273
+ "step": 2900
274
+ },
275
+ {
276
+ "epoch": 0.0773754255648406,
277
+ "grad_norm": 6.48631477355957,
278
+ "learning_rate": 1.9568453408855427e-05,
279
+ "loss": 0.2514,
280
+ "step": 3000
281
+ },
282
+ {
283
+ "epoch": 0.0773754255648406,
284
+ "eval_accuracy": 0.9169998194526837,
285
+ "eval_f1": 0.9170017211060641,
286
+ "eval_loss": 0.21086551249027252,
287
+ "eval_precision": 0.9171348873594787,
288
+ "eval_recall": 0.9169998194526837,
289
+ "eval_runtime": 80.1992,
290
+ "eval_samples_per_second": 966.867,
291
+ "eval_steps_per_second": 60.437,
292
+ "step": 3000
293
+ },
294
+ {
295
+ "epoch": 0.07995460641700196,
296
+ "grad_norm": 5.789911270141602,
297
+ "learning_rate": 1.9551184637701185e-05,
298
+ "loss": 0.2138,
299
+ "step": 3100
300
+ },
301
+ {
302
+ "epoch": 0.08253378726916331,
303
+ "grad_norm": 8.613458633422852,
304
+ "learning_rate": 1.9533915866546937e-05,
305
+ "loss": 0.2323,
306
+ "step": 3200
307
+ },
308
+ {
309
+ "epoch": 0.08511296812132467,
310
+ "grad_norm": 5.5360822677612305,
311
+ "learning_rate": 1.9516647095392692e-05,
312
+ "loss": 0.2474,
313
+ "step": 3300
314
+ },
315
+ {
316
+ "epoch": 0.08769214897348603,
317
+ "grad_norm": 15.3199462890625,
318
+ "learning_rate": 1.949937832423845e-05,
319
+ "loss": 0.1964,
320
+ "step": 3400
321
+ },
322
+ {
323
+ "epoch": 0.09027132982564738,
324
+ "grad_norm": 0.23756052553653717,
325
+ "learning_rate": 1.9482109553084202e-05,
326
+ "loss": 0.2048,
327
+ "step": 3500
328
+ },
329
+ {
330
+ "epoch": 0.09027132982564738,
331
+ "eval_accuracy": 0.9212555776224498,
332
+ "eval_f1": 0.9212388888583593,
333
+ "eval_loss": 0.217011496424675,
334
+ "eval_precision": 0.9220491698721403,
335
+ "eval_recall": 0.9212555776224498,
336
+ "eval_runtime": 82.2451,
337
+ "eval_samples_per_second": 942.816,
338
+ "eval_steps_per_second": 58.934,
339
+ "step": 3500
340
+ },
341
+ {
342
+ "epoch": 0.09285051067780872,
343
+ "grad_norm": 3.4589858055114746,
344
+ "learning_rate": 1.9464840781929957e-05,
345
+ "loss": 0.2335,
346
+ "step": 3600
347
+ },
348
+ {
349
+ "epoch": 0.09542969152997008,
350
+ "grad_norm": 6.5932111740112305,
351
+ "learning_rate": 1.9447572010775716e-05,
352
+ "loss": 0.2239,
353
+ "step": 3700
354
+ },
355
+ {
356
+ "epoch": 0.09800887238213143,
357
+ "grad_norm": 2.6831107139587402,
358
+ "learning_rate": 1.943030323962147e-05,
359
+ "loss": 0.2049,
360
+ "step": 3800
361
+ },
362
+ {
363
+ "epoch": 0.10058805323429279,
364
+ "grad_norm": 26.725677490234375,
365
+ "learning_rate": 1.9413034468467223e-05,
366
+ "loss": 0.2404,
367
+ "step": 3900
368
+ },
369
+ {
370
+ "epoch": 0.10316723408645415,
371
+ "grad_norm": 26.966718673706055,
372
+ "learning_rate": 1.939576569731298e-05,
373
+ "loss": 0.241,
374
+ "step": 4000
375
+ },
376
+ {
377
+ "epoch": 0.10316723408645415,
378
+ "eval_accuracy": 0.9104227386448634,
379
+ "eval_f1": 0.9102303033501772,
380
+ "eval_loss": 0.22579680383205414,
381
+ "eval_precision": 0.9150459252068764,
382
+ "eval_recall": 0.9104227386448634,
383
+ "eval_runtime": 81.0753,
384
+ "eval_samples_per_second": 956.419,
385
+ "eval_steps_per_second": 59.784,
386
+ "step": 4000
387
+ },
388
+ {
389
+ "epoch": 0.1057464149386155,
390
+ "grad_norm": 17.290939331054688,
391
+ "learning_rate": 1.9378496926158737e-05,
392
+ "loss": 0.2298,
393
+ "step": 4100
394
+ },
395
+ {
396
+ "epoch": 0.10832559579077684,
397
+ "grad_norm": 8.844673156738281,
398
+ "learning_rate": 1.9361228155004492e-05,
399
+ "loss": 0.224,
400
+ "step": 4200
401
+ },
402
+ {
403
+ "epoch": 0.1109047766429382,
404
+ "grad_norm": 2.0837674140930176,
405
+ "learning_rate": 1.9343959383850247e-05,
406
+ "loss": 0.2367,
407
+ "step": 4300
408
+ },
409
+ {
410
+ "epoch": 0.11348395749509955,
411
+ "grad_norm": 7.639578819274902,
412
+ "learning_rate": 1.9326690612696002e-05,
413
+ "loss": 0.2194,
414
+ "step": 4400
415
+ },
416
+ {
417
+ "epoch": 0.11606313834726091,
418
+ "grad_norm": 9.39929485321045,
419
+ "learning_rate": 1.9309421841541757e-05,
420
+ "loss": 0.2297,
421
+ "step": 4500
422
+ },
423
+ {
424
+ "epoch": 0.11606313834726091,
425
+ "eval_accuracy": 0.9185473678780531,
426
+ "eval_f1": 0.918488362019573,
427
+ "eval_loss": 0.24766957759857178,
428
+ "eval_precision": 0.9204437423697496,
429
+ "eval_recall": 0.9185473678780531,
430
+ "eval_runtime": 80.6546,
431
+ "eval_samples_per_second": 961.408,
432
+ "eval_steps_per_second": 60.096,
433
+ "step": 4500
434
+ },
435
+ {
436
+ "epoch": 0.11864231919942227,
437
+ "grad_norm": 63.414676666259766,
438
+ "learning_rate": 1.9292153070387512e-05,
439
+ "loss": 0.2067,
440
+ "step": 4600
441
+ },
442
+ {
443
+ "epoch": 0.12122150005158362,
444
+ "grad_norm": 27.895687103271484,
445
+ "learning_rate": 1.9274884299233268e-05,
446
+ "loss": 0.1975,
447
+ "step": 4700
448
+ },
449
+ {
450
+ "epoch": 0.12380068090374496,
451
+ "grad_norm": 5.536584377288818,
452
+ "learning_rate": 1.9257615528079023e-05,
453
+ "loss": 0.2141,
454
+ "step": 4800
455
+ },
456
+ {
457
+ "epoch": 0.12637986175590632,
458
+ "grad_norm": 18.6892147064209,
459
+ "learning_rate": 1.9240346756924778e-05,
460
+ "loss": 0.2549,
461
+ "step": 4900
462
+ },
463
+ {
464
+ "epoch": 0.12895904260806768,
465
+ "grad_norm": 3.705787420272827,
466
+ "learning_rate": 1.9223077985770536e-05,
467
+ "loss": 0.2187,
468
+ "step": 5000
469
+ },
470
+ {
471
+ "epoch": 0.12895904260806768,
472
+ "eval_accuracy": 0.9244409481313353,
473
+ "eval_f1": 0.9243951823390982,
474
+ "eval_loss": 0.22254011034965515,
475
+ "eval_precision": 0.9250643288957023,
476
+ "eval_recall": 0.9244409481313353,
477
+ "eval_runtime": 84.6134,
478
+ "eval_samples_per_second": 916.427,
479
+ "eval_steps_per_second": 57.284,
480
+ "step": 5000
481
+ },
482
+ {
483
+ "epoch": 0.13153822346022903,
484
+ "grad_norm": 10.738173484802246,
485
+ "learning_rate": 1.9205809214616288e-05,
486
+ "loss": 0.2143,
487
+ "step": 5100
488
+ },
489
+ {
490
+ "epoch": 0.1341174043123904,
491
+ "grad_norm": 7.39179801940918,
492
+ "learning_rate": 1.9188540443462043e-05,
493
+ "loss": 0.205,
494
+ "step": 5200
495
+ },
496
+ {
497
+ "epoch": 0.13669658516455174,
498
+ "grad_norm": 14.402508735656738,
499
+ "learning_rate": 1.9171271672307802e-05,
500
+ "loss": 0.2027,
501
+ "step": 5300
502
+ },
503
+ {
504
+ "epoch": 0.1392757660167131,
505
+ "grad_norm": 8.077102661132812,
506
+ "learning_rate": 1.9154002901153554e-05,
507
+ "loss": 0.1909,
508
+ "step": 5400
509
+ },
510
+ {
511
+ "epoch": 0.14185494686887445,
512
+ "grad_norm": 2.036444664001465,
513
+ "learning_rate": 1.913673412999931e-05,
514
+ "loss": 0.2285,
515
+ "step": 5500
516
+ },
517
+ {
518
+ "epoch": 0.14185494686887445,
519
+ "eval_accuracy": 0.9220164555982564,
520
+ "eval_f1": 0.9220066809051832,
521
+ "eval_loss": 0.2105225920677185,
522
+ "eval_precision": 0.9220853946451525,
523
+ "eval_recall": 0.9220164555982564,
524
+ "eval_runtime": 85.408,
525
+ "eval_samples_per_second": 907.901,
526
+ "eval_steps_per_second": 56.751,
527
+ "step": 5500
528
+ },
529
+ {
530
+ "epoch": 0.1444341277210358,
531
+ "grad_norm": 3.172539472579956,
532
+ "learning_rate": 1.9119465358845067e-05,
533
+ "loss": 0.1839,
534
+ "step": 5600
535
+ },
536
+ {
537
+ "epoch": 0.14701330857319717,
538
+ "grad_norm": 4.246190547943115,
539
+ "learning_rate": 1.9102196587690823e-05,
540
+ "loss": 0.2248,
541
+ "step": 5700
542
+ },
543
+ {
544
+ "epoch": 0.1495924894253585,
545
+ "grad_norm": 9.676543235778809,
546
+ "learning_rate": 1.9084927816536574e-05,
547
+ "loss": 0.2003,
548
+ "step": 5800
549
+ },
550
+ {
551
+ "epoch": 0.15217167027751985,
552
+ "grad_norm": 14.093317031860352,
553
+ "learning_rate": 1.9067659045382333e-05,
554
+ "loss": 0.2283,
555
+ "step": 5900
556
+ },
557
+ {
558
+ "epoch": 0.1547508511296812,
559
+ "grad_norm": 6.108678817749023,
560
+ "learning_rate": 1.9050390274228088e-05,
561
+ "loss": 0.2001,
562
+ "step": 6000
563
+ },
564
+ {
565
+ "epoch": 0.1547508511296812,
566
+ "eval_accuracy": 0.9288514611436383,
567
+ "eval_f1": 0.9288285810713337,
568
+ "eval_loss": 0.19547687470912933,
569
+ "eval_precision": 0.9291294558172835,
570
+ "eval_recall": 0.9288514611436383,
571
+ "eval_runtime": 85.1832,
572
+ "eval_samples_per_second": 910.297,
573
+ "eval_steps_per_second": 56.901,
574
+ "step": 6000
575
+ },
576
+ {
577
+ "epoch": 0.15733003198184256,
578
+ "grad_norm": 25.980791091918945,
579
+ "learning_rate": 1.9033121503073843e-05,
580
+ "loss": 0.2045,
581
+ "step": 6100
582
+ },
583
+ {
584
+ "epoch": 0.15990921283400392,
585
+ "grad_norm": 3.697967767715454,
586
+ "learning_rate": 1.90158527319196e-05,
587
+ "loss": 0.1921,
588
+ "step": 6200
589
+ },
590
+ {
591
+ "epoch": 0.16248839368616527,
592
+ "grad_norm": 6.734536647796631,
593
+ "learning_rate": 1.8998583960765353e-05,
594
+ "loss": 0.2398,
595
+ "step": 6300
596
+ },
597
+ {
598
+ "epoch": 0.16506757453832663,
599
+ "grad_norm": 4.171863555908203,
600
+ "learning_rate": 1.898131518961111e-05,
601
+ "loss": 0.1851,
602
+ "step": 6400
603
+ },
604
+ {
605
+ "epoch": 0.16764675539048798,
606
+ "grad_norm": 8.22290325164795,
607
+ "learning_rate": 1.8964046418456864e-05,
608
+ "loss": 0.2048,
609
+ "step": 6500
610
+ },
611
+ {
612
+ "epoch": 0.16764675539048798,
613
+ "eval_accuracy": 0.9237316551030409,
614
+ "eval_f1": 0.9237108101584136,
615
+ "eval_loss": 0.21942158043384552,
616
+ "eval_precision": 0.9246712025171661,
617
+ "eval_recall": 0.9237316551030409,
618
+ "eval_runtime": 87.4195,
619
+ "eval_samples_per_second": 887.01,
620
+ "eval_steps_per_second": 55.445,
621
+ "step": 6500
622
+ },
623
+ {
624
+ "epoch": 0.17022593624264934,
625
+ "grad_norm": 1.4064490795135498,
626
+ "learning_rate": 1.894677764730262e-05,
627
+ "loss": 0.2192,
628
+ "step": 6600
629
+ },
630
+ {
631
+ "epoch": 0.1728051170948107,
632
+ "grad_norm": 0.8423879146575928,
633
+ "learning_rate": 1.8929508876148374e-05,
634
+ "loss": 0.2296,
635
+ "step": 6700
636
+ },
637
+ {
638
+ "epoch": 0.17538429794697205,
639
+ "grad_norm": 2.1388964653015137,
640
+ "learning_rate": 1.891224010499413e-05,
641
+ "loss": 0.2397,
642
+ "step": 6800
643
+ },
644
+ {
645
+ "epoch": 0.1779634787991334,
646
+ "grad_norm": 6.909358501434326,
647
+ "learning_rate": 1.8894971333839888e-05,
648
+ "loss": 0.1902,
649
+ "step": 6900
650
+ },
651
+ {
652
+ "epoch": 0.18054265965129476,
653
+ "grad_norm": 1.044767141342163,
654
+ "learning_rate": 1.887770256268564e-05,
655
+ "loss": 0.1931,
656
+ "step": 7000
657
+ },
658
+ {
659
+ "epoch": 0.18054265965129476,
660
+ "eval_accuracy": 0.9318046993887183,
661
+ "eval_f1": 0.9318026741133476,
662
+ "eval_loss": 0.18849702179431915,
663
+ "eval_precision": 0.9318098201446341,
664
+ "eval_recall": 0.9318046993887183,
665
+ "eval_runtime": 85.7797,
666
+ "eval_samples_per_second": 903.967,
667
+ "eval_steps_per_second": 56.505,
668
+ "step": 7000
669
+ },
670
+ {
671
+ "epoch": 0.1831218405034561,
672
+ "grad_norm": 9.8370943069458,
673
+ "learning_rate": 1.8860433791531395e-05,
674
+ "loss": 0.2179,
675
+ "step": 7100
676
+ },
677
+ {
678
+ "epoch": 0.18570102135561745,
679
+ "grad_norm": 3.937364101409912,
680
+ "learning_rate": 1.8843165020377153e-05,
681
+ "loss": 0.2017,
682
+ "step": 7200
683
+ },
684
+ {
685
+ "epoch": 0.1882802022077788,
686
+ "grad_norm": 1.7348313331604004,
687
+ "learning_rate": 1.8825896249222905e-05,
688
+ "loss": 0.2098,
689
+ "step": 7300
690
+ },
691
+ {
692
+ "epoch": 0.19085938305994016,
693
+ "grad_norm": 13.738706588745117,
694
+ "learning_rate": 1.880862747806866e-05,
695
+ "loss": 0.2176,
696
+ "step": 7400
697
+ },
698
+ {
699
+ "epoch": 0.1934385639121015,
700
+ "grad_norm": 7.888184547424316,
701
+ "learning_rate": 1.879135870691442e-05,
702
+ "loss": 0.2052,
703
+ "step": 7500
704
+ },
705
+ {
706
+ "epoch": 0.1934385639121015,
707
+ "eval_accuracy": 0.9288256686698821,
708
+ "eval_f1": 0.9287744301894623,
709
+ "eval_loss": 0.19062775373458862,
710
+ "eval_precision": 0.9296090951021123,
711
+ "eval_recall": 0.9288256686698821,
712
+ "eval_runtime": 84.5191,
713
+ "eval_samples_per_second": 917.45,
714
+ "eval_steps_per_second": 57.348,
715
+ "step": 7500
716
+ },
717
+ {
718
+ "epoch": 0.19601774476426287,
719
+ "grad_norm": 8.46711254119873,
720
+ "learning_rate": 1.8774089935760174e-05,
721
+ "loss": 0.2069,
722
+ "step": 7600
723
+ },
724
+ {
725
+ "epoch": 0.19859692561642422,
726
+ "grad_norm": 0.5938565731048584,
727
+ "learning_rate": 1.8756821164605926e-05,
728
+ "loss": 0.1842,
729
+ "step": 7700
730
+ },
731
+ {
732
+ "epoch": 0.20117610646858558,
733
+ "grad_norm": 14.665600776672363,
734
+ "learning_rate": 1.8739552393451684e-05,
735
+ "loss": 0.2097,
736
+ "step": 7800
737
+ },
738
+ {
739
+ "epoch": 0.20375528732074694,
740
+ "grad_norm": 11.763516426086426,
741
+ "learning_rate": 1.872228362229744e-05,
742
+ "loss": 0.221,
743
+ "step": 7900
744
+ },
745
+ {
746
+ "epoch": 0.2063344681729083,
747
+ "grad_norm": 3.6280648708343506,
748
+ "learning_rate": 1.8705014851143195e-05,
749
+ "loss": 0.1904,
750
+ "step": 8000
751
+ },
752
+ {
753
+ "epoch": 0.2063344681729083,
754
+ "eval_accuracy": 0.929612339119445,
755
+ "eval_f1": 0.9296143948889304,
756
+ "eval_loss": 0.18757739663124084,
757
+ "eval_precision": 0.9297072870129078,
758
+ "eval_recall": 0.929612339119445,
759
+ "eval_runtime": 85.1458,
760
+ "eval_samples_per_second": 910.696,
761
+ "eval_steps_per_second": 56.926,
762
+ "step": 8000
763
+ },
764
+ {
765
+ "epoch": 0.20891364902506965,
766
+ "grad_norm": 15.164484977722168,
767
+ "learning_rate": 1.868774607998895e-05,
768
+ "loss": 0.1958,
769
+ "step": 8100
770
+ },
771
+ {
772
+ "epoch": 0.211492829877231,
773
+ "grad_norm": 2.8772385120391846,
774
+ "learning_rate": 1.8670477308834705e-05,
775
+ "loss": 0.1878,
776
+ "step": 8200
777
+ },
778
+ {
779
+ "epoch": 0.21407201072939233,
780
+ "grad_norm": 2.487945079803467,
781
+ "learning_rate": 1.865320853768046e-05,
782
+ "loss": 0.2033,
783
+ "step": 8300
784
+ },
785
+ {
786
+ "epoch": 0.2166511915815537,
787
+ "grad_norm": 9.773316383361816,
788
+ "learning_rate": 1.8635939766526215e-05,
789
+ "loss": 0.1709,
790
+ "step": 8400
791
+ },
792
+ {
793
+ "epoch": 0.21923037243371504,
794
+ "grad_norm": 11.347258567810059,
795
+ "learning_rate": 1.861867099537197e-05,
796
+ "loss": 0.2125,
797
+ "step": 8500
798
+ },
799
+ {
800
+ "epoch": 0.21923037243371504,
801
+ "eval_accuracy": 0.9292125557762245,
802
+ "eval_f1": 0.9291884106571301,
803
+ "eval_loss": 0.18593531847000122,
804
+ "eval_precision": 0.9303160172840454,
805
+ "eval_recall": 0.9292125557762245,
806
+ "eval_runtime": 82.8305,
807
+ "eval_samples_per_second": 936.153,
808
+ "eval_steps_per_second": 58.517,
809
+ "step": 8500
810
+ }
811
+ ],
812
+ "logging_steps": 100,
813
+ "max_steps": 116316,
814
+ "num_input_tokens_seen": 0,
815
+ "num_train_epochs": 3,
816
+ "save_steps": 500,
817
+ "stateful_callbacks": {
818
+ "EarlyStoppingCallback": {
819
+ "args": {
820
+ "early_stopping_patience": 3,
821
+ "early_stopping_threshold": 0.0
822
+ },
823
+ "attributes": {
824
+ "early_stopping_patience_counter": 3
825
+ }
826
+ },
827
+ "TrainerControl": {
828
+ "args": {
829
+ "should_epoch_stop": false,
830
+ "should_evaluate": false,
831
+ "should_log": false,
832
+ "should_save": true,
833
+ "should_training_stop": true
834
+ },
835
+ "attributes": {}
836
+ }
837
+ },
838
+ "total_flos": 3.072344504550528e+16,
839
+ "train_batch_size": 16,
840
+ "trial_name": null,
841
+ "trial_params": null
842
+ }
checkpoint-8500/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce8abd265ced130b3a60da4969f0ccef6e504e9f0ee20e35110ba851d1b32cb3
3
+ size 5777
config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "XLMRobertaForSequenceClassification"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "bos_token_id": 0,
7
+ "classifier_dropout": null,
8
+ "dtype": "float32",
9
+ "eos_token_id": 2,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 3072,
15
+ "layer_norm_eps": 1e-05,
16
+ "max_position_embeddings": 514,
17
+ "model_type": "xlm-roberta",
18
+ "num_attention_heads": 12,
19
+ "num_hidden_layers": 12,
20
+ "output_past": true,
21
+ "pad_token_id": 1,
22
+ "position_embedding_type": "absolute",
23
+ "problem_type": "single_label_classification",
24
+ "transformers_version": "4.57.3",
25
+ "type_vocab_size": 1,
26
+ "use_cache": true,
27
+ "vocab_size": 250002
28
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11581fcf59ed6effc51225813116b13565a436317e8381e00444ac3ce13d695e
3
+ size 1112205008
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
3
+ size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "<unk>"
15
+ }
test.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Directory containing model.safetensors + config.json + tokenizer files.
# NOTE: from_pretrained expects a *directory* (or hub model id), not the
# weights file itself — passing "model.safetensors" fails to load.
MODEL_PATH = "."

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)
model.eval()  # disable dropout so inference is deterministic


def predict(text: str) -> dict:
    """Classify *text* and return the predicted label id with its confidence.

    Returns a dict with:
        label_id   -- index of the highest-probability class
        confidence -- softmax probability of that class
    """
    inputs = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        padding=True,
        max_length=512,
    )

    # No gradients needed for pure inference.
    with torch.no_grad():
        outputs = model(**inputs)

    probs = torch.softmax(outputs.logits, dim=-1)
    pred_id = int(torch.argmax(probs, dim=-1).item())
    confidence = probs[0, pred_id].item()

    return {
        "label_id": pred_id,
        "confidence": confidence,
    }


if __name__ == "__main__":
    print(predict("Салем калын калай?"))
tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<s>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<pad>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "<unk>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "250001": {
36
+ "content": "<mask>",
37
+ "lstrip": true,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "bos_token": "<s>",
45
+ "clean_up_tokenization_spaces": false,
46
+ "cls_token": "<s>",
47
+ "eos_token": "</s>",
48
+ "extra_special_tokens": {},
49
+ "mask_token": "<mask>",
50
+ "model_max_length": 512,
51
+ "pad_token": "<pad>",
52
+ "sep_token": "</s>",
53
+ "sp_model_kwargs": {},
54
+ "tokenizer_class": "XLMRobertaTokenizer",
55
+ "unk_token": "<unk>"
56
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce8abd265ced130b3a60da4969f0ccef6e504e9f0ee20e35110ba851d1b32cb3
3
+ size 5777