CNR-ILC committed
Commit 7bfa702 (verified) · 1 parent: 259b085

ILC-CNR/gs-Logion
README.md ADDED
@@ -0,0 +1,71 @@
+ ---
+ library_name: transformers
+ base_model: cabrooks/LOGION-50k_wordpiece
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: gs-Logion
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # gs-Logion
+
+ This model is a fine-tuned version of [cabrooks/LOGION-50k_wordpiece](https://huggingface.co/cabrooks/LOGION-50k_wordpiece) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2.3943
+ - Top1 Acc: 0.5342
+ - Top5 Acc: 0.6849
+ - Top10 Acc: 0.7671
+ - Top20 Acc: 0.7945
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: adamw_torch (betas=(0.9, 0.999), epsilon=1e-08); no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - num_epochs: 10
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step  | Validation Loss | Top1 Acc | Top5 Acc | Top10 Acc | Top20 Acc |
+ |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:|:---------:|:---------:|
+ | 3.6348        | 1.0   | 1972  | 3.1662          | 0.5059   | 0.6647   | 0.7235    | 0.7471    |
+ | 3.1196        | 2.0   | 3944  | 2.8723          | 0.4898   | 0.6531   | 0.7075    | 0.7959    |
+ | 2.9131        | 3.0   | 5916  | 2.7926          | 0.5000   | 0.6644   | 0.7671    | 0.8014    |
+ | 2.7745        | 4.0   | 7888  | 2.6731          | 0.5102   | 0.6871   | 0.7755    | 0.8095    |
+ | 2.6649        | 5.0   | 9860  | 2.6617          | 0.5660   | 0.6918   | 0.7484    | 0.7862    |
+ | 2.5923        | 6.0   | 11832 | 2.5783          | 0.5759   | 0.7089   | 0.7405    | 0.7722    |
+ | 2.5206        | 7.0   | 13804 | 2.5101          | 0.6458   | 0.7708   | 0.8264    | 0.8611    |
+ | 2.4581        | 8.0   | 15776 | 2.4972          | 0.5874   | 0.7343   | 0.7972    | 0.8252    |
+ | 2.4239        | 9.0   | 17748 | 2.4337          | 0.5685   | 0.7329   | 0.7740    | 0.8082    |
+ | 2.4010        | 10.0  | 19720 | 2.4897          | 0.5804   | 0.7343   | 0.8042    | 0.8462    |
+
+ ### Framework versions
+
+ - Transformers 4.51.3
+ - Pytorch 2.7.0+cu126
+ - Datasets 3.5.1
+ - Tokenizers 0.21.1
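
The card leaves usage unspecified; below is a minimal inference sketch, assuming the checkpoint is published under the repository id ILC-CNR/gs-Logion shown in this commit. The Greek input is a hypothetical placeholder, not taken from the (unknown) training data.

```python
from transformers import pipeline

# Masked-token prediction with the checkpoint added in this commit.
# The sample sentence is a placeholder; supply your own Greek text
# containing exactly one [MASK] token.
fill = pipeline("fill-mask", model="ILC-CNR/gs-Logion")

for pred in fill("ἐν ἀρχῇ ἦν ὁ [MASK]."):
    print(f"{pred['token_str']:>15}  {pred['score']:.4f}")
```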
all_results.json ADDED
@@ -0,0 +1,17 @@
+ {
+     "epoch": 10.0,
+     "eval_loss": 2.3943166732788086,
+     "eval_runtime": 935.4163,
+     "eval_samples_per_second": 3.88,
+     "eval_steps_per_second": 0.485,
+     "eval_top10_acc": 0.7671232876712328,
+     "eval_top1_acc": 0.5342465753424658,
+     "eval_top20_acc": 0.7945205479452054,
+     "eval_top5_acc": 0.684931506849315,
+     "step": 19720,
+     "total_flos": 2.076631553015808e+16,
+     "train_loss": 2.7502908888501523,
+     "train_runtime": 4509.892,
+     "train_samples_per_second": 69.971,
+     "train_steps_per_second": 4.373
+ }
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "BertForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.3",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 50000
+ }
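
The config declares a standard BERT-base masked LM with an enlarged 50,000-token vocabulary. A short sketch for inspecting it programmatically, assuming the same repo id as above:

```python
from transformers import AutoConfig, AutoModelForMaskedLM

# Fields mirror config.json: bert, 12 layers, hidden size 768, vocab 50000.
config = AutoConfig.from_pretrained("ILC-CNR/gs-Logion")
print(config.model_type, config.num_hidden_layers, config.hidden_size, config.vocab_size)

# Loading the weights; the 497,995,232-byte float32 safetensors file below
# corresponds to roughly 124.5M parameters (bytes / 4).
model = AutoModelForMaskedLM.from_pretrained("ILC-CNR/gs-Logion")
print(sum(p.numel() for p in model.parameters()))
```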
eval_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+     "epoch": 10.0,
+     "eval_loss": 2.3943166732788086,
+     "eval_runtime": 935.4163,
+     "eval_samples_per_second": 3.88,
+     "eval_steps_per_second": 0.485,
+     "eval_top10_acc": 0.7671232876712328,
+     "eval_top1_acc": 0.5342465753424658,
+     "eval_top20_acc": 0.7945205479452054,
+     "eval_top5_acc": 0.684931506849315
+ }
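
The commit does not include the metric code itself; a plausible sketch of how per-mask top-k accuracies like the ones above can be computed from masked-LM logits (function and variable names are illustrative):

```python
import torch

def topk_accuracy(logits: torch.Tensor, labels: torch.Tensor, k: int) -> float:
    """Fraction of masked positions whose gold token id appears among the
    k highest-scoring vocabulary entries. logits: (n_masked, vocab_size);
    labels: (n_masked,)."""
    topk_ids = logits.topk(k, dim=-1).indices           # (n_masked, k)
    hits = (topk_ids == labels.unsqueeze(-1)).any(dim=-1)
    return hits.float().mean().item()

# Smoke test on random data, using this model's vocabulary size.
logits = torch.randn(8, 50000)
labels = torch.randint(0, 50000, (8,))
for k in (1, 5, 10, 20):
    print(k, topk_accuracy(logits, labels, k))
```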
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77d84373add14b8c0d1b64f4f782c44a02775aab469d9bd56990a1dfc9bd26f6
+ size 497995232
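
This entry is a Git LFS pointer, not the weights themselves; the real file is fetched on clone or download. Its integrity can then be checked against the recorded digest, as in this minimal sketch (assumes a local copy of the file):

```python
import hashlib

# sha256 oid from the LFS pointer above (file size 497,995,232 bytes).
EXPECTED = "77d84373add14b8c0d1b64f4f782c44a02775aab469d9bd56990a1dfc9bd26f6"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED, "checksum mismatch"
```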
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
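
Note do_lower_case: true and model_max_length: 512 above. A minimal sketch of loading this WordPiece tokenizer by repo id (the sample string is a placeholder):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("ILC-CNR/gs-Logion")

# Input is lowercased per the config; sequences are capped at 512 tokens.
enc = tok("ἐν ἀρχῇ ἦν ὁ λόγος")
print(enc.input_ids)
print(tok.convert_ids_to_tokens(enc.input_ids))
```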
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 10.0,
+     "step": 19720,
+     "total_flos": 2.076631553015808e+16,
+     "train_loss": 2.7502908888501523,
+     "train_runtime": 4509.892,
+     "train_samples_per_second": 69.971,
+     "train_steps_per_second": 4.373
+ }
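
As a quick consistency check on these figures (arithmetic only, using values from this file and the hyperparameters in the README):

```python
steps, epochs, batch = 19720, 10, 16
print(steps / epochs)               # 1972 optimizer steps per epoch
print(steps / epochs * batch)       # ~31,552 training samples per epoch
print(69.971 * 4509.892 / epochs)   # ~31,556 -- consistent within rounding
```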
trainer_state.json ADDED
@@ -0,0 +1,245 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 10.0,
+   "eval_steps": 500,
+   "global_step": 19720,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "grad_norm": 6.913986682891846,
+       "learning_rate": 4.500507099391481e-05,
+       "loss": 3.6348,
+       "step": 1972
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 3.166245222091675,
+       "eval_runtime": 259.7532,
+       "eval_samples_per_second": 3.823,
+       "eval_steps_per_second": 0.481,
+       "eval_top10_acc": 0.7235294117647059,
+       "eval_top1_acc": 0.5058823529411764,
+       "eval_top20_acc": 0.7470588235294118,
+       "eval_top5_acc": 0.6647058823529411,
+       "step": 1972
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 6.612165927886963,
+       "learning_rate": 4.000760649087221e-05,
+       "loss": 3.1196,
+       "step": 3944
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 2.872304916381836,
+       "eval_runtime": 260.1173,
+       "eval_samples_per_second": 3.818,
+       "eval_steps_per_second": 0.481,
+       "eval_top10_acc": 0.7074829931972789,
+       "eval_top1_acc": 0.4897959183673469,
+       "eval_top20_acc": 0.7959183673469388,
+       "eval_top5_acc": 0.6530612244897959,
+       "step": 3944
+     },
+     {
+       "epoch": 3.0,
+       "grad_norm": 6.386370658874512,
+       "learning_rate": 3.5010141987829614e-05,
+       "loss": 2.9131,
+       "step": 5916
+     },
+     {
+       "epoch": 3.0,
+       "eval_loss": 2.7925734519958496,
+       "eval_runtime": 257.3182,
+       "eval_samples_per_second": 3.859,
+       "eval_steps_per_second": 0.486,
+       "eval_top10_acc": 0.7671232876712328,
+       "eval_top1_acc": 0.5,
+       "eval_top20_acc": 0.8013698630136986,
+       "eval_top5_acc": 0.6643835616438356,
+       "step": 5916
+     },
+     {
+       "epoch": 4.0,
+       "grad_norm": 8.058805465698242,
+       "learning_rate": 3.0010141987829615e-05,
+       "loss": 2.7745,
+       "step": 7888
+     },
+     {
+       "epoch": 4.0,
+       "eval_loss": 2.6731116771698,
+       "eval_runtime": 260.3384,
+       "eval_samples_per_second": 3.814,
+       "eval_steps_per_second": 0.48,
+       "eval_top10_acc": 0.7755102040816326,
+       "eval_top1_acc": 0.5102040816326531,
+       "eval_top20_acc": 0.8095238095238095,
+       "eval_top5_acc": 0.6870748299319728,
+       "step": 7888
+     },
+     {
+       "epoch": 5.0,
+       "grad_norm": 7.619845390319824,
+       "learning_rate": 2.5010141987829615e-05,
+       "loss": 2.6649,
+       "step": 9860
+     },
+     {
+       "epoch": 5.0,
+       "eval_loss": 2.6617324352264404,
+       "eval_runtime": 257.8712,
+       "eval_samples_per_second": 3.851,
+       "eval_steps_per_second": 0.485,
+       "eval_top10_acc": 0.7484276729559748,
+       "eval_top1_acc": 0.5660377358490566,
+       "eval_top20_acc": 0.7861635220125787,
+       "eval_top5_acc": 0.6918238993710691,
+       "step": 9860
+     },
+     {
+       "epoch": 6.0,
+       "grad_norm": 6.427279949188232,
+       "learning_rate": 2.001521298174442e-05,
+       "loss": 2.5923,
+       "step": 11832
+     },
+     {
+       "epoch": 6.0,
+       "eval_loss": 2.5783021450042725,
+       "eval_runtime": 257.5407,
+       "eval_samples_per_second": 3.856,
+       "eval_steps_per_second": 0.485,
+       "eval_top10_acc": 0.740506329113924,
+       "eval_top1_acc": 0.5759493670886076,
+       "eval_top20_acc": 0.7721518987341772,
+       "eval_top5_acc": 0.7088607594936709,
+       "step": 11832
+     },
+     {
+       "epoch": 7.0,
+       "grad_norm": 7.530773639678955,
+       "learning_rate": 1.5017748478701827e-05,
+       "loss": 2.5206,
+       "step": 13804
+     },
+     {
+       "epoch": 7.0,
+       "eval_loss": 2.5101499557495117,
+       "eval_runtime": 254.4047,
+       "eval_samples_per_second": 3.903,
+       "eval_steps_per_second": 0.491,
+       "eval_top10_acc": 0.8263888888888888,
+       "eval_top1_acc": 0.6458333333333334,
+       "eval_top20_acc": 0.8611111111111112,
+       "eval_top5_acc": 0.7708333333333334,
+       "step": 13804
+     },
+     {
+       "epoch": 8.0,
+       "grad_norm": 6.4842047691345215,
+       "learning_rate": 1.0017748478701826e-05,
+       "loss": 2.4581,
+       "step": 15776
+     },
+     {
+       "epoch": 8.0,
+       "eval_loss": 2.4972355365753174,
+       "eval_runtime": 256.1582,
+       "eval_samples_per_second": 3.877,
+       "eval_steps_per_second": 0.488,
+       "eval_top10_acc": 0.7972027972027972,
+       "eval_top1_acc": 0.5874125874125874,
+       "eval_top20_acc": 0.8251748251748252,
+       "eval_top5_acc": 0.7342657342657343,
+       "step": 15776
+     },
+     {
+       "epoch": 9.0,
+       "grad_norm": 7.002655029296875,
+       "learning_rate": 5.017748478701826e-06,
+       "loss": 2.4239,
+       "step": 17748
+     },
+     {
+       "epoch": 9.0,
+       "eval_loss": 2.4337377548217773,
+       "eval_runtime": 255.3165,
+       "eval_samples_per_second": 3.889,
+       "eval_steps_per_second": 0.49,
+       "eval_top10_acc": 0.773972602739726,
+       "eval_top1_acc": 0.5684931506849316,
+       "eval_top20_acc": 0.8082191780821918,
+       "eval_top5_acc": 0.7328767123287672,
+       "step": 17748
+     },
+     {
+       "epoch": 10.0,
+       "grad_norm": 6.361621856689453,
+       "learning_rate": 2.281947261663286e-08,
+       "loss": 2.401,
+       "step": 19720
+     },
+     {
+       "epoch": 10.0,
+       "eval_loss": 2.4897375106811523,
+       "eval_runtime": 257.3155,
+       "eval_samples_per_second": 3.859,
+       "eval_steps_per_second": 0.486,
+       "eval_top10_acc": 0.8041958041958042,
+       "eval_top1_acc": 0.5804195804195804,
+       "eval_top20_acc": 0.8461538461538461,
+       "eval_top5_acc": 0.7342657342657343,
+       "step": 19720
+     },
+     {
+       "epoch": 10.0,
+       "step": 19720,
+       "total_flos": 2.076631553015808e+16,
+       "train_loss": 2.7502908888501523,
+       "train_runtime": 4509.892,
+       "train_samples_per_second": 69.971,
+       "train_steps_per_second": 4.373
+     },
+     {
+       "epoch": 10.0,
+       "eval_loss": 2.3943166732788086,
+       "eval_runtime": 935.4163,
+       "eval_samples_per_second": 3.88,
+       "eval_steps_per_second": 0.485,
+       "eval_top10_acc": 0.7671232876712328,
+       "eval_top1_acc": 0.5342465753424658,
+       "eval_top20_acc": 0.7945205479452054,
+       "eval_top5_acc": 0.684931506849315,
+       "step": 19720
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 19720,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": false,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 2.076631553015808e+16,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
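
Since best_model_checkpoint is null, no best model was tracked during training; the best epoch has to be read off the log. A minimal sketch, assuming the file is available locally under its repository name:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only evaluation entries and pick the lowest validation loss.
evals = [e for e in state["log_history"] if "eval_loss" in e]
best = min(evals, key=lambda e: e["eval_loss"])
print(best["epoch"], best["eval_loss"], best.get("eval_top1_acc"))
```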
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56d990e78a87875a305cb75d68d2e66e4c4d1896a4e05bd3d7dde0c1e1ebd2d3
+ size 5649
vocab.txt ADDED
The diff for this file is too large to render. See raw diff