Commit 065ede7 (verified), committed by floflodebilbao · Parent: 95344f1

End of training
README.md ADDED
---
library_name: transformers
license: apache-2.0
base_model: allenai/led-base-16384
tags:
- generated_from_trainer
metrics:
- rouge
- bleu
- precision
- recall
- f1
model-index:
- name: LED_sum_challenge2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# LED_sum_challenge2

This model is a fine-tuned version of [allenai/led-base-16384](https://huggingface.co/allenai/led-base-16384) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 2.9586
- Rouge1: 0.2918
- Rouge2: 0.1012
- Rougel: 0.2293
- Rougelsum: 0.2288
- Gen Len: 28.12
- Bleu: 0.0548
- Precisions: 0.1048
- Brevity Penalty: 0.9001
- Length Ratio: 0.9048
- Translation Length: 1093.0
- Reference Length: 1208.0
- Precision: 0.8818
- Recall: 0.8759
- F1: 0.8788
- Hashcode: roberta-large_L17_no-idf_version=0.3.12(hug_trans=4.53.1)

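A minimal inference sketch (not part of the original card): it assumes the checkpoint is available under a local path or Hub repo id, written here as the hypothetical `LED_sum_challenge2`, and uses the common LED pattern of placing global attention on the first token.

```python
# Sketch only: load the fine-tuned LED checkpoint and summarize one document.
# The model path below is a placeholder; point it at this repo or a local copy.
import torch
from transformers import LEDForConditionalGeneration, LEDTokenizer

model_path = "LED_sum_challenge2"  # hypothetical; replace with the actual repo id or directory

tokenizer = LEDTokenizer.from_pretrained(model_path)
model = LEDForConditionalGeneration.from_pretrained(model_path)

document = "Replace this with the long input document to summarize."
inputs = tokenizer(document, return_tensors="pt", truncation=True, max_length=16384)

# LED uses windowed local attention; global attention on the first token is the
# usual choice for summarization.
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1

summary_ids = model.generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    global_attention_mask=global_attention_mask,
    max_new_tokens=64,   # illustrative; the mean generated length reported above is ~28 tokens
    num_beams=4,         # illustrative decoding setting, not taken from the card
)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```
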
## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: AdamW (torch implementation) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
- lr_scheduler_type: linear
- num_epochs: 10
- mixed_precision_training: Native AMP

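The training script is not part of this repository; as a hedged illustration, the hyperparameters listed above would typically map onto `Seq2SeqTrainingArguments` roughly as follows (dataset loading, preprocessing, and the metric function are omitted because the card does not document them):

```python
# Sketch under assumptions: shows how the listed hyperparameters are usually
# expressed with the transformers Seq2SeqTrainer; it is not the author's script.
from transformers import (
    LEDForConditionalGeneration,
    LEDTokenizer,
    Seq2SeqTrainingArguments,
    Seq2SeqTrainer,
)

model = LEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")

args = Seq2SeqTrainingArguments(
    output_dir="LED_sum_challenge2",
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    optim="adamw_torch",        # AdamW with betas=(0.9, 0.999), epsilon=1e-08
    lr_scheduler_type="linear",
    num_train_epochs=10,
    fp16=True,                  # "Native AMP" mixed precision
    eval_strategy="epoch",      # the results table reports one evaluation per epoch
    predict_with_generate=True,
)

# trainer = Seq2SeqTrainer(
#     model=model,
#     args=args,
#     train_dataset=...,        # not documented in this card
#     eval_dataset=...,         # not documented in this card
#     processing_class=tokenizer,
# )
# trainer.train()
```
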
### Training results

| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | Bleu | Precisions | Brevity Penalty | Length Ratio | Translation Length | Reference Length | Precision | Recall | F1 | Hashcode |
|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:|:------:|:----------:|:---------------:|:------------:|:------------------:|:----------------:|:---------:|:------:|:------:|:---------------------------------------------------------:|
| 9.0848 | 1.0 | 13 | 7.5283 | 0.24 | 0.0579 | 0.1713 | 0.1714 | 31.78 | 0.0296 | 0.0629 | 1.0 | 1.0439 | 1261.0 | 1208.0 | 0.8521 | 0.8597 | 0.8558 | roberta-large_L17_no-idf_version=0.3.12(hug_trans=4.53.1) |
| 6.171 | 2.0 | 26 | 4.9217 | 0.2695 | 0.0854 | 0.203 | 0.2033 | 25.98 | 0.0368 | 0.0987 | 0.8063 | 0.8228 | 994.0 | 1208.0 | 0.8806 | 0.8705 | 0.8755 | roberta-large_L17_no-idf_version=0.3.12(hug_trans=4.53.1) |
| 4.4536 | 3.0 | 39 | 4.1312 | 0.2717 | 0.0862 | 0.2162 | 0.2157 | 23.34 | 0.0352 | 0.1067 | 0.6694 | 0.7136 | 862.0 | 1208.0 | 0.8846 | 0.8732 | 0.8788 | roberta-large_L17_no-idf_version=0.3.12(hug_trans=4.53.1) |
| 3.7683 | 4.0 | 52 | 3.7332 | 0.3043 | 0.0981 | 0.2301 | 0.2308 | 25.46 | 0.0499 | 0.1154 | 0.7784 | 0.7997 | 966.0 | 1208.0 | 0.8885 | 0.8787 | 0.8835 | roberta-large_L17_no-idf_version=0.3.12(hug_trans=4.53.1) |
| 3.3278 | 5.0 | 65 | 3.4699 | 0.2978 | 0.1041 | 0.2351 | 0.2344 | 25.38 | 0.0497 | 0.1117 | 0.7854 | 0.8055 | 973.0 | 1208.0 | 0.8869 | 0.8763 | 0.8815 | roberta-large_L17_no-idf_version=0.3.12(hug_trans=4.53.1) |
| 3.0332 | 6.0 | 78 | 3.2808 | 0.2946 | 0.1013 | 0.2335 | 0.2319 | 26.48 | 0.0503 | 0.1069 | 0.8181 | 0.8328 | 1006.0 | 1208.0 | 0.8857 | 0.8774 | 0.8815 | roberta-large_L17_no-idf_version=0.3.12(hug_trans=4.53.1) |
| 2.8037 | 7.0 | 91 | 3.1443 | 0.295 | 0.0965 | 0.2275 | 0.2264 | 27.52 | 0.0428 | 0.0978 | 0.8612 | 0.87 | 1051.0 | 1208.0 | 0.8822 | 0.8777 | 0.8799 | roberta-large_L17_no-idf_version=0.3.12(hug_trans=4.53.1) |
| 2.637 | 8.0 | 104 | 3.0523 | 0.2834 | 0.0997 | 0.2263 | 0.2257 | 27.22 | 0.0499 | 0.1034 | 0.8527 | 0.8626 | 1042.0 | 1208.0 | 0.8813 | 0.8752 | 0.8781 | roberta-large_L17_no-idf_version=0.3.12(hug_trans=4.53.1) |
| 2.5158 | 9.0 | 117 | 2.9900 | 0.2821 | 0.0989 | 0.2271 | 0.2273 | 27.18 | 0.0508 | 0.1051 | 0.848 | 0.8584 | 1037.0 | 1208.0 | 0.8842 | 0.8773 | 0.8806 | roberta-large_L17_no-idf_version=0.3.12(hug_trans=4.53.1) |
| 2.4321 | 10.0 | 130 | 2.9586 | 0.2918 | 0.1012 | 0.2293 | 0.2288 | 28.12 | 0.0548 | 0.1048 | 0.9001 | 0.9048 | 1093.0 | 1208.0 | 0.8818 | 0.8759 | 0.8788 | roberta-large_L17_no-idf_version=0.3.12(hug_trans=4.53.1) |

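The Precision, Recall, F1, and Hashcode columns appear to be BERTScore values (the hashcode is BERTScore's configuration string: `roberta-large`, layer 17, no IDF weighting). Below is a sketch of computing the reported metric families with the `evaluate` library, using placeholder prediction and reference lists:

```python
# Sketch only: reproduces the metric families reported above with the
# `evaluate` library; predictions/references are placeholders.
import evaluate

predictions = ["a generated summary"]
references = ["the reference summary"]

rouge = evaluate.load("rouge")
bleu = evaluate.load("bleu")
bertscore = evaluate.load("bertscore")

print(rouge.compute(predictions=predictions, references=references))
print(bleu.compute(predictions=predictions, references=references))
print(bertscore.compute(predictions=predictions, references=references,
                        model_type="roberta-large"))  # matches the roberta-large_L17 hashcode
```
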
### Framework versions

- Transformers 4.53.1
- Pytorch 2.7.0+cu126
- Datasets 3.6.0
- Tokenizers 0.21.1
config.json ADDED
{
  "activation_dropout": 0.0,
  "activation_function": "gelu",
  "architectures": [
    "LEDForConditionalGeneration"
  ],
  "attention_dropout": 0.0,
  "attention_window": [
    1024,
    1024,
    1024,
    1024,
    1024,
    1024
  ],
  "bos_token_id": 0,
  "classif_dropout": 0.0,
  "classifier_dropout": 0.0,
  "d_model": 768,
  "decoder_attention_heads": 12,
  "decoder_ffn_dim": 3072,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 6,
  "decoder_start_token_id": 2,
  "dropout": 0.1,
  "encoder_attention_heads": 12,
  "encoder_ffn_dim": 3072,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 6,
  "eos_token_id": 2,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2"
  },
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2
  },
  "max_decoder_position_embeddings": 1024,
  "max_encoder_position_embeddings": 16384,
  "model_type": "led",
  "num_hidden_layers": 6,
  "pad_token_id": 1,
  "torch_dtype": "float32",
  "transformers_version": "4.53.1",
  "use_cache": false,
  "vocab_size": 50265
}
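For orientation, this configuration gives the encoder up to 16,384 positions with a 1,024-token local attention window per layer, while the decoder is limited to 1,024 positions. A small sketch (the model path is a placeholder) showing how these fields can be read back from the config:

```python
# Sketch: inspect the architecture described by this config. Building a model
# from the config alone yields random weights; use from_pretrained on the
# trained checkpoint for real weights. The path below is a placeholder.
from transformers import LEDConfig, LEDForConditionalGeneration

config = LEDConfig.from_pretrained("LED_sum_challenge2")  # hypothetical repo id or local path

print(config.max_encoder_position_embeddings)  # 16384: maximum input length
print(config.max_decoder_position_embeddings)  # 1024: ceiling on generated sequences
print(config.attention_window)                 # [1024, ...]: local window per encoder layer

model = LEDForConditionalGeneration(config)    # same architecture, untrained weights
```
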
generation_config.json ADDED
{
  "bos_token_id": 0,
  "decoder_start_token_id": 2,
  "eos_token_id": 2,
  "pad_token_id": 1,
  "transformers_version": "4.53.1"
}
merges.txt ADDED
The diff for this file is too large to render.
 
model.safetensors ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:a4e468c3454e274b1b40f9e53663dd1a3b4afebc3d2cee64b529f84147323b33
size 647614116
runs/Jul24_12-36-22_tardis/events.out.tfevents.1753353384.tardis.457126.0 ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:0e894d85d3997b3bd008cfe467f3c3acf8e4a4fd0d27bef71db8995bc8d51250
size 5588
runs/Jul24_12-36-50_tardis/events.out.tfevents.1753353412.tardis.457314.0 ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:c74a3912c605f5ee4555d7dc0a56cf918fc7cf74841d4757ece830c6ddfd0406
size 19355
special_tokens_map.json ADDED
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json ADDED
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "50264": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "errors": "replace",
  "extra_special_tokens": {},
  "mask_token": "<mask>",
  "model_max_length": 16384,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "LEDTokenizer",
  "trim_offsets": true,
  "unk_token": "<unk>"
}
training_args.bin ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:1c4182bd7e603c309f839aff947a33c91a19c11ba1a67580b124aab30b08a839
size 5905
vocab.json ADDED
The diff for this file is too large to render.