aashish-249 committed
Commit b0b8438 · 1 Parent(s): ec2872d

End of training
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,115 @@
+---
+license: apache-2.0
+base_model: google/mt5-small
+tags:
+- generated_from_trainer
+metrics:
+- rouge
+model-index:
+- name: mt5-summarize-te
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# mt5-summarize-te
+
+This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Loss: 2.1257
+- Rouge1: 0.5211
+- Rouge2: 0.4338
+- Rougel: 0.4813
+- Rougelsum: 0.4819
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 0.0005
+- train_batch_size: 2
+- eval_batch_size: 1
+- seed: 42
+- gradient_accumulation_steps: 16
+- total_train_batch_size: 32
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: linear
+- lr_scheduler_warmup_steps: 90
+- num_epochs: 10
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum |
+|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|
+| 3.8667 | 0.2 | 100 | 2.5990 | 0.4450 | 0.3695 | 0.4151 | 0.4154 |
+| 3.2326 | 0.39 | 200 | 2.5107 | 0.5023 | 0.4156 | 0.4567 | 0.4571 |
+| 3.1169 | 0.59 | 300 | 2.4503 | 0.5092 | 0.4204 | 0.4775 | 0.4762 |
+| 2.9083 | 0.79 | 400 | 2.4005 | 0.5053 | 0.4179 | 0.4699 | 0.4709 |
+| 2.9652 | 0.98 | 500 | 2.3218 | 0.4833 | 0.4003 | 0.4528 | 0.4555 |
+| 2.8848 | 1.18 | 600 | 2.3262 | 0.5309 | 0.4415 | 0.4868 | 0.4879 |
+| 2.6585 | 1.37 | 700 | 2.3118 | 0.5168 | 0.4273 | 0.4780 | 0.4773 |
+| 2.6662 | 1.57 | 800 | 2.2823 | 0.5112 | 0.4233 | 0.4713 | 0.4727 |
+| 2.7628 | 1.77 | 900 | 2.2381 | 0.5158 | 0.4269 | 0.4798 | 0.4798 |
+| 2.7156 | 1.96 | 1000 | 2.2466 | 0.5280 | 0.4452 | 0.4836 | 0.4844 |
+| 2.5683 | 2.16 | 1100 | 2.2495 | 0.5184 | 0.4300 | 0.4779 | 0.4773 |
+| 2.5248 | 2.36 | 1200 | 2.2498 | 0.5179 | 0.4282 | 0.4790 | 0.4803 |
+| 2.5809 | 2.55 | 1300 | 2.2336 | 0.5233 | 0.4385 | 0.4895 | 0.4920 |
+| 2.7113 | 2.75 | 1400 | 2.2368 | 0.5079 | 0.4207 | 0.4707 | 0.4716 |
+| 2.6151 | 2.95 | 1500 | 2.1993 | 0.5108 | 0.4236 | 0.4681 | 0.4679 |
+| 2.5172 | 3.14 | 1600 | 2.2197 | 0.5138 | 0.4257 | 0.4778 | 0.4781 |
+| 2.5873 | 3.34 | 1700 | 2.1900 | 0.5185 | 0.4312 | 0.4823 | 0.4821 |
+| 2.4245 | 3.53 | 1800 | 2.1982 | 0.5222 | 0.4332 | 0.4837 | 0.4853 |
+| 2.4983 | 3.73 | 1900 | 2.1756 | 0.5125 | 0.4247 | 0.4809 | 0.4810 |
+| 2.3963 | 3.93 | 2000 | 2.1900 | 0.5259 | 0.4400 | 0.4870 | 0.4884 |
+| 2.3465 | 4.12 | 2100 | 2.1963 | 0.5300 | 0.4412 | 0.4900 | 0.4915 |
+| 2.4625 | 4.32 | 2200 | 2.1818 | 0.5277 | 0.4384 | 0.4868 | 0.4882 |
+| 2.4257 | 4.52 | 2300 | 2.1504 | 0.5212 | 0.4342 | 0.4833 | 0.4842 |
+| 2.368 | 4.71 | 2400 | 2.1463 | 0.5252 | 0.4418 | 0.4856 | 0.4869 |
+| 2.427 | 4.91 | 2500 | 2.1581 | 0.5161 | 0.4267 | 0.4766 | 0.4771 |
+| 2.3443 | 5.11 | 2600 | 2.1551 | 0.5167 | 0.4281 | 0.4794 | 0.4794 |
+| 2.2923 | 5.3 | 2700 | 2.1596 | 0.5183 | 0.4255 | 0.4668 | 0.4686 |
+| 2.2956 | 5.5 | 2800 | 2.1438 | 0.5125 | 0.4268 | 0.4747 | 0.4754 |
+| 2.2973 | 5.69 | 2900 | 2.1523 | 0.5139 | 0.4259 | 0.4712 | 0.4722 |
+| 2.3013 | 5.89 | 3000 | 2.1514 | 0.5138 | 0.4236 | 0.4741 | 0.4742 |
+| 2.2222 | 6.09 | 3100 | 2.1558 | 0.5172 | 0.4300 | 0.4773 | 0.4784 |
+| 2.3957 | 6.28 | 3200 | 2.1451 | 0.5203 | 0.4326 | 0.4815 | 0.4817 |
+| 2.1995 | 6.48 | 3300 | 2.1476 | 0.5146 | 0.4264 | 0.4747 | 0.4752 |
+| 2.2931 | 6.68 | 3400 | 2.1252 | 0.5120 | 0.4252 | 0.4683 | 0.4683 |
+| 2.3062 | 6.87 | 3500 | 2.1313 | 0.5197 | 0.4339 | 0.4803 | 0.4807 |
+| 2.2844 | 7.07 | 3600 | 2.1281 | 0.5197 | 0.4339 | 0.4868 | 0.4876 |
+| 2.1158 | 7.27 | 3700 | 2.1438 | 0.5208 | 0.4333 | 0.4818 | 0.4823 |
+| 2.2523 | 7.46 | 3800 | 2.1221 | 0.5197 | 0.4324 | 0.4783 | 0.4788 |
+| 2.2389 | 7.66 | 3900 | 2.1336 | 0.5144 | 0.4262 | 0.4769 | 0.4771 |
+| 2.2209 | 7.85 | 4000 | 2.1317 | 0.5211 | 0.4338 | 0.4813 | 0.4819 |
+| 2.1828 | 8.05 | 4100 | 2.1366 | 0.5208 | 0.4336 | 0.4814 | 0.4816 |
+| 2.2746 | 8.25 | 4200 | 2.1325 | 0.5219 | 0.4342 | 0.4819 | 0.4823 |
+| 2.229 | 8.44 | 4300 | 2.1334 | 0.5214 | 0.4329 | 0.4809 | 0.4812 |
+| 2.2762 | 8.64 | 4400 | 2.1223 | 0.5161 | 0.4288 | 0.4761 | 0.4769 |
+| 2.2005 | 8.84 | 4500 | 2.1322 | 0.5197 | 0.4320 | 0.4793 | 0.4799 |
+| 2.1975 | 9.03 | 4600 | 2.1294 | 0.5211 | 0.4338 | 0.4813 | 0.4819 |
+| 2.3219 | 9.23 | 4700 | 2.1251 | 0.5148 | 0.4260 | 0.4768 | 0.4772 |
+| 2.252 | 9.43 | 4800 | 2.1261 | 0.5211 | 0.4338 | 0.4813 | 0.4819 |
+| 2.2594 | 9.62 | 4900 | 2.1236 | 0.5200 | 0.4331 | 0.4808 | 0.4814 |
+| 2.1675 | 9.82 | 5000 | 2.1257 | 0.5211 | 0.4338 | 0.4813 | 0.4819 |
+
+
+### Framework versions
+
+- Transformers 4.35.0
+- Pytorch 2.1.0+cu118
+- Datasets 2.14.6
+- Tokenizers 0.14.1
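
The card above has no usage snippet, so here is a minimal sketch of running the checkpoint through the `transformers` summarization pipeline. The Hub repo id `aashish-249/mt5-summarize-te` is an assumption pieced together from the committer and model name, and the Telugu input language is a guess from the `-te` suffix; neither is stated in the card.

```python
# Minimal usage sketch; the repo id and input language are assumptions (see above).
from transformers import pipeline

summarizer = pipeline(
    "summarization",
    model="aashish-249/mt5-summarize-te",  # assumed repo id, not confirmed by this commit
)

article = "..."  # a long source document, presumably Telugu given the -te suffix
print(summarizer(article)[0]["summary_text"])
```

For reproducibility, the hyperparameter list above maps onto `Seq2SeqTrainingArguments` roughly as follows. The dataset and trainer wiring are omitted because the card does not name the dataset; the reported Adam settings are the Trainer defaults, and `predict_with_generate` is an assumption needed to produce the ROUGE columns during evaluation.

```python
# Sketch reconstructing the reported hyperparameters; not the author's actual script.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="mt5-summarize-te",
    learning_rate=5e-4,              # 0.0005 as reported
    per_device_train_batch_size=2,
    per_device_eval_batch_size=1,
    seed=42,
    gradient_accumulation_steps=16,  # 2 * 16 = total train batch size of 32
    lr_scheduler_type="linear",
    warmup_steps=90,
    num_train_epochs=10,
    predict_with_generate=True,      # assumption: required to compute ROUGE at eval time
)
```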
config.json ADDED
@@ -0,0 +1,36 @@
+{
+  "_name_or_path": "google/mt5-small",
+  "architectures": [
+    "MT5ForConditionalGeneration"
+  ],
+  "classifier_dropout": 0.0,
+  "d_ff": 1024,
+  "d_kv": 64,
+  "d_model": 512,
+  "decoder_start_token_id": 0,
+  "dense_act_fn": "gelu_new",
+  "dropout_rate": 0.1,
+  "eos_token_id": 1,
+  "feed_forward_proj": "gated-gelu",
+  "initializer_factor": 1.0,
+  "is_encoder_decoder": true,
+  "is_gated_act": true,
+  "layer_norm_epsilon": 1e-06,
+  "length_penalty": 0.6,
+  "max_length": 128,
+  "model_type": "mt5",
+  "no_repeat_ngram_size": 2,
+  "num_beams": 15,
+  "num_decoder_layers": 8,
+  "num_heads": 6,
+  "num_layers": 8,
+  "pad_token_id": 0,
+  "relative_attention_max_distance": 128,
+  "relative_attention_num_buckets": 32,
+  "tie_word_embeddings": false,
+  "tokenizer_class": "T5Tokenizer",
+  "torch_dtype": "float32",
+  "transformers_version": "4.35.0",
+  "use_cache": true,
+  "vocab_size": 250112
+}
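
The config keeps mt5-small's geometry (512-d model, 8 encoder and decoder layers, 6 heads) and bakes summarization-friendly generation defaults into the checkpoint. A quick sketch to verify that, using the same assumed repo id as in the usage example above:

```python
# Sketch: confirm the fine-tune keeps the base architecture and only overrides
# generation defaults. The repo id is an assumption, as noted earlier.
from transformers import AutoConfig

base = AutoConfig.from_pretrained("google/mt5-small")
cfg = AutoConfig.from_pretrained("aashish-249/mt5-summarize-te")
for name in ("d_model", "d_ff", "num_layers", "num_decoder_layers", "num_heads", "vocab_size"):
    assert getattr(cfg, name) == getattr(base, name), name
print(cfg.num_beams, cfg.length_penalty, cfg.max_length)  # 15 0.6 128
```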
generation_config.json ADDED
@@ -0,0 +1,10 @@
+{
+  "decoder_start_token_id": 0,
+  "eos_token_id": 1,
+  "length_penalty": 0.6,
+  "max_length": 128,
+  "no_repeat_ngram_size": 2,
+  "num_beams": 15,
+  "pad_token_id": 0,
+  "transformers_version": "4.35.0"
+}
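
These defaults are picked up automatically when `generate()` is called with no arguments; the explicit form below is equivalent and shows what each knob does. Same assumed repo id as above.

```python
# Sketch: calling generate() with the committed generation defaults spelled out.
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "aashish-249/mt5-summarize-te"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

inputs = tokenizer("...", return_tensors="pt")  # source document goes here
with torch.no_grad():
    summary_ids = model.generate(
        **inputs,
        num_beams=15,            # wide beam search
        length_penalty=0.6,      # < 1.0 nudges the beam toward shorter summaries
        no_repeat_ngram_size=2,  # never repeat a bigram in the output
        max_length=128,
    )
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```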
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9dee95b3342f482c16e2999470a9d4e8fe8eadc637db5f2b5c541fb63bfc7c62
+size 1200729512
runs/Nov04_11-55-28_011e4f6cabd7/events.out.tfevents.1699098928.011e4f6cabd7.199.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58a6600e62e9b35e5069037081e200f78b7d294abc859437a9962bcd152a09b5
+size 108508
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+{
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
spiece.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6
+size 4309802
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:312eb8d5be03ad38b677f813ef3a3d0d384568a2171251925d9db4a759f05ef4
+size 16330397
tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [],
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "</s>",
+  "extra_ids": 0,
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "T5Tokenizer",
+  "unk_token": "<unk>"
+}
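
A small sanity-check sketch tying the tokenizer files together: ids 0/1/2 map to pad/eos/unk as declared above, and T5-style tokenizers append `</s>` to every encoded sequence. The repo id is the same assumption as before.

```python
# Sketch: verify the special-token wiring declared in the committed tokenizer files.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("aashish-249/mt5-summarize-te")  # assumed repo id
assert (tok.pad_token_id, tok.eos_token_id, tok.unk_token_id) == (0, 1, 2)
ids = tok("ఒక చిన్న వాక్యం")["input_ids"]  # any text works; Telugu is a guess here
assert ids[-1] == tok.eos_token_id        # </s> is appended automatically
```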
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:482181ec6f38685c677665b5d5e22b4e50c86754b540811187389b269d87e3c2
+size 4664