cotysong113 committed on
Commit
836f202
·
verified ·
1 Parent(s): b4a15ca

End of training

Browse files
README.md CHANGED
@@ -18,12 +18,12 @@ should probably proofread and complete it, then remove this comment. -->
18
 
19
  This model is a fine-tuned version of [google-t5/t5-small](https://huggingface.co/google-t5/t5-small) on an unknown dataset.
20
  It achieves the following results on the evaluation set:
21
- - Loss: 2.4819
22
- - Rouge1: 0.1435
23
- - Rouge2: 0.0508
24
- - Rougel: 0.1179
25
- - Rougelsum: 0.1179
26
- - Gen Len: 19.0
27
 
28
  ## Model description
29
 
@@ -49,21 +49,20 @@ The following hyperparameters were used during training:
49
  - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
50
  - lr_scheduler_type: linear
51
  - num_epochs: 4
52
- - mixed_precision_training: Native AMP
53
 
54
  ### Training results
55
 
56
  | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
57
  |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:|
58
- | No log | 1.0 | 62 | 2.7787 | 0.125 | 0.0331 | 0.1038 | 0.1038 | 19.0 |
59
- | No log | 2.0 | 124 | 2.5635 | 0.1388 | 0.0491 | 0.1151 | 0.115 | 19.0 |
60
- | No log | 3.0 | 186 | 2.4988 | 0.1432 | 0.0501 | 0.1167 | 0.1168 | 19.0 |
61
- | No log | 4.0 | 248 | 2.4819 | 0.1435 | 0.0508 | 0.1179 | 0.1179 | 19.0 |
62
 
63
 
64
  ### Framework versions
65
 
66
- - Transformers 4.46.2
67
- - Pytorch 2.5.1+cu124
68
  - Datasets 3.1.0
69
- - Tokenizers 0.20.1
 
18
 
19
  This model is a fine-tuned version of [google-t5/t5-small](https://huggingface.co/google-t5/t5-small) on an unknown dataset.
20
  It achieves the following results on the evaluation set:
21
+ - Loss: 2.5385
22
+ - Rouge1: 0.141
23
+ - Rouge2: 0.0479
24
+ - Rougel: 0.1172
25
+ - Rougelsum: 0.1172
26
+ - Gen Len: 20.0
27
 
28
  ## Model description
29
 
 
49
  - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
50
  - lr_scheduler_type: linear
51
  - num_epochs: 4
 
52
 
53
  ### Training results
54
 
55
  | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
56
  |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:|
57
+ | No log | 1.0 | 62 | 2.8323 | 0.1328 | 0.0383 | 0.1112 | 0.111 | 20.0 |
58
+ | No log | 2.0 | 124 | 2.6200 | 0.1408 | 0.0475 | 0.117 | 0.117 | 20.0 |
59
+ | No log | 3.0 | 186 | 2.5557 | 0.1406 | 0.0472 | 0.1161 | 0.1161 | 20.0 |
60
+ | No log | 4.0 | 248 | 2.5385 | 0.141 | 0.0479 | 0.1172 | 0.1172 | 20.0 |
61
 
62
 
63
  ### Framework versions
64
 
65
+ - Transformers 4.47.0
66
+ - Pytorch 2.5.1
67
  - Datasets 3.1.0
68
+ - Tokenizers 0.21.0
config.json CHANGED
@@ -55,7 +55,7 @@
55
  }
56
  },
57
  "torch_dtype": "float32",
58
- "transformers_version": "4.46.2",
59
  "use_cache": true,
60
  "vocab_size": 32128
61
  }
 
55
  }
56
  },
57
  "torch_dtype": "float32",
58
+ "transformers_version": "4.47.0",
59
  "use_cache": true,
60
  "vocab_size": 32128
61
  }
generation_config.json CHANGED
@@ -2,5 +2,5 @@
2
  "decoder_start_token_id": 0,
3
  "eos_token_id": 1,
4
  "pad_token_id": 0,
5
- "transformers_version": "4.46.2"
6
  }
 
2
  "decoder_start_token_id": 0,
3
  "eos_token_id": 1,
4
  "pad_token_id": 0,
5
+ "transformers_version": "4.47.0"
6
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:323a3087e95bec2b817f261652dd376189a0736e2fd6980fa2c71daf9d99956d
3
  size 242041896
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab27ec6f806c818da0dc913f1f5df82dd65ce62705040dd2564554b4a43de2d0
3
  size 242041896
runs/Dec09_14-44-37_bogon/events.out.tfevents.1733726678.bogon.89513.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeeffdbe583643a2f210bf2961490c143fe8acc08b8ba269a4e1e32ebc69782e
3
+ size 8496
tokenizer_config.json CHANGED
@@ -930,6 +930,7 @@
930
  "clean_up_tokenization_spaces": true,
931
  "eos_token": "</s>",
932
  "extra_ids": 100,
 
933
  "model_max_length": 512,
934
  "pad_token": "<pad>",
935
  "tokenizer_class": "T5Tokenizer",
 
930
  "clean_up_tokenization_spaces": true,
931
  "eos_token": "</s>",
932
  "extra_ids": 100,
933
+ "extra_special_tokens": {},
934
  "model_max_length": 512,
935
  "pad_token": "<pad>",
936
  "tokenizer_class": "T5Tokenizer",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f9af873a04eea9088dffdb6431163dc98b0bb8e76c77c91e075212cfa6a14cb7
3
- size 5432
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd222a68140c324f706901732cb544e673b7ecf7a0e64dacf7f686d54b20b7d0
3
+ size 5496