Add files using upload-large-folder tool
- plbart_fmft_official_1e-05/checkpoint-103026/config.json +34 -0
- plbart_fmft_official_1e-05/checkpoint-103026/generation_config.json +9 -0
- plbart_fmft_official_1e-05/checkpoint-103026/model.safetensors +3 -0
- plbart_fmft_official_1e-05/checkpoint-103026/optimizer.pt +3 -0
- plbart_fmft_official_1e-05/checkpoint-103026/rng_state.pth +3 -0
- plbart_fmft_official_1e-05/checkpoint-103026/scheduler.pt +3 -0
- plbart_fmft_official_1e-05/checkpoint-103026/trainer_state.json +0 -0
- plbart_fmft_official_1e-05/checkpoint-103026/training_args.bin +3 -0
- plbart_fmft_official_1e-05/checkpoint-117744/config.json +34 -0
- plbart_fmft_official_1e-05/checkpoint-117744/generation_config.json +9 -0
- plbart_fmft_official_1e-05/checkpoint-117744/model.safetensors +3 -0
- plbart_fmft_official_1e-05/checkpoint-117744/optimizer.pt +3 -0
- plbart_fmft_official_1e-05/checkpoint-117744/rng_state.pth +3 -0
- plbart_fmft_official_1e-05/checkpoint-117744/scheduler.pt +3 -0
- plbart_fmft_official_1e-05/checkpoint-117744/trainer_state.json +0 -0
- plbart_fmft_official_1e-05/checkpoint-117744/training_args.bin +3 -0
- plbart_fmft_official_1e-05/complete_results.json +0 -0
- plbart_fmft_official_1e-05/config.json +34 -0
- plbart_fmft_official_1e-05/generation_config.json +9 -0
- plbart_fmft_official_1e-05/model.safetensors +3 -0
- plbart_fmft_official_1e-05/sentencepiece.bpe.model +3 -0
- plbart_fmft_official_1e-05/special_tokens_map.json +20 -0
- plbart_fmft_official_1e-05/tokenizer_config.json +88 -0
- plbart_fmft_official_1e-05/training_args.txt +145 -0
plbart_fmft_official_1e-05/checkpoint-103026/config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "uclanlp/plbart-base",
+  "activation_dropout": 0.0,
+  "activation_function": "gelu",
+  "architectures": [
+    "PLBartForConditionalGeneration"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.0,
+  "d_model": 768,
+  "decoder_attention_heads": 12,
+  "decoder_ffn_dim": 3072,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 6,
+  "dropout": 0.1,
+  "encoder_attention_heads": 12,
+  "encoder_ffn_dim": 3072,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 6,
+  "eos_token_id": 2,
+  "forced_eos_token_id": 2,
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "max_position_embeddings": 1024,
+  "model_type": "plbart",
+  "num_hidden_layers": 6,
+  "pad_token_id": 1,
+  "scale_embedding": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.47.0",
+  "use_cache": true,
+  "vocab_size": 50005
+}
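This config.json describes a stock PLBart-base encoder-decoder (6 encoder and 6 decoder layers, d_model 768, vocabulary of 50005 tokens) fine-tuned from uclanlp/plbart-base. A minimal sketch of loading such a checkpoint with Transformers, assuming the repository has been downloaded to a local folder (the path below is illustrative):

# Minimal sketch: load the fine-tuned checkpoint (local path is an assumption).
from transformers import PLBartForConditionalGeneration

model = PLBartForConditionalGeneration.from_pretrained(
    "plbart_fmft_official_1e-05/checkpoint-103026"
)
print(model.config.d_model)     # 768
print(model.config.vocab_size)  # 50005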
plbart_fmft_official_1e-05/checkpoint-103026/generation_config.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "decoder_start_token_id": 0,
+  "eos_token_id": 2,
+  "max_new_tokens": 256,
+  "min_new_tokens": 2,
+  "num_beams": 3,
+  "pad_token_id": 1,
+  "transformers_version": "4.47.0"
+}
plbart_fmft_official_1e-05/checkpoint-103026/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:816e43c27b9f8ed8070008a9e23d60c6eb7fc4419f313898135b6ab45b1b40a0
+size 557112860
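model.safetensors is committed as a Git LFS pointer, so only the sha256 and byte size appear in the diff; the actual ~557 MB weight file lives in LFS storage. A sketch of fetching the real file with huggingface_hub (the repo id below is a placeholder, not stated in this commit):

# Sketch: download the LFS-backed weights instead of the pointer file.
# "your-username/plbart_fmft_official_1e-05" is a hypothetical repo id.
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(
    repo_id="your-username/plbart_fmft_official_1e-05",
    filename="checkpoint-103026/model.safetensors",
)
print(weights_path)  # local cache path of the downloaded weights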
plbart_fmft_official_1e-05/checkpoint-103026/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a39a46f84b6ae50a97c033742fefa46388cdd1a21252e96859b5f9a3d5a8fb6a
+size 1113982859
plbart_fmft_official_1e-05/checkpoint-103026/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bad46ce7d5f175ad6a8930587b64ba9c17d4384b051d42bbde858a2fc6389eb7
+size 14645
plbart_fmft_official_1e-05/checkpoint-103026/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c337637185fd80cd189c72d3bb64bfd33c12b6458a1e298f92773bd809f4b66e
+size 1465
plbart_fmft_official_1e-05/checkpoint-103026/trainer_state.json
ADDED
The diff for this file is too large to render. See raw diff
plbart_fmft_official_1e-05/checkpoint-103026/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4cbbe535fd7d595a9e873964ff829b32a45a17e830e6972ab43f99df4848122e
+size 8081
plbart_fmft_official_1e-05/checkpoint-117744/config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "uclanlp/plbart-base",
+  "activation_dropout": 0.0,
+  "activation_function": "gelu",
+  "architectures": [
+    "PLBartForConditionalGeneration"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.0,
+  "d_model": 768,
+  "decoder_attention_heads": 12,
+  "decoder_ffn_dim": 3072,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 6,
+  "dropout": 0.1,
+  "encoder_attention_heads": 12,
+  "encoder_ffn_dim": 3072,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 6,
+  "eos_token_id": 2,
+  "forced_eos_token_id": 2,
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "max_position_embeddings": 1024,
+  "model_type": "plbart",
+  "num_hidden_layers": 6,
+  "pad_token_id": 1,
+  "scale_embedding": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.47.0",
+  "use_cache": true,
+  "vocab_size": 50005
+}
plbart_fmft_official_1e-05/checkpoint-117744/generation_config.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "decoder_start_token_id": 0,
+  "eos_token_id": 2,
+  "max_new_tokens": 256,
+  "min_new_tokens": 2,
+  "num_beams": 3,
+  "pad_token_id": 1,
+  "transformers_version": "4.47.0"
+}
plbart_fmft_official_1e-05/checkpoint-117744/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd77ec5e5927764b9bc5ebad074bdf6b2fe3647157b1e0c3c9bc76e35d55cdda
+size 557112860
plbart_fmft_official_1e-05/checkpoint-117744/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f66940e600e6c65a9cdd8ee3d4519e3a0b9b0ef2ae8c9f19d1fb680ec727501
+size 1113982859
plbart_fmft_official_1e-05/checkpoint-117744/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4a5951e001a9359f7e983b3b7ce52ce35cb24fbab56c5789a9b40f0b2895ee1
+size 14645
plbart_fmft_official_1e-05/checkpoint-117744/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a07404cd44d75d59cb3734047d5b90b137885eff7dda0584677aed1e8f3584a
+size 1465
plbart_fmft_official_1e-05/checkpoint-117744/trainer_state.json
ADDED
The diff for this file is too large to render. See raw diff
plbart_fmft_official_1e-05/checkpoint-117744/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4cbbe535fd7d595a9e873964ff829b32a45a17e830e6972ab43f99df4848122e
+size 8081
plbart_fmft_official_1e-05/complete_results.json
ADDED
The diff for this file is too large to render. See raw diff
plbart_fmft_official_1e-05/config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "uclanlp/plbart-base",
+  "activation_dropout": 0.0,
+  "activation_function": "gelu",
+  "architectures": [
+    "PLBartForConditionalGeneration"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.0,
+  "d_model": 768,
+  "decoder_attention_heads": 12,
+  "decoder_ffn_dim": 3072,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 6,
+  "dropout": 0.1,
+  "encoder_attention_heads": 12,
+  "encoder_ffn_dim": 3072,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 6,
+  "eos_token_id": 2,
+  "forced_eos_token_id": 2,
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "max_position_embeddings": 1024,
+  "model_type": "plbart",
+  "num_hidden_layers": 6,
+  "pad_token_id": 1,
+  "scale_embedding": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.47.0",
+  "use_cache": true,
+  "vocab_size": 50005
+}
plbart_fmft_official_1e-05/generation_config.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "decoder_start_token_id": 0,
+  "eos_token_id": 2,
+  "max_new_tokens": 256,
+  "min_new_tokens": 2,
+  "num_beams": 3,
+  "pad_token_id": 1,
+  "transformers_version": "4.47.0"
+}
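generation_config.json stores the decoding defaults used at evaluation time: beam search with 3 beams and between 2 and 256 newly generated tokens, with pad/eos ids matching the model config. A sketch of how these defaults are picked up automatically by generate(); the local path and input snippet are illustrative:

# Sketch: generate with the saved decoding defaults (num_beams=3, max_new_tokens=256).
from transformers import PLBartForConditionalGeneration, PLBartTokenizer

model_dir = "plbart_fmft_official_1e-05"   # illustrative local path
model = PLBartForConditionalGeneration.from_pretrained(model_dir)
tokenizer = PLBartTokenizer.from_pretrained(model_dir)

inputs = tokenizer("public int add(int a, int b) { return a + b; }", return_tensors="pt")
outputs = model.generate(**inputs)         # uses the defaults from generation_config.json
print(tokenizer.decode(outputs[0], skip_special_tokens=True))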
plbart_fmft_official_1e-05/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:816e43c27b9f8ed8070008a9e23d60c6eb7fc4419f313898135b6ab45b1b40a0
+size 557112860
plbart_fmft_official_1e-05/sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f72f5d040a176945623a255484d24066f8c0da89a294359154e226efbe494b80
+size 985833
plbart_fmft_official_1e-05/special_tokens_map.json
ADDED
@@ -0,0 +1,20 @@
+{
+  "additional_special_tokens": [
+    "__java__",
+    "__python__",
+    "__en_XX__"
+  ],
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
plbart_fmft_official_1e-05/tokenizer_config.json
ADDED
@@ -0,0 +1,88 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50001": {
+      "content": "__java__",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50002": {
+      "content": "__python__",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50003": {
+      "content": "__en_XX__",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50004": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "__java__",
+    "__python__",
+    "__en_XX__"
+  ],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "extra_special_tokens": {},
+  "language_codes": "base",
+  "mask_token": "<mask>",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "sp_model_kwargs": {},
+  "src_lang": null,
+  "tgt_lang": null,
+  "tokenizer_class": "PLBartTokenizer",
+  "unk_token": "<unk>"
+}
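The tokenizer is the standard PLBART SentencePiece tokenizer with the "base" language-code set, so __java__, __python__ and __en_XX__ are registered as special tokens (ids 50001-50003) alongside <mask> (50004). A sketch of loading it with explicit language codes; the Java-to-Java direction is only an assumption for illustration, since the commit does not state the fine-tuning task:

# Sketch: load the tokenizer with explicit source/target language codes (assumed).
from transformers import PLBartTokenizer

tokenizer = PLBartTokenizer.from_pretrained(
    "plbart_fmft_official_1e-05",   # illustrative local path
    src_lang="__java__",
    tgt_lang="__java__",
)
ids = tokenizer("if (x == null) { return; }").input_ids
print(tokenizer.convert_ids_to_tokens(ids))  # source tokens followed by </s> and __java__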
plbart_fmft_official_1e-05/training_args.txt
ADDED
@@ -0,0 +1,145 @@
+Seq2SeqTrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+average_tokens_across_devices=False,
+batch_eval_metrics=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=True,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_do_concat_batches=True,
+eval_on_start=False,
+eval_steps=None,
+eval_strategy=IntervalStrategy.EPOCH,
+eval_use_gather_object=False,
+evaluation_strategy=None,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+generation_config=GenerationConfig {
+  "decoder_start_token_id": 0,
+  "eos_token_id": 2,
+  "max_new_tokens": 256,
+  "min_new_tokens": 2,
+  "num_beams": 3,
+  "pad_token_id": 1
+}
+,
+generation_max_length=None,
+generation_num_beams=None,
+gradient_accumulation_steps=1,
+gradient_checkpointing=False,
+gradient_checkpointing_kwargs=None,
+greater_is_better=True,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=None,
+hub_strategy=HubStrategy.EVERY_SAVE,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_for_metrics=[],
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=1e-05,
+length_column_name=length,
+load_best_model_at_end=True,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=./results-cc/plbart/plbart_fmft_official_1e-05/logs,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=5,
+logging_strategy=IntervalStrategy.STEPS,
+lr_scheduler_kwargs={},
+lr_scheduler_type=SchedulerType.LINEAR,
+max_grad_norm=1.0,
+max_steps=-1,
+metric_for_best_model=eval_bleu_4,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=8,
+optim=OptimizerNames.ADAMW_TORCH,
+optim_args=None,
+optim_target_modules=None,
+output_dir=./results-cc/plbart/plbart_fmft_official_1e-05,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=8,
+predict_with_generate=True,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+remove_unused_columns=True,
+report_to=['wandb'],
+restore_callback_states_from_checkpoint=False,
+resume_from_checkpoint=None,
+run_name=plbart_fmft_official_1e-05,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=SaveStrategy.EPOCH,
+save_total_limit=2,
+seed=42,
+skip_memory_metrics=True,
+sortish_sampler=False,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torch_empty_cache_steps=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_liger_kernel=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=0,
+weight_decay=0.01,
+)
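training_args.txt records the full Seq2SeqTrainingArguments dump: learning rate 1e-05, 8 epochs, per-device batch size 8, fp16, a linear scheduler with no warmup, weight decay 0.01, evaluation and checkpointing every epoch, and best-model selection by eval_bleu_4. A sketch of the core setup reconstructed in code; only the most relevant fields are repeated, and everything else keeps Transformers defaults, which may differ slightly from the dump above:

# Sketch: core training configuration, reconstructed from the dump (not a verbatim copy).
from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="./results-cc/plbart/plbart_fmft_official_1e-05",
    learning_rate=1e-05,
    num_train_epochs=8,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    weight_decay=0.01,
    fp16=True,
    lr_scheduler_type="linear",
    warmup_steps=0,
    eval_strategy="epoch",
    save_strategy="epoch",
    save_total_limit=2,
    load_best_model_at_end=True,
    metric_for_best_model="eval_bleu_4",
    greater_is_better=True,
    predict_with_generate=True,
    logging_steps=5,
    report_to=["wandb"],
    run_name="plbart_fmft_official_1e-05",
    seed=42,
)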