Upload folder using huggingface_hub
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full set of files.
- gpt2-eli5-final-by-Yvens-Yan/merges.txt +0 -0
- gpt2-eli5-final-by-Yvens-Yan/special_tokens_map.json +6 -0
- gpt2-eli5-final-by-Yvens-Yan/tokenizer.json +0 -0
- gpt2-eli5-final-by-Yvens-Yan/tokenizer_config.json +21 -0
- gpt2-eli5-final-by-Yvens-Yan/vocab.json +0 -0
- gpt2-eli5-final-by-Yvens/config.json +46 -0
- gpt2-eli5-final-by-Yvens/generation_config.json +9 -0
- gpt2-eli5-final-by-Yvens/merges.txt +0 -0
- gpt2-eli5-final-by-Yvens/model.safetensors +3 -0
- gpt2-eli5-final-by-Yvens/special_tokens_map.json +6 -0
- gpt2-eli5-final-by-Yvens/tokenizer.json +0 -0
- gpt2-eli5-final-by-Yvens/tokenizer_config.json +21 -0
- gpt2-eli5-final-by-Yvens/training_args.bin +3 -0
- gpt2-eli5-final-by-Yvens/vocab.json +0 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-1116/config.json +46 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-1116/generation_config.json +9 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-1116/merges.txt +0 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-1116/model.safetensors +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-1116/optimizer.pt +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-1116/rng_state.pth +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-1116/scheduler.pt +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-1116/special_tokens_map.json +6 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-1116/tokenizer.json +0 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-1116/tokenizer_config.json +21 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-1116/trainer_state.json +119 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-1116/training_args.bin +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-1116/vocab.json +0 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-2232/config.json +46 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-2232/generation_config.json +9 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-2232/merges.txt +0 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-2232/model.safetensors +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-2232/optimizer.pt +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-2232/rng_state.pth +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-2232/scheduler.pt +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-2232/special_tokens_map.json +6 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-2232/tokenizer.json +0 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-2232/tokenizer_config.json +21 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-2232/trainer_state.json +204 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-2232/training_args.bin +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-2232/vocab.json +0 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-3348/config.json +46 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-3348/generation_config.json +9 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-3348/merges.txt +0 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-3348/model.safetensors +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-3348/optimizer.pt +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-3348/rng_state.pth +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-3348/scheduler.pt +3 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-3348/special_tokens_map.json +6 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-3348/tokenizer.json +0 -0
- gpt2-eli5-finetuned-by-yvens/checkpoint-3348/tokenizer_config.json +21 -0
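The listing above is the commit "Upload folder using huggingface_hub". For reference, a folder upload like this is typically produced by the huggingface_hub client's upload_folder call; the sketch below is a minimal example, with the repository id and local path as placeholders rather than values taken from this commit.

from huggingface_hub import upload_folder

# Hypothetical repo id and local directory; the actual target repository is not shown in this view.
upload_folder(
    folder_path=".",                          # local directory containing the model folders listed above
    repo_id="<username>/gpt2-eli5-yvens",     # placeholder, not the real repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)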
gpt2-eli5-final-by-Yvens-Yan/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-final-by-Yvens-Yan/special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
gpt2-eli5-final-by-Yvens-Yan/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-final-by-Yvens-Yan/tokenizer_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
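The tokenizer files in this folder (merges.txt, vocab.json, tokenizer.json, tokenizer_config.json, special_tokens_map.json) are the standard GPT-2 byte-level BPE tokenizer with <|endoftext|> reused as BOS, EOS, PAD, and UNK. A minimal sketch of loading them from the local folder uploaded here, assuming the transformers library is installed:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2-eli5-final-by-Yvens-Yan")  # tokenizer-only folder from this commit
ids = tok("Explain like I'm five: why is the sky blue?")["input_ids"]
print(len(ids), tok.decode(ids))
print(tok.eos_token, tok.pad_token)  # both <|endoftext|>, as in special_tokens_map.json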
gpt2-eli5-final-by-Yvens-Yan/vocab.json
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-final-by-Yvens/config.json
ADDED
@@ -0,0 +1,46 @@
+{
+  "_num_labels": 1,
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "dtype": "float32",
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 6,
+  "n_positions": 1024,
+  "pad_token_id": 50256,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "transformers_version": "4.57.3",
+  "use_cache": true,
+  "vocab_size": 50257
+}
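This config describes a 6-layer, 12-head, 768-dimensional GPT2LMHeadModel with a 1024-token context and a 50257-token vocabulary, and its task_specific_params suggest sampled generation up to 50 tokens. A hedged sketch of loading the uploaded folder and generating with those defaults (the local path comes from this commit; the prompt is illustrative):

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2-eli5-final-by-Yvens")
out = generator("Why do magnets attract metal?", max_length=50, do_sample=True)
print(out[0]["generated_text"])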
gpt2-eli5-final-by-Yvens/generation_config.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": [
+    50256
+  ],
+  "pad_token_id": 50256,
+  "transformers_version": "4.57.3"
+}
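generation_config.json holds only the generation defaults derived from the model config (BOS, EOS, and PAD all mapped to token 50256). It can be inspected without loading the weights; a small illustrative check:

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("gpt2-eli5-final-by-Yvens")  # local folder from this commit
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id)  # 50256 [50256] 50256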
gpt2-eli5-final-by-Yvens/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-final-by-Yvens/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17bbe70ac1e467013a740686bcd5cb1576ce2c70134f3adfcf535fdfcad273d4
+size 327657928
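Binary files such as model.safetensors are stored through Git LFS, so the diff shows only a pointer with the blob's SHA-256 and byte size (about 328 MB here, consistent with roughly 82M float32 parameters for this 6-layer configuration). After downloading the actual file, the pointer can be checked against the blob; a small verification sketch (local path assumed):

import hashlib
import os

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file to avoid loading ~328 MB into memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()

path = "gpt2-eli5-final-by-Yvens/model.safetensors"  # local copy of the file uploaded here
print(os.path.getsize(path))   # expected: 327657928
print(sha256_of(path))         # expected: 17bbe70ac1e467013a740686bcd5cb1576ce2c70134f3adfcf535fdfcad273d4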
gpt2-eli5-final-by-Yvens/special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
gpt2-eli5-final-by-Yvens/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-final-by-Yvens/tokenizer_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
gpt2-eli5-final-by-Yvens/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df40388a7fc006cad21a16f1e843b1a6de7a55741fabf5cd70900fe1a2186c2f
+size 5841
gpt2-eli5-final-by-Yvens/vocab.json
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-finetuned-by-yvens/checkpoint-1116/config.json
ADDED
@@ -0,0 +1,46 @@
+{
+  "_num_labels": 1,
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "dtype": "float32",
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 6,
+  "n_positions": 1024,
+  "pad_token_id": 50256,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "transformers_version": "4.57.3",
+  "use_cache": true,
+  "vocab_size": 50257
+}
gpt2-eli5-finetuned-by-yvens/checkpoint-1116/generation_config.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": [
+    50256
+  ],
+  "pad_token_id": 50256,
+  "transformers_version": "4.57.3"
+}
gpt2-eli5-finetuned-by-yvens/checkpoint-1116/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-finetuned-by-yvens/checkpoint-1116/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d941b8217e715b4cf88747b8c5061c2bd45c770a50ba810876fe14ccb868f6b6
+size 327657928
gpt2-eli5-finetuned-by-yvens/checkpoint-1116/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d2368a2a27ec60ddb1fe8d4bd1957459629e208ada4b4720201f9aefb6fc776
+size 655362763
gpt2-eli5-finetuned-by-yvens/checkpoint-1116/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:842c78c5af0fe75a5fee647b2a44387e9e616631df773da0340a779b06e603d8
+size 14455
gpt2-eli5-finetuned-by-yvens/checkpoint-1116/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1cecd9662819a2b5eca8a0133c1afeee7b3bb3d98a0cffffb7a24d299094a8ac
+size 1465
gpt2-eli5-finetuned-by-yvens/checkpoint-1116/special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
gpt2-eli5-finetuned-by-yvens/checkpoint-1116/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-finetuned-by-yvens/checkpoint-1116/tokenizer_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
gpt2-eli5-finetuned-by-yvens/checkpoint-1116/trainer_state.json
ADDED
@@ -0,0 +1,119 @@
+{
+  "best_global_step": 1116,
+  "best_metric": 3.806886911392212,
+  "best_model_checkpoint": "./gpt2-eli5-finetuned-by-yvens/checkpoint-1116",
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 1116,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.08960573476702509,
+      "grad_norm": 5.001793384552002,
+      "learning_rate": 3.96e-06,
+      "loss": 4.0385,
+      "step": 100
+    },
+    {
+      "epoch": 0.17921146953405018,
+      "grad_norm": 4.920190811157227,
+      "learning_rate": 7.960000000000002e-06,
+      "loss": 4.0278,
+      "step": 200
+    },
+    {
+      "epoch": 0.26881720430107525,
+      "grad_norm": 5.068778991699219,
+      "learning_rate": 1.196e-05,
+      "loss": 3.9832,
+      "step": 300
+    },
+    {
+      "epoch": 0.35842293906810035,
+      "grad_norm": 4.9818525314331055,
+      "learning_rate": 1.5960000000000003e-05,
+      "loss": 3.9571,
+      "step": 400
+    },
+    {
+      "epoch": 0.44802867383512546,
+      "grad_norm": 5.104599952697754,
+      "learning_rate": 1.9960000000000002e-05,
+      "loss": 3.941,
+      "step": 500
+    },
+    {
+      "epoch": 0.5376344086021505,
+      "grad_norm": 5.533235549926758,
+      "learning_rate": 1.9304775280898877e-05,
+      "loss": 3.9193,
+      "step": 600
+    },
+    {
+      "epoch": 0.6272401433691757,
+      "grad_norm": 5.129825592041016,
+      "learning_rate": 1.860252808988764e-05,
+      "loss": 3.9058,
+      "step": 700
+    },
+    {
+      "epoch": 0.7168458781362007,
+      "grad_norm": 4.828098773956299,
+      "learning_rate": 1.7900280898876406e-05,
+      "loss": 3.8911,
+      "step": 800
+    },
+    {
+      "epoch": 0.8064516129032258,
+      "grad_norm": 4.615754127502441,
+      "learning_rate": 1.719803370786517e-05,
+      "loss": 3.898,
+      "step": 900
+    },
+    {
+      "epoch": 0.8960573476702509,
+      "grad_norm": 4.701573371887207,
+      "learning_rate": 1.6495786516853935e-05,
+      "loss": 3.9061,
+      "step": 1000
+    },
+    {
+      "epoch": 0.985663082437276,
+      "grad_norm": 4.873327255249023,
+      "learning_rate": 1.5793539325842696e-05,
+      "loss": 3.9055,
+      "step": 1100
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 3.806886911392212,
+      "eval_runtime": 216.4202,
+      "eval_samples_per_second": 10.831,
+      "eval_steps_per_second": 1.354,
+      "step": 1116
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 3348,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 291574511566848.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
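The log above records one full epoch (1116 optimizer steps at batch size 8), with training loss falling from about 4.04 to 3.91 and an epoch-1 validation loss of 3.8069. For a causal language model the evaluation loss is a mean cross-entropy in nats, so validation perplexity is simply its exponential:

import math

eval_loss_epoch_1 = 3.806886911392212   # from trainer_state.json above
print(math.exp(eval_loss_epoch_1))      # about 45.0: validation perplexity after epoch 1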
gpt2-eli5-finetuned-by-yvens/checkpoint-1116/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df40388a7fc006cad21a16f1e843b1a6de7a55741fabf5cd70900fe1a2186c2f
+size 5841
gpt2-eli5-finetuned-by-yvens/checkpoint-1116/vocab.json
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-finetuned-by-yvens/checkpoint-2232/config.json
ADDED
@@ -0,0 +1,46 @@
+{
+  "_num_labels": 1,
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "dtype": "float32",
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 6,
+  "n_positions": 1024,
+  "pad_token_id": 50256,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "transformers_version": "4.57.3",
+  "use_cache": true,
+  "vocab_size": 50257
+}
gpt2-eli5-finetuned-by-yvens/checkpoint-2232/generation_config.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": [
+    50256
+  ],
+  "pad_token_id": 50256,
+  "transformers_version": "4.57.3"
+}
gpt2-eli5-finetuned-by-yvens/checkpoint-2232/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-finetuned-by-yvens/checkpoint-2232/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d850c9361398f131400b607805a3af037a6e660e4194da2f4ad3c118829804c4
+size 327657928
gpt2-eli5-finetuned-by-yvens/checkpoint-2232/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d6990318122fee5ae156681219db11aaa9a09185ec96e2766e358ff5bd2263c
+size 655362763
gpt2-eli5-finetuned-by-yvens/checkpoint-2232/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66e1159454344ea29d8f6dc41e44e1487b918f50e97024bc69655f2f5356f533
+size 14455
gpt2-eli5-finetuned-by-yvens/checkpoint-2232/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75d9ceaed5269550269bcf5a9a284f40bb077f2eecd25f54cf0d1342e4dfef66
+size 1465
gpt2-eli5-finetuned-by-yvens/checkpoint-2232/special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
gpt2-eli5-finetuned-by-yvens/checkpoint-2232/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-finetuned-by-yvens/checkpoint-2232/tokenizer_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
gpt2-eli5-finetuned-by-yvens/checkpoint-2232/trainer_state.json
ADDED
@@ -0,0 +1,204 @@
+{
+  "best_global_step": 2232,
+  "best_metric": 3.794713258743286,
+  "best_model_checkpoint": "./gpt2-eli5-finetuned-by-yvens/checkpoint-2232",
+  "epoch": 2.0,
+  "eval_steps": 500,
+  "global_step": 2232,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.08960573476702509,
+      "grad_norm": 5.001793384552002,
+      "learning_rate": 3.96e-06,
+      "loss": 4.0385,
+      "step": 100
+    },
+    {
+      "epoch": 0.17921146953405018,
+      "grad_norm": 4.920190811157227,
+      "learning_rate": 7.960000000000002e-06,
+      "loss": 4.0278,
+      "step": 200
+    },
+    {
+      "epoch": 0.26881720430107525,
+      "grad_norm": 5.068778991699219,
+      "learning_rate": 1.196e-05,
+      "loss": 3.9832,
+      "step": 300
+    },
+    {
+      "epoch": 0.35842293906810035,
+      "grad_norm": 4.9818525314331055,
+      "learning_rate": 1.5960000000000003e-05,
+      "loss": 3.9571,
+      "step": 400
+    },
+    {
+      "epoch": 0.44802867383512546,
+      "grad_norm": 5.104599952697754,
+      "learning_rate": 1.9960000000000002e-05,
+      "loss": 3.941,
+      "step": 500
+    },
+    {
+      "epoch": 0.5376344086021505,
+      "grad_norm": 5.533235549926758,
+      "learning_rate": 1.9304775280898877e-05,
+      "loss": 3.9193,
+      "step": 600
+    },
+    {
+      "epoch": 0.6272401433691757,
+      "grad_norm": 5.129825592041016,
+      "learning_rate": 1.860252808988764e-05,
+      "loss": 3.9058,
+      "step": 700
+    },
+    {
+      "epoch": 0.7168458781362007,
+      "grad_norm": 4.828098773956299,
+      "learning_rate": 1.7900280898876406e-05,
+      "loss": 3.8911,
+      "step": 800
+    },
+    {
+      "epoch": 0.8064516129032258,
+      "grad_norm": 4.615754127502441,
+      "learning_rate": 1.719803370786517e-05,
+      "loss": 3.898,
+      "step": 900
+    },
+    {
+      "epoch": 0.8960573476702509,
+      "grad_norm": 4.701573371887207,
+      "learning_rate": 1.6495786516853935e-05,
+      "loss": 3.9061,
+      "step": 1000
+    },
+    {
+      "epoch": 0.985663082437276,
+      "grad_norm": 4.873327255249023,
+      "learning_rate": 1.5793539325842696e-05,
+      "loss": 3.9055,
+      "step": 1100
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 3.806886911392212,
+      "eval_runtime": 216.4202,
+      "eval_samples_per_second": 10.831,
+      "eval_steps_per_second": 1.354,
+      "step": 1116
+    },
+    {
+      "epoch": 1.075268817204301,
+      "grad_norm": 4.852152347564697,
+      "learning_rate": 1.509129213483146e-05,
+      "loss": 3.8508,
+      "step": 1200
+    },
+    {
+      "epoch": 1.1648745519713262,
+      "grad_norm": 4.97360372543335,
+      "learning_rate": 1.4389044943820225e-05,
+      "loss": 3.8355,
+      "step": 1300
+    },
+    {
+      "epoch": 1.2544802867383513,
+      "grad_norm": 5.336327075958252,
+      "learning_rate": 1.368679775280899e-05,
+      "loss": 3.8389,
+      "step": 1400
+    },
+    {
+      "epoch": 1.3440860215053765,
+      "grad_norm": 4.8845391273498535,
+      "learning_rate": 1.2984550561797752e-05,
+      "loss": 3.8434,
+      "step": 1500
+    },
+    {
+      "epoch": 1.4336917562724014,
+      "grad_norm": 5.234856128692627,
+      "learning_rate": 1.2282303370786517e-05,
+      "loss": 3.8194,
+      "step": 1600
+    },
+    {
+      "epoch": 1.5232974910394266,
+      "grad_norm": 5.9331865310668945,
+      "learning_rate": 1.1580056179775281e-05,
+      "loss": 3.8246,
+      "step": 1700
+    },
+    {
+      "epoch": 1.6129032258064515,
+      "grad_norm": 5.067050933837891,
+      "learning_rate": 1.0877808988764044e-05,
+      "loss": 3.8345,
+      "step": 1800
+    },
+    {
+      "epoch": 1.7025089605734767,
+      "grad_norm": 5.266586780548096,
+      "learning_rate": 1.0175561797752809e-05,
+      "loss": 3.8354,
+      "step": 1900
+    },
+    {
+      "epoch": 1.7921146953405018,
+      "grad_norm": 4.780787944793701,
+      "learning_rate": 9.473314606741573e-06,
+      "loss": 3.8364,
+      "step": 2000
+    },
+    {
+      "epoch": 1.881720430107527,
+      "grad_norm": 4.910538196563721,
+      "learning_rate": 8.771067415730338e-06,
+      "loss": 3.8148,
+      "step": 2100
+    },
+    {
+      "epoch": 1.971326164874552,
+      "grad_norm": 5.021151065826416,
+      "learning_rate": 8.068820224719102e-06,
+      "loss": 3.8147,
+      "step": 2200
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 3.794713258743286,
+      "eval_runtime": 219.9998,
+      "eval_samples_per_second": 10.655,
+      "eval_steps_per_second": 1.332,
+      "step": 2232
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 3348,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 583149023133696.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
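By epoch 2 the validation loss improves only slightly (3.8069 to 3.7947, roughly perplexity 45.0 to 44.5), and trainer_state.json marks checkpoint-2232 as the best checkpoint so far. The schedule visible in the log (batch size 8, 3 epochs / 3348 steps, logging every 100 steps, warmup to about 2e-5 over the first 500 steps followed by linear decay, evaluation and checkpointing at epoch boundaries) is consistent with TrainingArguments along these lines; treat this as a plausible reconstruction, not the exact contents of training_args.bin:

from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./gpt2-eli5-finetuned-by-yvens",
    per_device_train_batch_size=8,
    num_train_epochs=3,
    learning_rate=2e-5,          # peak LR implied by the step-500 value in the log
    warmup_steps=500,
    logging_steps=100,
    eval_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",
)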
gpt2-eli5-finetuned-by-yvens/checkpoint-2232/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df40388a7fc006cad21a16f1e843b1a6de7a55741fabf5cd70900fe1a2186c2f
+size 5841
gpt2-eli5-finetuned-by-yvens/checkpoint-2232/vocab.json
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-finetuned-by-yvens/checkpoint-3348/config.json
ADDED
@@ -0,0 +1,46 @@
+{
+  "_num_labels": 1,
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "dtype": "float32",
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 6,
+  "n_positions": 1024,
+  "pad_token_id": 50256,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "transformers_version": "4.57.3",
+  "use_cache": true,
+  "vocab_size": 50257
+}
gpt2-eli5-finetuned-by-yvens/checkpoint-3348/generation_config.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": [
+    50256
+  ],
+  "pad_token_id": 50256,
+  "transformers_version": "4.57.3"
+}
gpt2-eli5-finetuned-by-yvens/checkpoint-3348/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-finetuned-by-yvens/checkpoint-3348/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17bbe70ac1e467013a740686bcd5cb1576ce2c70134f3adfcf535fdfcad273d4
+size 327657928
gpt2-eli5-finetuned-by-yvens/checkpoint-3348/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53465fadd00e9798f08db1f22fb7db9df089516b41b995491bc7c441a728b0a4
+size 655362763
gpt2-eli5-finetuned-by-yvens/checkpoint-3348/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6169b4431d49d166de3b34de957b534726a26586fb8bc6a800362473218ee71
+size 14455
gpt2-eli5-finetuned-by-yvens/checkpoint-3348/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d279d20f9cc838eedba1fcafa52ed1b7fb5c2ef58289537fdfe6a7fb47d3dfb
+size 1465
gpt2-eli5-finetuned-by-yvens/checkpoint-3348/special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
gpt2-eli5-finetuned-by-yvens/checkpoint-3348/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
gpt2-eli5-finetuned-by-yvens/checkpoint-3348/tokenizer_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}