imda-lseokmin committed
Commit 57a83d0 · verified · 1 parent: 310c9f4

Upload 104 files

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. README.md +52 -3
  2. all_results.json +9 -0
  3. checkpoint-1000/config.json +39 -0
  4. checkpoint-1000/generation_config.json +6 -0
  5. checkpoint-1000/merges.txt +0 -0
  6. checkpoint-1000/model.safetensors +3 -0
  7. checkpoint-1000/optimizer.pt +3 -0
  8. checkpoint-1000/rng_state.pth +3 -0
  9. checkpoint-1000/scheduler.pt +3 -0
  10. checkpoint-1000/special_tokens_map.json +5 -0
  11. checkpoint-1000/tokenizer.json +0 -0
  12. checkpoint-1000/tokenizer_config.json +19 -0
  13. checkpoint-1000/trainer_state.json +47 -0
  14. checkpoint-1000/training_args.bin +3 -0
  15. checkpoint-1000/vocab.json +0 -0
  16. checkpoint-1500/config.json +39 -0
  17. checkpoint-1500/generation_config.json +6 -0
  18. checkpoint-1500/merges.txt +0 -0
  19. checkpoint-1500/model.safetensors +3 -0
  20. checkpoint-1500/optimizer.pt +3 -0
  21. checkpoint-1500/rng_state.pth +3 -0
  22. checkpoint-1500/scheduler.pt +3 -0
  23. checkpoint-1500/special_tokens_map.json +5 -0
  24. checkpoint-1500/tokenizer.json +0 -0
  25. checkpoint-1500/tokenizer_config.json +19 -0
  26. checkpoint-1500/trainer_state.json +54 -0
  27. checkpoint-1500/training_args.bin +3 -0
  28. checkpoint-1500/vocab.json +0 -0
  29. checkpoint-2000/config.json +39 -0
  30. checkpoint-2000/generation_config.json +6 -0
  31. checkpoint-2000/merges.txt +0 -0
  32. checkpoint-2000/model.safetensors +3 -0
  33. checkpoint-2000/optimizer.pt +3 -0
  34. checkpoint-2000/rng_state.pth +3 -0
  35. checkpoint-2000/scheduler.pt +3 -0
  36. checkpoint-2000/special_tokens_map.json +5 -0
  37. checkpoint-2000/tokenizer.json +0 -0
  38. checkpoint-2000/tokenizer_config.json +19 -0
  39. checkpoint-2000/trainer_state.json +61 -0
  40. checkpoint-2000/training_args.bin +3 -0
  41. checkpoint-2000/vocab.json +0 -0
  42. checkpoint-2500/config.json +39 -0
  43. checkpoint-2500/generation_config.json +6 -0
  44. checkpoint-2500/merges.txt +0 -0
  45. checkpoint-2500/model.safetensors +3 -0
  46. checkpoint-2500/optimizer.pt +3 -0
  47. checkpoint-2500/rng_state.pth +3 -0
  48. checkpoint-2500/scheduler.pt +3 -0
  49. checkpoint-2500/special_tokens_map.json +5 -0
  50. checkpoint-2500/tokenizer.json +0 -0
README.md CHANGED
@@ -1,3 +1,52 @@
- ---
- license: mit
- ---
+ ---
+ license: mit
+ base_model: gpt2
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: sg_toxic_generator_model
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # sg_toxic_generator_model
+
+ This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 2
+ - eval_batch_size: 2
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 20.0
+
+ ### Training results
+
+ ### Framework versions
+
+ - Transformers 4.41.1
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.16.0
+ - Tokenizers 0.19.1
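For reference, these hyperparameters correspond to a `transformers.TrainingArguments` along the lines of the sketch below — not the actual training script from this commit. The `output_dir` is illustrative, the Adam betas/epsilon and linear schedule are the Trainer defaults, and the 500-step logging/save cadence is taken from the trainer states further down.

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="sg_toxic_generator_model",  # illustrative
    learning_rate=5e-5,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    seed=42,
    lr_scheduler_type="linear",  # Trainer default, stated in the card
    num_train_epochs=20.0,
    logging_steps=500,  # matches the log_history entries below
    save_steps=500,     # produces the checkpoint-1000/1500/... folders
)
```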
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 20.0,
+   "total_flos": 3982090567680000.0,
+   "train_loss": 3.3043636781382935,
+   "train_runtime": 1550.8683,
+   "train_samples": 381,
+   "train_samples_per_second": 4.913,
+   "train_steps_per_second": 2.463
+ }
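The throughput figures are internally consistent with the trainer states below; a quick check (batch size 2 from the model card, a single device assumed):

```python
import math

train_samples = 381
num_epochs = 20
train_runtime = 1550.8683  # seconds
batch_size = 2

print(train_samples * num_epochs / train_runtime)        # ≈ 4.913 samples/s
steps_per_epoch = math.ceil(train_samples / batch_size)  # 191 (the last short batch counts)
print(steps_per_epoch * num_epochs)                      # 3820 = max_steps in trainer_state
print(steps_per_epoch * num_epochs / train_runtime)      # ≈ 2.463 steps/s
```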
checkpoint-1000/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.1",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
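This is the stock GPT-2 small architecture (12 layers, 12 heads, 768-dim embeddings, 50257-token vocabulary); only the weights differ from the base model. As a sanity check, the library defaults reproduce this config, and the parameter count accounts for the checkpoint size below:

```python
from transformers import GPT2Config, GPT2LMHeadModel

# GPT2Config() defaults match the file above; weights here are random —
# this only checks the architecture's size, not the fine-tuned model.
model = GPT2LMHeadModel(GPT2Config())
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params:,}")  # 124,439,808 params -> ~498 MB in float32,
                        # in line with model.safetensors below
```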
checkpoint-1000/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.41.1"
+ }
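generation_config.json pins only the BOS/EOS ids (it was derived from the model config, per `_from_model_config`); the sampling defaults `do_sample: true` and `max_length: 50` live under `task_specific_params` in config.json, where the text-generation pipeline picks them up. A minimal usage sketch — the local checkpoint path is illustrative, and the hosted repo id would work the same way:

```python
from transformers import pipeline

# Load tokenizer + fine-tuned weights from a checkpoint directory (illustrative path).
generator = pipeline("text-generation", model="checkpoint-1000")

# do_sample=True / max_length=50 arrive as task defaults from config.json;
# bos/eos ids (50256) come from generation_config.json.
print(generator("Example prompt")[0]["generated_text"])
```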
checkpoint-1000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9374fbb2ab4ef9ed278af4c3dd610e1f31b821aeb5f658753ce0a7c2d429ccb3
+ size 497774208
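The binary files in this commit are Git LFS pointers: three lines recording the spec version, the object's SHA-256, and its size in bytes. Once the actual blob is downloaded, it can be verified against the pointer — a sketch using the digest above (the local path is an assumption):

```python
import hashlib

def verify_lfs_object(path: str, expected_sha256: str, expected_size: int) -> bool:
    """Stream the file and compare its SHA-256 digest and size to the pointer."""
    digest, size = hashlib.sha256(), 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_sha256 and size == expected_size

print(verify_lfs_object(
    "checkpoint-1000/model.safetensors",  # local download path, illustrative
    "9374fbb2ab4ef9ed278af4c3dd610e1f31b821aeb5f658753ce0a7c2d429ccb3",
    497774208,
))
```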
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efadce1692ab0bdae031711abd241c437d3ee5e9ef9b4993134be7ae94400a4e
+ size 995642298
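optimizer.pt is roughly twice the size of the float32 weights, as expected: Adam keeps two float32 moment buffers per parameter. A quick check:

```python
# model.safetensors holds 497,774,208 bytes of float32 weights; two Adam moment
# buffers of the same shapes roughly double that, the rest is serialization overhead.
print(2 * 497_774_208)  # 995548416, close to optimizer.pt's 995642298 bytes
```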
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14b5d9e136cfcc76d63c4356cc00ffab5521f8f3617e241d52b7c803985e22ed
+ size 14244
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d96409fac135efbe2309f3c0211dadf7288e17afa4f579d63e7c168779b9281
+ size 1064
checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-1000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 5.2356020942408374,
+   "eval_steps": 500,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 2.6178010471204187,
+       "grad_norm": 4.953078269958496,
+       "learning_rate": 4.3455497382198955e-05,
+       "loss": 4.0934,
+       "step": 500
+     },
+     {
+       "epoch": 5.2356020942408374,
+       "grad_norm": 4.2992095947265625,
+       "learning_rate": 3.691099476439791e-05,
+       "loss": 3.7159,
+       "step": 1000
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 3820,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 20,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1042555207680000.0,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
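trainer_state.json carries the logged loss curve plus the bookkeeping the Trainer needs to resume: pointing `trainer.train(resume_from_checkpoint="checkpoint-1000")` at this folder would restore the optimizer, scheduler, and RNG state (optimizer.pt, scheduler.pt, rng_state.pth) and continue from step 1000 toward max_steps 3820. To read the loss curve directly — a small sketch assuming a local copy of the file:

```python
import json

with open("checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

# Two log entries so far: loss 4.0934 at step 500, 3.7159 at step 1000.
for entry in state["log_history"]:
    print(f"step {entry['step']:>4}  epoch {entry['epoch']:.2f}  loss {entry['loss']:.4f}")
```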
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8340bb38e933c578d75c23425604f9ded7016172341fa692743654b53749296
+ size 5112
checkpoint-1000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1500/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.1",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
checkpoint-1500/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.41.1"
+ }
checkpoint-1500/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e46ba788734db115fb6e80cd918016b5d0a8176158c9a46ad41e5fd7374b2ee
+ size 497774208
checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc409abb6f627a63e5c6fc725c92793bb69ef51474783aa8aa68ecfb0d9ea20c
+ size 995642298
checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1db5d3b38d3e26de8462f8cf8ea5bc12b75c70c7392fb2c277da7f198bcd4291
+ size 14244
checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33779dd91a5985bcac403d0c5b95438046f2b3d559c530858d88d590aef5839d
+ size 1064
checkpoint-1500/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-1500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1500/tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 7.853403141361256,
+   "eval_steps": 500,
+   "global_step": 1500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 2.6178010471204187,
+       "grad_norm": 4.953078269958496,
+       "learning_rate": 4.3455497382198955e-05,
+       "loss": 4.0934,
+       "step": 500
+     },
+     {
+       "epoch": 5.2356020942408374,
+       "grad_norm": 4.2992095947265625,
+       "learning_rate": 3.691099476439791e-05,
+       "loss": 3.7159,
+       "step": 1000
+     },
+     {
+       "epoch": 7.853403141361256,
+       "grad_norm": 4.553157329559326,
+       "learning_rate": 3.036649214659686e-05,
+       "loss": 3.4509,
+       "step": 1500
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 3820,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 20,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1564094103552000.0,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8340bb38e933c578d75c23425604f9ded7016172341fa692743654b53749296
+ size 5112
checkpoint-1500/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2000/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.1",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
checkpoint-2000/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.41.1"
+ }
checkpoint-2000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9de7024658ea0d43e1e1c53f32f4822af922273dc673662db08a8d6a141e952
+ size 497774208
checkpoint-2000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d98936d517a4c41ba060cd5f77e4668686846b97556395a277778a952c639ac6
+ size 995642298
checkpoint-2000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe24944c83fc32fd759afd3e676042b69a588c11b6add69a89f07481ed1d7b4e
+ size 14244
checkpoint-2000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb020281ace5e9570b54946f63f46d18d91488bfbb3ae1454798e4c4ff1f70ab
+ size 1064
checkpoint-2000/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-2000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2000/tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-2000/trainer_state.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 10.471204188481675,
+   "eval_steps": 500,
+   "global_step": 2000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 2.6178010471204187,
+       "grad_norm": 4.953078269958496,
+       "learning_rate": 4.3455497382198955e-05,
+       "loss": 4.0934,
+       "step": 500
+     },
+     {
+       "epoch": 5.2356020942408374,
+       "grad_norm": 4.2992095947265625,
+       "learning_rate": 3.691099476439791e-05,
+       "loss": 3.7159,
+       "step": 1000
+     },
+     {
+       "epoch": 7.853403141361256,
+       "grad_norm": 4.553157329559326,
+       "learning_rate": 3.036649214659686e-05,
+       "loss": 3.4509,
+       "step": 1500
+     },
+     {
+       "epoch": 10.471204188481675,
+       "grad_norm": 4.436212062835693,
+       "learning_rate": 2.382198952879581e-05,
+       "loss": 3.2407,
+       "step": 2000
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 3820,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 20,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 2085110415360000.0,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-2000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8340bb38e933c578d75c23425604f9ded7016172341fa692743654b53749296
+ size 5112
checkpoint-2000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2500/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.1",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
checkpoint-2500/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.41.1"
+ }
checkpoint-2500/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5984eb709b5c93a7d5eb92abd745e613fb990be404ced8603000ec605c0c7f00
+ size 497774208
checkpoint-2500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2ad2ecd854cbec1dda54bf9cda10a979a5341154044f567087733cf8cf386d1
+ size 995642298
checkpoint-2500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:855f9cc495b3b87180ebaf565bd4bd614066909697663573736256c818c0dc49
+ size 14244
checkpoint-2500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2432a09a517e1cc05aad739783864b8631ac12f1dc79e5cd6b52f9cbea0c0e0e
+ size 1064
checkpoint-2500/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-2500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff