tcmmichaelb139 committed 6a91ca3 (verified · 1 parent: 5a1eed0)

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,41 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "dtype": "float32",
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_ctx": 1024,
+ "n_embd": 1024,
+ "n_head": 16,
+ "n_inner": null,
+ "n_layer": 24,
+ "n_positions": 1024,
+ "n_special": 0,
+ "pad_token_id": 50256,
+ "predict_special_tokens": true,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "task_specific_params": {
+ "text-generation": {
+ "do_sample": true,
+ "max_length": 50
+ }
+ },
+ "transformers_version": "4.56.1",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
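
The hyperparameters above (n_layer 24, n_head 16, n_embd 1024, vocab_size 50257) match the GPT-2 medium architecture, roughly 355M parameters. A minimal loading sketch; the repo id below is a placeholder, since the commit page does not show the repository name:

```python
from transformers import AutoConfig, AutoModelForCausalLM

REPO_ID = "tcmmichaelb139/REPO_NAME"  # placeholder; actual repo name not shown in this diff

config = AutoConfig.from_pretrained(REPO_ID)
print(config.n_layer, config.n_head, config.n_embd)  # 24 16 1024, per config.json above

# "architectures": ["GPT2LMHeadModel"] lets AutoModelForCausalLM dispatch correctly.
model = AutoModelForCausalLM.from_pretrained(REPO_ID)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")  # ~355M
```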
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 50256,
+ "eos_token_id": [
+ 50256
+ ],
+ "pad_token_id": 50256,
+ "transformers_version": "4.56.1"
+ }
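
generation_config.json only pins the special-token ids; sampling defaults for the text-generation pipeline come from task_specific_params in config.json (do_sample=True, max_length=50). A usage sketch with the same placeholder repo id:

```python
from transformers import pipeline

generator = pipeline("text-generation", model="tcmmichaelb139/REPO_NAME")  # placeholder id

# eos and pad both resolve to 50256 (<|endoftext|>), so generation stops cleanly.
out = generator("Once upon a time", max_length=50, do_sample=True)
print(out[0]["generated_text"])
```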
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5fe9e82b1a3f3af61549377a4a3f7262cf980859320d81288a56fa4d9bf8be8
+ size 1419322880
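
This is a Git LFS pointer, not the weights themselves: the 1,419,322,880-byte safetensors blob (consistent with ~355M float32 parameters at 4 bytes each) lives in LFS storage under the sha256 oid. A small standard-library sketch to verify a downloaded copy against the pointer:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so the 1.4 GB blob never sits in memory at once."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            h.update(block)
    return h.hexdigest()

EXPECTED = "f5fe9e82b1a3f3af61549377a4a3f7262cf980859320d81288a56fa4d9bf8be8"
assert sha256_of("model.safetensors") == EXPECTED, "LFS object does not match pointer"
```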
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99e69022da89a3c0e947578ca4f554541aa37bd7a24337f2cb1ebe7b9c5547c2
+ size 2838833803
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1e4d08610730ac21a86fb7de99b50287d5cf193d4d421a30288aadb9831a611
+ size 14645
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8bfeea8e4447d7603087ddf9c6b046876057b7a98bce2b7f669464c51f2acc90
+ size 1465
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token": "<|endoftext|>",
+ "eos_token": "<|endoftext|>",
+ "pad_token": "<|endoftext|>",
+ "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "50256": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<|endoftext|>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|endoftext|>",
+ "extra_special_tokens": {},
+ "model_max_length": 1024,
+ "pad_token": "<|endoftext|>",
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "<|endoftext|>"
+ }
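
All four special-token roles (bos, eos, pad, unk) collapse onto <|endoftext|> (id 50256), the usual GPT-2 convention since the original vocabulary ships no dedicated pad or unk token. A quick check, again with the placeholder repo id:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("tcmmichaelb139/REPO_NAME")  # placeholder id

print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)  # all <|endoftext|>
print(tok.eos_token_id)  # 50256
```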
trainer_state.json ADDED
@@ -0,0 +1,90 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 10.0,
+ "eval_steps": 500,
+ "global_step": 1750,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 1.5440185070037842,
+ "learning_rate": 4.975e-05,
+ "loss": 1.3873,
+ "step": 200
+ },
+ {
+ "epoch": 2.2857142857142856,
+ "grad_norm": 1.1913411617279053,
+ "learning_rate": 4.3580645161290324e-05,
+ "loss": 0.0893,
+ "step": 400
+ },
+ {
+ "epoch": 3.4285714285714284,
+ "grad_norm": 0.7728761434555054,
+ "learning_rate": 3.7129032258064516e-05,
+ "loss": 0.0617,
+ "step": 600
+ },
+ {
+ "epoch": 4.571428571428571,
+ "grad_norm": 0.8336076736450195,
+ "learning_rate": 3.067741935483871e-05,
+ "loss": 0.0527,
+ "step": 800
+ },
+ {
+ "epoch": 5.714285714285714,
+ "grad_norm": 0.7497674822807312,
+ "learning_rate": 2.4225806451612903e-05,
+ "loss": 0.0414,
+ "step": 1000
+ },
+ {
+ "epoch": 6.857142857142857,
+ "grad_norm": 0.8337773084640503,
+ "learning_rate": 1.7774193548387098e-05,
+ "loss": 0.035,
+ "step": 1200
+ },
+ {
+ "epoch": 8.0,
+ "grad_norm": 0.5487708449363708,
+ "learning_rate": 1.132258064516129e-05,
+ "loss": 0.031,
+ "step": 1400
+ },
+ {
+ "epoch": 9.142857142857142,
+ "grad_norm": 0.49486181139945984,
+ "learning_rate": 4.870967741935484e-06,
+ "loss": 0.0264,
+ "step": 1600
+ }
+ ],
+ "logging_steps": 200,
+ "max_steps": 1750,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 6500904861696000.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
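
The log shows training loss falling from 1.3873 to 0.0264 over 10 epochs (1,750 steps, logged every 200), with the learning rate decaying roughly linearly toward zero. trainer_state.json is plain JSON, so the curve can be pulled out directly:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# One line per logged step: loss drops sharply after the first epoch.
for e in state["log_history"]:
    print(f'step {e["step"]:>4}  epoch {e["epoch"]:5.2f}  '
          f'loss {e["loss"]:.4f}  lr {e["learning_rate"]:.3g}')
```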
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2f4fc6172790fcb1c4a21db97d5f309ed8a5593ee4935375dcc90d880289256
+ size 5777
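
training_args.bin is a pickled TrainingArguments object, so inspecting it requires transformers to be importable and, on recent PyTorch versions, weights_only=False, which implies trusting the file's source. A cautious sketch:

```python
import torch

# Unpickling can execute arbitrary code in principle; only load files you trust.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.per_device_train_batch_size, args.learning_rate)
```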
vocab.json ADDED
The diff for this file is too large to render. See raw diff