liangc40 committed
Commit 3f48734 · verified · 1 Parent(s): af7a379

Upload folder using huggingface_hub

Files changed (40)
  1. student_finetuned/checkpoint-282/config.json +41 -0
  2. student_finetuned/checkpoint-282/generation_config.json +6 -0
  3. student_finetuned/checkpoint-282/model.safetensors +3 -0
  4. student_finetuned/checkpoint-282/optimizer.pt +3 -0
  5. student_finetuned/checkpoint-282/rng_state.pth +3 -0
  6. student_finetuned/checkpoint-282/scaler.pt +3 -0
  7. student_finetuned/checkpoint-282/scheduler.pt +3 -0
  8. student_finetuned/checkpoint-282/special_tokens_map.json +7 -0
  9. student_finetuned/checkpoint-282/tokenizer.json +0 -0
  10. student_finetuned/checkpoint-282/tokenizer_config.json +58 -0
  11. student_finetuned/checkpoint-282/trainer_state.json +230 -0
  12. student_finetuned/checkpoint-282/training_args.bin +3 -0
  13. student_finetuned/checkpoint-282/vocab.txt +0 -0
  14. student_finetuned/config.json +41 -0
  15. student_finetuned/generation_config.json +6 -0
  16. student_finetuned/model.safetensors +3 -0
  17. student_finetuned/special_tokens_map.json +7 -0
  18. student_finetuned/tokenizer.json +0 -0
  19. student_finetuned/tokenizer_config.json +58 -0
  20. student_finetuned/vocab.txt +0 -0
  21. student_lora_finetuned/README.md +202 -0
  22. student_lora_finetuned/adapter_config.json +33 -0
  23. student_lora_finetuned/adapter_model.safetensors +3 -0
  24. student_lora_finetuned/checkpoint-930/README.md +202 -0
  25. student_lora_finetuned/checkpoint-930/adapter_config.json +33 -0
  26. student_lora_finetuned/checkpoint-930/adapter_model.safetensors +3 -0
  27. student_lora_finetuned/checkpoint-930/optimizer.pt +3 -0
  28. student_lora_finetuned/checkpoint-930/rng_state.pth +3 -0
  29. student_lora_finetuned/checkpoint-930/scaler.pt +3 -0
  30. student_lora_finetuned/checkpoint-930/scheduler.pt +3 -0
  31. student_lora_finetuned/checkpoint-930/special_tokens_map.json +37 -0
  32. student_lora_finetuned/checkpoint-930/tokenizer.json +0 -0
  33. student_lora_finetuned/checkpoint-930/tokenizer_config.json +65 -0
  34. student_lora_finetuned/checkpoint-930/trainer_state.json +1336 -0
  35. student_lora_finetuned/checkpoint-930/training_args.bin +3 -0
  36. student_lora_finetuned/checkpoint-930/vocab.txt +0 -0
  37. student_lora_finetuned/special_tokens_map.json +37 -0
  38. student_lora_finetuned/tokenizer.json +0 -0
  39. student_lora_finetuned/tokenizer_config.json +65 -0
  40. student_lora_finetuned/vocab.txt +0 -0
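The commit message says the folder was uploaded with huggingface_hub, so the same library can fetch this revision back. A minimal sketch, assuming a local Python environment with huggingface_hub installed; the repo id below is a placeholder, since it is not visible in this view:

```python
# Download sketch using huggingface_hub (the library named in the commit message).
# "liangc40/<repo-name>" is a placeholder; the actual repo id is not shown in this commit view.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(repo_id="liangc40/<repo-name>", revision="3f48734")
print(local_dir)  # local path containing student_finetuned/ and student_lora_finetuned/
```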
student_finetuned/checkpoint-282/config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "output_past": true,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 120
+     }
+   },
+   "tokenizer_class": "BertTokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.3",
+   "use_cache": true,
+   "vocab_size": 22557
+ }
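The config describes a 12-layer, 768-hidden GPT-2 LM head model with a reduced 22,557-token vocabulary and a BertTokenizer (WordPiece) tokenizer. A minimal loading sketch, assuming transformers is installed and the repo has been cloned locally; the path is a placeholder, not part of this commit:

```python
# Loading sketch for the student checkpoint (local path is a placeholder).
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

ckpt = "student_finetuned/checkpoint-282"           # placeholder local path
config = AutoConfig.from_pretrained(ckpt)           # reads config.json shown above
tokenizer = AutoTokenizer.from_pretrained(ckpt)     # BertTokenizer per tokenizer_class
model = AutoModelForCausalLM.from_pretrained(ckpt)  # loads model.safetensors

print(config.n_layer, config.n_embd, config.vocab_size)  # 12 768 22557
```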
student_finetuned/checkpoint-282/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.51.3"
+ }
student_finetuned/checkpoint-282/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b902ff392321f8b7ef9a1b9e12591d74125e98cd04e2b6cd9aec38649307716
+ size 412679808
student_finetuned/checkpoint-282/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a6d898b50fbc032544a92bd56e8920b56fba1de580989acafc54e842b395a31
+ size 825453498
student_finetuned/checkpoint-282/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3837a2b51525e412b7700c9c872dacba4f277c1a3292053e3a1ccd8b5d06bcc4
+ size 14244
student_finetuned/checkpoint-282/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82fbb8a41699527f8e897a6a4653fd200a409dcfcca603338998c5883c75ff74
+ size 988
student_finetuned/checkpoint-282/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ad6dd2b5727fe8a6f4108f69903c823118467fa11ae53cc14acdc6b5095985b
+ size 1064
student_finetuned/checkpoint-282/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
student_finetuned/checkpoint-282/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
student_finetuned/checkpoint-282/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
student_finetuned/checkpoint-282/trainer_state.json ADDED
@@ -0,0 +1,230 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 282,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.10638297872340426,
+       "grad_norm": 2.73763108253479,
+       "learning_rate": 4.840425531914894e-05,
+       "loss": 7.0593,
+       "step": 10
+     },
+     {
+       "epoch": 0.2127659574468085,
+       "grad_norm": 4.184208869934082,
+       "learning_rate": 4.663120567375887e-05,
+       "loss": 4.3509,
+       "step": 20
+     },
+     {
+       "epoch": 0.3191489361702128,
+       "grad_norm": 2.815650224685669,
+       "learning_rate": 4.48581560283688e-05,
+       "loss": 2.3531,
+       "step": 30
+     },
+     {
+       "epoch": 0.425531914893617,
+       "grad_norm": 1.4433400630950928,
+       "learning_rate": 4.3085106382978725e-05,
+       "loss": 1.3585,
+       "step": 40
+     },
+     {
+       "epoch": 0.5319148936170213,
+       "grad_norm": 1.3113492727279663,
+       "learning_rate": 4.1312056737588654e-05,
+       "loss": 0.8143,
+       "step": 50
+     },
+     {
+       "epoch": 0.6382978723404256,
+       "grad_norm": 1.1828652620315552,
+       "learning_rate": 3.953900709219858e-05,
+       "loss": 0.7293,
+       "step": 60
+     },
+     {
+       "epoch": 0.7446808510638298,
+       "grad_norm": 1.0307129621505737,
+       "learning_rate": 3.776595744680852e-05,
+       "loss": 0.5471,
+       "step": 70
+     },
+     {
+       "epoch": 0.851063829787234,
+       "grad_norm": 0.7392168045043945,
+       "learning_rate": 3.599290780141844e-05,
+       "loss": 0.5001,
+       "step": 80
+     },
+     {
+       "epoch": 0.9574468085106383,
+       "grad_norm": 1.0385117530822754,
+       "learning_rate": 3.4219858156028374e-05,
+       "loss": 0.4176,
+       "step": 90
+     },
+     {
+       "epoch": 1.0638297872340425,
+       "grad_norm": 0.8549280166625977,
+       "learning_rate": 3.2446808510638296e-05,
+       "loss": 0.4753,
+       "step": 100
+     },
+     {
+       "epoch": 1.1702127659574468,
+       "grad_norm": 0.6353434920310974,
+       "learning_rate": 3.067375886524823e-05,
+       "loss": 0.3597,
+       "step": 110
+     },
+     {
+       "epoch": 1.2765957446808511,
+       "grad_norm": 0.5479038953781128,
+       "learning_rate": 2.8900709219858156e-05,
+       "loss": 0.332,
+       "step": 120
+     },
+     {
+       "epoch": 1.3829787234042552,
+       "grad_norm": 0.7110474705696106,
+       "learning_rate": 2.7127659574468084e-05,
+       "loss": 0.4116,
+       "step": 130
+     },
+     {
+       "epoch": 1.4893617021276595,
+       "grad_norm": 0.7794738411903381,
+       "learning_rate": 2.5354609929078016e-05,
+       "loss": 0.3355,
+       "step": 140
+     },
+     {
+       "epoch": 1.5957446808510638,
+       "grad_norm": 0.5863938927650452,
+       "learning_rate": 2.3581560283687945e-05,
+       "loss": 0.3371,
+       "step": 150
+     },
+     {
+       "epoch": 1.702127659574468,
+       "grad_norm": 1.5034053325653076,
+       "learning_rate": 2.1808510638297873e-05,
+       "loss": 0.4327,
+       "step": 160
+     },
+     {
+       "epoch": 1.8085106382978724,
+       "grad_norm": 0.534168541431427,
+       "learning_rate": 2.0035460992907805e-05,
+       "loss": 0.3472,
+       "step": 170
+     },
+     {
+       "epoch": 1.9148936170212765,
+       "grad_norm": 0.5549598336219788,
+       "learning_rate": 1.8262411347517733e-05,
+       "loss": 0.3687,
+       "step": 180
+     },
+     {
+       "epoch": 2.021276595744681,
+       "grad_norm": 0.5196494460105896,
+       "learning_rate": 1.6489361702127658e-05,
+       "loss": 0.2445,
+       "step": 190
+     },
+     {
+       "epoch": 2.127659574468085,
+       "grad_norm": 0.5457848310470581,
+       "learning_rate": 1.4716312056737588e-05,
+       "loss": 0.2548,
+       "step": 200
+     },
+     {
+       "epoch": 2.2340425531914896,
+       "grad_norm": 0.5511345863342285,
+       "learning_rate": 1.2943262411347517e-05,
+       "loss": 0.3374,
+       "step": 210
+     },
+     {
+       "epoch": 2.3404255319148937,
+       "grad_norm": 0.4927542805671692,
+       "learning_rate": 1.1170212765957447e-05,
+       "loss": 0.3462,
+       "step": 220
+     },
+     {
+       "epoch": 2.4468085106382977,
+       "grad_norm": 0.5920850038528442,
+       "learning_rate": 9.397163120567375e-06,
+       "loss": 0.2803,
+       "step": 230
+     },
+     {
+       "epoch": 2.5531914893617023,
+       "grad_norm": 0.5556482076644897,
+       "learning_rate": 7.6241134751773054e-06,
+       "loss": 0.3066,
+       "step": 240
+     },
+     {
+       "epoch": 2.6595744680851063,
+       "grad_norm": 0.4986216723918915,
+       "learning_rate": 5.851063829787235e-06,
+       "loss": 0.3169,
+       "step": 250
+     },
+     {
+       "epoch": 2.7659574468085104,
+       "grad_norm": 0.7671786546707153,
+       "learning_rate": 4.078014184397164e-06,
+       "loss": 0.307,
+       "step": 260
+     },
+     {
+       "epoch": 2.872340425531915,
+       "grad_norm": 0.8555495142936707,
+       "learning_rate": 2.304964539007092e-06,
+       "loss": 0.2727,
+       "step": 270
+     },
+     {
+       "epoch": 2.978723404255319,
+       "grad_norm": 0.5921739935874939,
+       "learning_rate": 5.319148936170213e-07,
+       "loss": 0.3595,
+       "step": 280
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 282,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 585555443712000.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
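trainer_state.json above is plain JSON, so the logged loss curve (about 7.06 at step 10 down to about 0.36 at step 280 over 3 epochs) can be inspected without loading the model. A small sketch, assuming the repo has been cloned locally:

```python
# Inspect the training log recorded in trainer_state.json (local path is a placeholder).
import json

with open("student_finetuned/checkpoint-282/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(entry["step"], entry["loss"], entry["learning_rate"])
```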
student_finetuned/checkpoint-282/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ad1542a29a393c2cf4c700b16fe2ec76a2dc0c61f7e0b88372a33769a5f036c
+ size 5304
student_finetuned/checkpoint-282/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
student_finetuned/config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "output_past": true,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 120
+     }
+   },
+   "tokenizer_class": "BertTokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.3",
+   "use_cache": true,
+   "vocab_size": 22557
+ }
student_finetuned/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.51.3"
+ }
student_finetuned/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b902ff392321f8b7ef9a1b9e12591d74125e98cd04e2b6cd9aec38649307716
+ size 412679808
student_finetuned/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
student_finetuned/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
student_finetuned/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
student_finetuned/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
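The top-level student_finetuned folder carries the same config and weights as checkpoint-282 (identical safetensors hash), and its task_specific_params suggest sampled generation with max_length 120. A hedged generation sketch; the path is a local placeholder and the prompt string is purely illustrative:

```python
# Generation sketch for the exported student model (path and prompt are placeholders).
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "student_finetuned"
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path)

inputs = tokenizer("example prompt", return_tensors="pt")
outputs = model.generate(**inputs, do_sample=True, max_length=120)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```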
 
student_lora_finetuned/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: model/student_finetuned
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.15.2
student_lora_finetuned/adapter_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "model/student_finetuned",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": true,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_bias": false,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "c_attn"
+   ],
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
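adapter_config.json describes a rank-8 LoRA adapter (lora_alpha 32, dropout 0.1) on the GPT-2 c_attn projections of the finetuned student. A minimal loading sketch with the PEFT library; the paths are local placeholders, and model/student_finetuned is simply the base_model_name_or_path recorded above, which may need remapping to wherever the base model actually lives:

```python
# LoRA adapter loading sketch (assumes peft and transformers are installed;
# paths are placeholders for local copies of the uploaded folders).
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("student_finetuned")
tokenizer = AutoTokenizer.from_pretrained("student_lora_finetuned")
model = PeftModel.from_pretrained(base, "student_lora_finetuned")  # attaches adapter_model.safetensors

# Optionally fold the adapter into the base weights for plain transformers inference.
merged = model.merge_and_unload()
```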
student_lora_finetuned/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bf506514e5640e36238ee90118f9a45f497597d388bf7135212bacb05ad56ac
+ size 1182680
student_lora_finetuned/checkpoint-930/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: model/student_finetuned
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.15.2
student_lora_finetuned/checkpoint-930/adapter_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "model/student_finetuned",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": true,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_bias": false,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "c_attn"
+   ],
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
student_lora_finetuned/checkpoint-930/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bf506514e5640e36238ee90118f9a45f497597d388bf7135212bacb05ad56ac
+ size 1182680
student_lora_finetuned/checkpoint-930/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b65d10191a646b90e6f8d0e76d5d916100d26b21870c5759e1b0c2084cc14b2e
+ size 2379926
student_lora_finetuned/checkpoint-930/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c66f7c9b9ebf9edafc6b4316956e96432617d138b481b77fa1dff4b94baa30f
+ size 14244
student_lora_finetuned/checkpoint-930/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ce4e937afc9a48808fd42bd37a24978f04dd25b1d338a7919ad7a288b47ac94
+ size 988
student_lora_finetuned/checkpoint-930/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:845303d8f987d5364bdfcd6872d6713369fd584221ee35404491c9065b6ba966
+ size 1064
student_lora_finetuned/checkpoint-930/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
student_lora_finetuned/checkpoint-930/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
student_lora_finetuned/checkpoint-930/tokenizer_config.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "max_length": 512,
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_to_multiple_of": null,
+   "pad_token": "[PAD]",
+   "pad_token_type_id": 0,
+   "padding_side": "right",
+   "sep_token": "[SEP]",
+   "stride": 0,
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "[UNK]"
+ }
student_lora_finetuned/checkpoint-930/trainer_state.json ADDED
@@ -0,0 +1,1336 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 5.0,
6
+ "eval_steps": 500,
7
+ "global_step": 930,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.026881720430107527,
14
+ "grad_norm": 7.517879009246826,
15
+ "learning_rate": 0.00019913978494623657,
16
+ "loss": 25.7069,
17
+ "step": 5
18
+ },
19
+ {
20
+ "epoch": 0.053763440860215055,
21
+ "grad_norm": 9.280155181884766,
22
+ "learning_rate": 0.00019806451612903227,
23
+ "loss": 23.6659,
24
+ "step": 10
25
+ },
26
+ {
27
+ "epoch": 0.08064516129032258,
28
+ "grad_norm": 12.674245834350586,
29
+ "learning_rate": 0.00019698924731182798,
30
+ "loss": 20.9623,
31
+ "step": 15
32
+ },
33
+ {
34
+ "epoch": 0.10752688172043011,
35
+ "grad_norm": 10.096162796020508,
36
+ "learning_rate": 0.00019591397849462368,
37
+ "loss": 19.079,
38
+ "step": 20
39
+ },
40
+ {
41
+ "epoch": 0.13440860215053763,
42
+ "grad_norm": 6.408749103546143,
43
+ "learning_rate": 0.00019483870967741936,
44
+ "loss": 17.2508,
45
+ "step": 25
46
+ },
47
+ {
48
+ "epoch": 0.16129032258064516,
49
+ "grad_norm": 3.341172456741333,
50
+ "learning_rate": 0.00019376344086021507,
51
+ "loss": 16.2135,
52
+ "step": 30
53
+ },
54
+ {
55
+ "epoch": 0.1881720430107527,
56
+ "grad_norm": 2.305873155593872,
57
+ "learning_rate": 0.00019268817204301077,
58
+ "loss": 15.5422,
59
+ "step": 35
60
+ },
61
+ {
62
+ "epoch": 0.21505376344086022,
63
+ "grad_norm": 2.549955129623413,
64
+ "learning_rate": 0.00019161290322580645,
65
+ "loss": 15.1318,
66
+ "step": 40
67
+ },
68
+ {
69
+ "epoch": 0.24193548387096775,
70
+ "grad_norm": 2.837451457977295,
71
+ "learning_rate": 0.00019053763440860216,
72
+ "loss": 14.4746,
73
+ "step": 45
74
+ },
75
+ {
76
+ "epoch": 0.26881720430107525,
77
+ "grad_norm": 2.3930716514587402,
78
+ "learning_rate": 0.00018946236559139786,
79
+ "loss": 14.0982,
80
+ "step": 50
81
+ },
82
+ {
83
+ "epoch": 0.2956989247311828,
84
+ "grad_norm": 2.1569137573242188,
85
+ "learning_rate": 0.00018838709677419354,
86
+ "loss": 13.6985,
87
+ "step": 55
88
+ },
89
+ {
90
+ "epoch": 0.3225806451612903,
91
+ "grad_norm": 2.421198844909668,
92
+ "learning_rate": 0.00018731182795698925,
93
+ "loss": 13.1276,
94
+ "step": 60
95
+ },
96
+ {
97
+ "epoch": 0.34946236559139787,
98
+ "grad_norm": 2.6210012435913086,
99
+ "learning_rate": 0.00018623655913978495,
100
+ "loss": 12.6512,
101
+ "step": 65
102
+ },
103
+ {
104
+ "epoch": 0.3763440860215054,
105
+ "grad_norm": 3.0429224967956543,
106
+ "learning_rate": 0.00018516129032258066,
107
+ "loss": 11.9908,
108
+ "step": 70
109
+ },
110
+ {
111
+ "epoch": 0.4032258064516129,
112
+ "grad_norm": 3.4718668460845947,
113
+ "learning_rate": 0.00018408602150537634,
114
+ "loss": 11.2452,
115
+ "step": 75
116
+ },
117
+ {
118
+ "epoch": 0.43010752688172044,
119
+ "grad_norm": 4.288656234741211,
120
+ "learning_rate": 0.00018301075268817204,
121
+ "loss": 10.4865,
122
+ "step": 80
123
+ },
124
+ {
125
+ "epoch": 0.45698924731182794,
126
+ "grad_norm": 5.515636444091797,
127
+ "learning_rate": 0.00018193548387096775,
128
+ "loss": 9.4616,
129
+ "step": 85
130
+ },
131
+ {
132
+ "epoch": 0.4838709677419355,
133
+ "grad_norm": 8.327155113220215,
134
+ "learning_rate": 0.00018086021505376345,
135
+ "loss": 8.167,
136
+ "step": 90
137
+ },
138
+ {
139
+ "epoch": 0.510752688172043,
140
+ "grad_norm": 10.667390823364258,
141
+ "learning_rate": 0.00017978494623655916,
142
+ "loss": 6.0323,
143
+ "step": 95
144
+ },
145
+ {
146
+ "epoch": 0.5376344086021505,
147
+ "grad_norm": 8.344476699829102,
148
+ "learning_rate": 0.00017870967741935484,
149
+ "loss": 3.804,
150
+ "step": 100
151
+ },
152
+ {
153
+ "epoch": 0.5645161290322581,
154
+ "grad_norm": 5.413161754608154,
155
+ "learning_rate": 0.00017763440860215054,
156
+ "loss": 2.9261,
157
+ "step": 105
158
+ },
159
+ {
160
+ "epoch": 0.5913978494623656,
161
+ "grad_norm": 6.431153297424316,
162
+ "learning_rate": 0.00017655913978494625,
163
+ "loss": 2.3795,
164
+ "step": 110
165
+ },
166
+ {
167
+ "epoch": 0.6182795698924731,
168
+ "grad_norm": 4.849709987640381,
169
+ "learning_rate": 0.00017548387096774195,
170
+ "loss": 1.7969,
171
+ "step": 115
172
+ },
173
+ {
174
+ "epoch": 0.6451612903225806,
175
+ "grad_norm": 1.55392324924469,
176
+ "learning_rate": 0.00017440860215053766,
177
+ "loss": 1.54,
178
+ "step": 120
179
+ },
180
+ {
181
+ "epoch": 0.6720430107526881,
182
+ "grad_norm": 0.9671318531036377,
183
+ "learning_rate": 0.00017333333333333334,
184
+ "loss": 1.3664,
185
+ "step": 125
186
+ },
187
+ {
188
+ "epoch": 0.6989247311827957,
189
+ "grad_norm": 0.7121822834014893,
190
+ "learning_rate": 0.00017225806451612904,
191
+ "loss": 1.2365,
192
+ "step": 130
193
+ },
194
+ {
195
+ "epoch": 0.7258064516129032,
196
+ "grad_norm": 0.5870563983917236,
197
+ "learning_rate": 0.00017118279569892475,
198
+ "loss": 1.1677,
199
+ "step": 135
200
+ },
201
+ {
202
+ "epoch": 0.7526881720430108,
203
+ "grad_norm": 0.5803866386413574,
204
+ "learning_rate": 0.00017010752688172045,
205
+ "loss": 1.11,
206
+ "step": 140
207
+ },
208
+ {
209
+ "epoch": 0.7795698924731183,
210
+ "grad_norm": 0.5652913451194763,
211
+ "learning_rate": 0.00016903225806451616,
212
+ "loss": 1.0724,
213
+ "step": 145
214
+ },
215
+ {
216
+ "epoch": 0.8064516129032258,
217
+ "grad_norm": 0.5393310785293579,
218
+ "learning_rate": 0.00016795698924731184,
219
+ "loss": 1.0111,
220
+ "step": 150
221
+ },
222
+ {
223
+ "epoch": 0.8333333333333334,
224
+ "grad_norm": 0.6877985000610352,
225
+ "learning_rate": 0.00016688172043010751,
226
+ "loss": 0.986,
227
+ "step": 155
228
+ },
229
+ {
230
+ "epoch": 0.8602150537634409,
231
+ "grad_norm": 0.7941627502441406,
232
+ "learning_rate": 0.00016580645161290322,
233
+ "loss": 0.9341,
234
+ "step": 160
235
+ },
236
+ {
237
+ "epoch": 0.8870967741935484,
238
+ "grad_norm": 0.635101318359375,
239
+ "learning_rate": 0.00016473118279569893,
240
+ "loss": 0.9572,
241
+ "step": 165
242
+ },
243
+ {
244
+ "epoch": 0.9139784946236559,
245
+ "grad_norm": 0.8213858604431152,
246
+ "learning_rate": 0.00016365591397849463,
247
+ "loss": 0.9316,
248
+ "step": 170
249
+ },
250
+ {
251
+ "epoch": 0.9408602150537635,
252
+ "grad_norm": 0.5716636776924133,
253
+ "learning_rate": 0.00016258064516129034,
254
+ "loss": 0.8542,
255
+ "step": 175
256
+ },
257
+ {
258
+ "epoch": 0.967741935483871,
259
+ "grad_norm": 0.4731377363204956,
260
+ "learning_rate": 0.00016150537634408601,
261
+ "loss": 0.8484,
262
+ "step": 180
263
+ },
264
+ {
265
+ "epoch": 0.9946236559139785,
266
+ "grad_norm": 0.49750402569770813,
267
+ "learning_rate": 0.00016043010752688172,
268
+ "loss": 0.8229,
269
+ "step": 185
270
+ },
271
+ {
272
+ "epoch": 1.021505376344086,
273
+ "grad_norm": 0.4677596092224121,
274
+ "learning_rate": 0.00015935483870967743,
275
+ "loss": 0.8301,
276
+ "step": 190
277
+ },
278
+ {
279
+ "epoch": 1.0483870967741935,
280
+ "grad_norm": 0.5717447996139526,
281
+ "learning_rate": 0.00015827956989247313,
282
+ "loss": 0.7741,
283
+ "step": 195
284
+ },
285
+ {
286
+ "epoch": 1.075268817204301,
287
+ "grad_norm": 0.5728289484977722,
288
+ "learning_rate": 0.0001572043010752688,
289
+ "loss": 0.7707,
290
+ "step": 200
291
+ },
292
+ {
293
+ "epoch": 1.1021505376344085,
294
+ "grad_norm": 0.5678717494010925,
295
+ "learning_rate": 0.00015612903225806451,
296
+ "loss": 0.765,
297
+ "step": 205
298
+ },
299
+ {
300
+ "epoch": 1.129032258064516,
301
+ "grad_norm": 0.6754461526870728,
302
+ "learning_rate": 0.00015505376344086022,
303
+ "loss": 0.7611,
304
+ "step": 210
305
+ },
306
+ {
307
+ "epoch": 1.1559139784946237,
308
+ "grad_norm": 0.9893506169319153,
309
+ "learning_rate": 0.00015397849462365593,
310
+ "loss": 0.7654,
311
+ "step": 215
312
+ },
313
+ {
314
+ "epoch": 1.1827956989247312,
315
+ "grad_norm": 0.6708158254623413,
316
+ "learning_rate": 0.00015290322580645163,
317
+ "loss": 0.7437,
318
+ "step": 220
319
+ },
320
+ {
321
+ "epoch": 1.2096774193548387,
322
+ "grad_norm": 0.5096806883811951,
323
+ "learning_rate": 0.0001518279569892473,
324
+ "loss": 0.7001,
325
+ "step": 225
326
+ },
327
+ {
328
+ "epoch": 1.2365591397849462,
329
+ "grad_norm": 0.5710135102272034,
330
+ "learning_rate": 0.00015075268817204301,
331
+ "loss": 0.7156,
332
+ "step": 230
333
+ },
334
+ {
335
+ "epoch": 1.2634408602150538,
336
+ "grad_norm": 0.4659282863140106,
337
+ "learning_rate": 0.00014967741935483872,
338
+ "loss": 0.6673,
339
+ "step": 235
340
+ },
341
+ {
342
+ "epoch": 1.2903225806451613,
343
+ "grad_norm": 0.6818357706069946,
344
+ "learning_rate": 0.00014860215053763443,
345
+ "loss": 0.6982,
346
+ "step": 240
347
+ },
348
+ {
349
+ "epoch": 1.3172043010752688,
350
+ "grad_norm": 0.6473487615585327,
351
+ "learning_rate": 0.00014752688172043013,
352
+ "loss": 0.6886,
353
+ "step": 245
354
+ },
355
+ {
356
+ "epoch": 1.3440860215053765,
357
+ "grad_norm": 0.5552868247032166,
358
+ "learning_rate": 0.0001464516129032258,
359
+ "loss": 0.6913,
360
+ "step": 250
361
+ },
362
+ {
363
+ "epoch": 1.370967741935484,
364
+ "grad_norm": 0.5745120048522949,
365
+ "learning_rate": 0.00014537634408602151,
366
+ "loss": 0.6605,
367
+ "step": 255
368
+ },
369
+ {
370
+ "epoch": 1.3978494623655915,
371
+ "grad_norm": 0.4570547938346863,
372
+ "learning_rate": 0.00014430107526881722,
373
+ "loss": 0.6525,
374
+ "step": 260
375
+ },
376
+ {
377
+ "epoch": 1.424731182795699,
378
+ "grad_norm": 0.5155510902404785,
379
+ "learning_rate": 0.00014322580645161293,
380
+ "loss": 0.6236,
381
+ "step": 265
382
+ },
383
+ {
384
+ "epoch": 1.4516129032258065,
385
+ "grad_norm": 0.5411310195922852,
386
+ "learning_rate": 0.0001421505376344086,
387
+ "loss": 0.6688,
388
+ "step": 270
389
+ },
390
+ {
391
+ "epoch": 1.478494623655914,
392
+ "grad_norm": 0.5378429293632507,
393
+ "learning_rate": 0.0001410752688172043,
394
+ "loss": 0.6166,
395
+ "step": 275
396
+ },
397
+ {
398
+ "epoch": 1.5053763440860215,
399
+ "grad_norm": 0.6501957178115845,
400
+ "learning_rate": 0.00014,
401
+ "loss": 0.5889,
402
+ "step": 280
403
+ },
404
+ {
405
+ "epoch": 1.532258064516129,
406
+ "grad_norm": 0.5574774146080017,
407
+ "learning_rate": 0.0001389247311827957,
408
+ "loss": 0.6529,
409
+ "step": 285
410
+ },
411
+ {
412
+ "epoch": 1.5591397849462365,
413
+ "grad_norm": 0.4855782091617584,
414
+ "learning_rate": 0.0001378494623655914,
415
+ "loss": 0.6092,
416
+ "step": 290
417
+ },
418
+ {
419
+ "epoch": 1.586021505376344,
420
+ "grad_norm": 0.5358579754829407,
421
+ "learning_rate": 0.0001367741935483871,
422
+ "loss": 0.5836,
423
+ "step": 295
424
+ },
425
+ {
426
+ "epoch": 1.6129032258064515,
427
+ "grad_norm": 0.4744930565357208,
428
+ "learning_rate": 0.0001356989247311828,
429
+ "loss": 0.5813,
430
+ "step": 300
431
+ },
432
+ {
433
+ "epoch": 1.639784946236559,
434
+ "grad_norm": 0.5719123482704163,
435
+ "learning_rate": 0.0001346236559139785,
436
+ "loss": 0.6039,
437
+ "step": 305
438
+ },
439
+ {
440
+ "epoch": 1.6666666666666665,
441
+ "grad_norm": 0.49337533116340637,
442
+ "learning_rate": 0.0001335483870967742,
443
+ "loss": 0.5614,
444
+ "step": 310
445
+ },
446
+ {
447
+ "epoch": 1.6935483870967742,
448
+ "grad_norm": 0.6838189363479614,
449
+ "learning_rate": 0.0001324731182795699,
450
+ "loss": 0.5619,
451
+ "step": 315
452
+ },
453
+ {
454
+ "epoch": 1.7204301075268817,
455
+ "grad_norm": 0.5160232782363892,
456
+ "learning_rate": 0.0001313978494623656,
457
+ "loss": 0.5811,
458
+ "step": 320
459
+ },
460
+ {
461
+ "epoch": 1.7473118279569892,
462
+ "grad_norm": 0.8333256840705872,
463
+ "learning_rate": 0.0001303225806451613,
464
+ "loss": 0.5771,
465
+ "step": 325
466
+ },
467
+ {
468
+ "epoch": 1.7741935483870968,
469
+ "grad_norm": 0.9703850746154785,
470
+ "learning_rate": 0.000129247311827957,
471
+ "loss": 0.578,
472
+ "step": 330
473
+ },
474
+ {
475
+ "epoch": 1.8010752688172043,
476
+ "grad_norm": 0.6774953007698059,
477
+ "learning_rate": 0.0001281720430107527,
478
+ "loss": 0.5738,
479
+ "step": 335
480
+ },
481
+ {
482
+ "epoch": 1.827956989247312,
483
+ "grad_norm": 0.6810261011123657,
484
+ "learning_rate": 0.0001270967741935484,
485
+ "loss": 0.5823,
486
+ "step": 340
487
+ },
488
+ {
489
+ "epoch": 1.8548387096774195,
490
+ "grad_norm": 0.8924374580383301,
491
+ "learning_rate": 0.0001260215053763441,
492
+ "loss": 0.5535,
493
+ "step": 345
494
+ },
495
+ {
496
+ "epoch": 1.881720430107527,
497
+ "grad_norm": 0.7481217384338379,
498
+ "learning_rate": 0.00012494623655913978,
499
+ "loss": 0.5537,
500
+ "step": 350
501
+ },
502
+ {
503
+ "epoch": 1.9086021505376345,
504
+ "grad_norm": 0.8031575679779053,
505
+ "learning_rate": 0.0001238709677419355,
506
+ "loss": 0.538,
507
+ "step": 355
508
+ },
509
+ {
510
+ "epoch": 1.935483870967742,
511
+ "grad_norm": 1.2430750131607056,
512
+ "learning_rate": 0.0001227956989247312,
513
+ "loss": 0.5727,
514
+ "step": 360
515
+ },
516
+ {
517
+ "epoch": 1.9623655913978495,
518
+ "grad_norm": 0.8606559038162231,
519
+ "learning_rate": 0.0001217204301075269,
520
+ "loss": 0.5603,
521
+ "step": 365
522
+ },
523
+ {
524
+ "epoch": 1.989247311827957,
525
+ "grad_norm": 0.9421748518943787,
526
+ "learning_rate": 0.00012064516129032259,
527
+ "loss": 0.5462,
528
+ "step": 370
529
+ },
530
+ {
531
+ "epoch": 2.0161290322580645,
532
+ "grad_norm": 0.9712075591087341,
533
+ "learning_rate": 0.0001195698924731183,
534
+ "loss": 0.4992,
535
+ "step": 375
536
+ },
537
+ {
538
+ "epoch": 2.043010752688172,
539
+ "grad_norm": 0.6209003329277039,
540
+ "learning_rate": 0.00011849462365591399,
541
+ "loss": 0.5138,
542
+ "step": 380
543
+ },
544
+ {
545
+ "epoch": 2.0698924731182795,
546
+ "grad_norm": 1.1672589778900146,
547
+ "learning_rate": 0.00011741935483870967,
548
+ "loss": 0.5218,
549
+ "step": 385
550
+ },
551
+ {
552
+ "epoch": 2.096774193548387,
553
+ "grad_norm": 1.7826071977615356,
554
+ "learning_rate": 0.00011634408602150537,
555
+ "loss": 0.5599,
556
+ "step": 390
557
+ },
558
+ {
559
+ "epoch": 2.1236559139784945,
560
+ "grad_norm": 1.728200078010559,
561
+ "learning_rate": 0.00011526881720430108,
562
+ "loss": 0.4717,
563
+ "step": 395
564
+ },
565
+ {
566
+ "epoch": 2.150537634408602,
567
+ "grad_norm": 2.514089584350586,
568
+ "learning_rate": 0.00011419354838709677,
569
+ "loss": 0.527,
570
+ "step": 400
571
+ },
572
+ {
573
+ "epoch": 2.1774193548387095,
574
+ "grad_norm": 0.7007973790168762,
575
+ "learning_rate": 0.00011311827956989247,
576
+ "loss": 0.5024,
577
+ "step": 405
578
+ },
579
+ {
580
+ "epoch": 2.204301075268817,
581
+ "grad_norm": 0.7418361306190491,
582
+ "learning_rate": 0.00011204301075268817,
583
+ "loss": 0.4906,
584
+ "step": 410
585
+ },
586
+ {
587
+ "epoch": 2.2311827956989245,
588
+ "grad_norm": 1.0487796068191528,
589
+ "learning_rate": 0.00011096774193548387,
590
+ "loss": 0.4894,
591
+ "step": 415
592
+ },
593
+ {
594
+ "epoch": 2.258064516129032,
595
+ "grad_norm": 2.1650989055633545,
596
+ "learning_rate": 0.00010989247311827958,
597
+ "loss": 0.479,
598
+ "step": 420
599
+ },
600
+ {
601
+ "epoch": 2.28494623655914,
602
+ "grad_norm": 1.3648617267608643,
603
+ "learning_rate": 0.00010881720430107527,
604
+ "loss": 0.4844,
605
+ "step": 425
606
+ },
607
+ {
608
+ "epoch": 2.3118279569892475,
609
+ "grad_norm": 1.1497910022735596,
610
+ "learning_rate": 0.00010774193548387097,
611
+ "loss": 0.4692,
612
+ "step": 430
613
+ },
614
+ {
615
+ "epoch": 2.338709677419355,
616
+ "grad_norm": 0.6426465511322021,
617
+ "learning_rate": 0.00010666666666666667,
618
+ "loss": 0.496,
619
+ "step": 435
620
+ },
621
+ {
622
+ "epoch": 2.3655913978494625,
623
+ "grad_norm": 1.3800503015518188,
624
+ "learning_rate": 0.00010559139784946237,
625
+ "loss": 0.4621,
626
+ "step": 440
627
+ },
628
+ {
629
+ "epoch": 2.39247311827957,
630
+ "grad_norm": 1.3282463550567627,
631
+ "learning_rate": 0.00010451612903225806,
632
+ "loss": 0.4537,
633
+ "step": 445
634
+ },
635
+ {
636
+ "epoch": 2.4193548387096775,
637
+ "grad_norm": 0.9986529350280762,
638
+ "learning_rate": 0.00010344086021505377,
639
+ "loss": 0.4602,
640
+ "step": 450
641
+ },
642
+ {
643
+ "epoch": 2.446236559139785,
644
+ "grad_norm": 1.266798496246338,
645
+ "learning_rate": 0.00010236559139784947,
646
+ "loss": 0.4784,
647
+ "step": 455
648
+ },
649
+ {
650
+ "epoch": 2.4731182795698925,
651
+ "grad_norm": 1.2241142988204956,
652
+ "learning_rate": 0.00010129032258064517,
653
+ "loss": 0.4332,
654
+ "step": 460
655
+ },
656
+ {
657
+ "epoch": 2.5,
658
+ "grad_norm": 1.4897130727767944,
659
+ "learning_rate": 0.00010021505376344087,
660
+ "loss": 0.4274,
661
+ "step": 465
662
+ },
663
+ {
664
+ "epoch": 2.5268817204301075,
665
+ "grad_norm": 1.2320371866226196,
666
+ "learning_rate": 9.913978494623656e-05,
667
+ "loss": 0.4431,
668
+ "step": 470
669
+ },
670
+ {
671
+ "epoch": 2.553763440860215,
672
+ "grad_norm": 1.0926082134246826,
673
+ "learning_rate": 9.806451612903226e-05,
674
+ "loss": 0.4504,
675
+ "step": 475
676
+ },
677
+ {
678
+ "epoch": 2.5806451612903225,
679
+ "grad_norm": 0.7217853665351868,
680
+ "learning_rate": 9.698924731182796e-05,
681
+ "loss": 0.4533,
682
+ "step": 480
683
+ },
684
+ {
685
+ "epoch": 2.60752688172043,
686
+ "grad_norm": 1.0090914964675903,
687
+ "learning_rate": 9.591397849462365e-05,
688
+ "loss": 0.3997,
689
+ "step": 485
690
+ },
691
+ {
692
+ "epoch": 2.6344086021505375,
693
+ "grad_norm": 1.9857970476150513,
694
+ "learning_rate": 9.483870967741936e-05,
695
+ "loss": 0.4439,
696
+ "step": 490
697
+ },
698
+ {
699
+ "epoch": 2.661290322580645,
700
+ "grad_norm": 0.9593686461448669,
701
+ "learning_rate": 9.376344086021506e-05,
702
+ "loss": 0.4554,
703
+ "step": 495
704
+ },
705
+ {
706
+ "epoch": 2.688172043010753,
707
+ "grad_norm": 0.6287310123443604,
708
+ "learning_rate": 9.268817204301076e-05,
709
+ "loss": 0.4206,
710
+ "step": 500
711
+ },
712
+ {
713
+ "epoch": 2.71505376344086,
714
+ "grad_norm": 0.6817049980163574,
715
+ "learning_rate": 9.161290322580646e-05,
716
+ "loss": 0.4118,
717
+ "step": 505
718
+ },
719
+ {
720
+ "epoch": 2.741935483870968,
721
+ "grad_norm": 2.3625309467315674,
722
+ "learning_rate": 9.053763440860215e-05,
723
+ "loss": 0.4382,
724
+ "step": 510
725
+ },
726
+ {
727
+ "epoch": 2.768817204301075,
728
+ "grad_norm": 1.37618088722229,
729
+ "learning_rate": 8.946236559139786e-05,
730
+ "loss": 0.4092,
731
+ "step": 515
732
+ },
733
+ {
734
+ "epoch": 2.795698924731183,
735
+ "grad_norm": 2.8771934509277344,
736
+ "learning_rate": 8.838709677419355e-05,
737
+ "loss": 0.4403,
738
+ "step": 520
739
+ },
740
+ {
741
+ "epoch": 2.8225806451612905,
742
+ "grad_norm": 0.670045793056488,
743
+ "learning_rate": 8.731182795698926e-05,
744
+ "loss": 0.3922,
745
+ "step": 525
746
+ },
747
+ {
748
+ "epoch": 2.849462365591398,
749
+ "grad_norm": 0.6818873286247253,
750
+ "learning_rate": 8.623655913978495e-05,
751
+ "loss": 0.3811,
752
+ "step": 530
753
+ },
754
+ {
755
+ "epoch": 2.8763440860215055,
756
+ "grad_norm": 0.5645846128463745,
757
+ "learning_rate": 8.516129032258064e-05,
758
+ "loss": 0.3923,
759
+ "step": 535
760
+ },
761
+ {
762
+ "epoch": 2.903225806451613,
763
+ "grad_norm": 0.8240963816642761,
764
+ "learning_rate": 8.408602150537634e-05,
765
+ "loss": 0.3824,
766
+ "step": 540
767
+ },
768
+ {
769
+ "epoch": 2.9301075268817205,
770
+ "grad_norm": 0.8166781067848206,
771
+ "learning_rate": 8.301075268817205e-05,
772
+ "loss": 0.3977,
773
+ "step": 545
774
+ },
775
+ {
776
+ "epoch": 2.956989247311828,
777
+ "grad_norm": 0.6839639544487,
778
+ "learning_rate": 8.193548387096774e-05,
779
+ "loss": 0.3778,
780
+ "step": 550
781
+ },
782
+ {
783
+ "epoch": 2.9838709677419355,
784
+ "grad_norm": 0.6171432733535767,
785
+ "learning_rate": 8.086021505376345e-05,
786
+ "loss": 0.4205,
787
+ "step": 555
788
+ },
789
+ {
790
+ "epoch": 3.010752688172043,
791
+ "grad_norm": 0.6487573981285095,
792
+ "learning_rate": 7.978494623655914e-05,
793
+ "loss": 0.3774,
794
+ "step": 560
795
+ },
796
+ {
797
+ "epoch": 3.0376344086021505,
798
+ "grad_norm": 0.8863518834114075,
799
+ "learning_rate": 7.870967741935484e-05,
800
+ "loss": 0.3636,
801
+ "step": 565
802
+ },
803
+ {
804
+ "epoch": 3.064516129032258,
805
+ "grad_norm": 1.1676126718521118,
806
+ "learning_rate": 7.763440860215054e-05,
807
+ "loss": 0.3438,
808
+ "step": 570
809
+ },
810
+ {
811
+ "epoch": 3.0913978494623655,
812
+ "grad_norm": 1.3498252630233765,
813
+ "learning_rate": 7.655913978494624e-05,
814
+ "loss": 0.3812,
815
+ "step": 575
816
+ },
817
+ {
818
+ "epoch": 3.118279569892473,
819
+ "grad_norm": 0.8388305306434631,
820
+ "learning_rate": 7.548387096774195e-05,
821
+ "loss": 0.3551,
822
+ "step": 580
823
+ },
824
+ {
825
+ "epoch": 3.1451612903225805,
826
+ "grad_norm": 0.8704394698143005,
827
+ "learning_rate": 7.440860215053764e-05,
828
+ "loss": 0.3663,
829
+ "step": 585
830
+ },
831
+ {
832
+ "epoch": 3.172043010752688,
833
+ "grad_norm": 0.7780643701553345,
834
+ "learning_rate": 7.333333333333333e-05,
835
+ "loss": 0.379,
836
+ "step": 590
837
+ },
838
+ {
839
+ "epoch": 3.1989247311827955,
840
+ "grad_norm": 0.6339101195335388,
841
+ "learning_rate": 7.225806451612904e-05,
842
+ "loss": 0.3696,
843
+ "step": 595
844
+ },
845
+ {
846
+ "epoch": 3.225806451612903,
847
+ "grad_norm": 0.7547860145568848,
848
+ "learning_rate": 7.118279569892473e-05,
849
+ "loss": 0.3595,
850
+ "step": 600
851
+ },
852
+ {
853
+ "epoch": 3.252688172043011,
854
+ "grad_norm": 0.6610727310180664,
855
+ "learning_rate": 7.010752688172043e-05,
856
+ "loss": 0.3806,
857
+ "step": 605
858
+ },
859
+ {
860
+ "epoch": 3.279569892473118,
861
+ "grad_norm": 0.9694567322731018,
862
+ "learning_rate": 6.903225806451613e-05,
863
+ "loss": 0.4239,
864
+ "step": 610
865
+ },
866
+ {
867
+ "epoch": 3.306451612903226,
868
+ "grad_norm": 0.8362919688224792,
869
+ "learning_rate": 6.795698924731183e-05,
870
+ "loss": 0.3472,
871
+ "step": 615
872
+ },
873
+ {
874
+ "epoch": 3.3333333333333335,
875
+ "grad_norm": 1.2766687870025635,
876
+ "learning_rate": 6.688172043010754e-05,
877
+ "loss": 0.3855,
878
+ "step": 620
879
+ },
880
+ {
881
+ "epoch": 3.360215053763441,
882
+ "grad_norm": 2.0830249786376953,
883
+ "learning_rate": 6.580645161290323e-05,
884
+ "loss": 0.3528,
885
+ "step": 625
886
+ },
887
+ {
888
+ "epoch": 3.3870967741935485,
889
+ "grad_norm": 0.6089420914649963,
890
+ "learning_rate": 6.473118279569893e-05,
891
+ "loss": 0.339,
892
+ "step": 630
893
+ },
894
+ {
895
+ "epoch": 3.413978494623656,
896
+ "grad_norm": 0.8975178599357605,
897
+ "learning_rate": 6.365591397849463e-05,
898
+ "loss": 0.3466,
899
+ "step": 635
900
+ },
901
+ {
902
+ "epoch": 3.4408602150537635,
903
+ "grad_norm": 0.8945618867874146,
904
+ "learning_rate": 6.258064516129033e-05,
905
+ "loss": 0.3557,
906
+ "step": 640
907
+ },
908
+ {
909
+ "epoch": 3.467741935483871,
910
+ "grad_norm": 0.6987658143043518,
911
+ "learning_rate": 6.150537634408602e-05,
912
+ "loss": 0.3398,
913
+ "step": 645
914
+ },
915
+ {
916
+ "epoch": 3.4946236559139785,
917
+ "grad_norm": 0.6438629627227783,
918
+ "learning_rate": 6.0430107526881715e-05,
919
+ "loss": 0.3298,
920
+ "step": 650
921
+ },
922
+ {
923
+ "epoch": 3.521505376344086,
924
+ "grad_norm": 0.9585652351379395,
925
+ "learning_rate": 5.935483870967742e-05,
926
+ "loss": 0.3944,
927
+ "step": 655
928
+ },
929
+ {
930
+ "epoch": 3.5483870967741935,
931
+ "grad_norm": 0.8226910829544067,
932
+ "learning_rate": 5.827956989247312e-05,
933
+ "loss": 0.3783,
934
+ "step": 660
935
+ },
936
+ {
937
+ "epoch": 3.575268817204301,
938
+ "grad_norm": 0.6199684739112854,
939
+ "learning_rate": 5.720430107526882e-05,
940
+ "loss": 0.3262,
941
+ "step": 665
942
+ },
943
+ {
944
+ "epoch": 3.6021505376344085,
945
+ "grad_norm": 0.8326342105865479,
946
+ "learning_rate": 5.612903225806452e-05,
947
+ "loss": 0.3152,
948
+ "step": 670
949
+ },
950
+ {
951
+ "epoch": 3.629032258064516,
952
+ "grad_norm": 1.2666329145431519,
953
+ "learning_rate": 5.5053763440860215e-05,
954
+ "loss": 0.3958,
955
+ "step": 675
956
+ },
957
+ {
958
+ "epoch": 3.6559139784946235,
959
+ "grad_norm": 0.6879651546478271,
960
+ "learning_rate": 5.397849462365592e-05,
961
+ "loss": 0.3684,
962
+ "step": 680
963
+ },
964
+ {
965
+ "epoch": 3.682795698924731,
966
+ "grad_norm": 0.789516031742096,
967
+ "learning_rate": 5.290322580645162e-05,
968
+ "loss": 0.3575,
969
+ "step": 685
970
+ },
971
+ {
972
+ "epoch": 3.709677419354839,
973
+ "grad_norm": 0.7152576446533203,
974
+ "learning_rate": 5.182795698924732e-05,
975
+ "loss": 0.3048,
976
+ "step": 690
977
+ },
978
+ {
979
+ "epoch": 3.736559139784946,
980
+ "grad_norm": 0.6960220336914062,
981
+ "learning_rate": 5.075268817204302e-05,
982
+ "loss": 0.3395,
983
+ "step": 695
984
+ },
985
+ {
986
+ "epoch": 3.763440860215054,
987
+ "grad_norm": 0.66648930311203,
988
+ "learning_rate": 4.967741935483871e-05,
989
+ "loss": 0.3475,
990
+ "step": 700
991
+ },
992
+ {
993
+ "epoch": 3.790322580645161,
994
+ "grad_norm": 0.711923360824585,
995
+ "learning_rate": 4.8602150537634414e-05,
996
+ "loss": 0.327,
997
+ "step": 705
998
+ },
999
+ {
1000
+ "epoch": 3.817204301075269,
1001
+ "grad_norm": 1.0754563808441162,
1002
+ "learning_rate": 4.752688172043011e-05,
1003
+ "loss": 0.3554,
1004
+ "step": 710
1005
+ },
1006
+ {
1007
+ "epoch": 3.8440860215053765,
1008
+ "grad_norm": 0.6272746324539185,
1009
+ "learning_rate": 4.645161290322581e-05,
1010
+ "loss": 0.3262,
1011
+ "step": 715
1012
+ },
1013
+ {
1014
+ "epoch": 3.870967741935484,
1015
+ "grad_norm": 0.6247246861457825,
1016
+ "learning_rate": 4.53763440860215e-05,
1017
+ "loss": 0.3311,
1018
+ "step": 720
1019
+ },
1020
+ {
1021
+ "epoch": 3.8978494623655915,
1022
+ "grad_norm": 0.9296492338180542,
1023
+ "learning_rate": 4.43010752688172e-05,
1024
+ "loss": 0.3433,
1025
+ "step": 725
1026
+ },
1027
+ {
1028
+ "epoch": 3.924731182795699,
1029
+ "grad_norm": 1.0945050716400146,
1030
+ "learning_rate": 4.322580645161291e-05,
1031
+ "loss": 0.3295,
1032
+ "step": 730
1033
+ },
1034
+ {
1035
+ "epoch": 3.9516129032258065,
1036
+ "grad_norm": 0.840998649597168,
1037
+ "learning_rate": 4.2150537634408606e-05,
1038
+ "loss": 0.2839,
1039
+ "step": 735
1040
+ },
1041
+ {
1042
+ "epoch": 3.978494623655914,
1043
+ "grad_norm": 0.6789965629577637,
1044
+ "learning_rate": 4.1075268817204305e-05,
1045
+ "loss": 0.3398,
1046
+ "step": 740
1047
+ },
1048
+ {
1049
+ "epoch": 4.005376344086022,
1050
+ "grad_norm": 0.6800040602684021,
1051
+ "learning_rate": 4e-05,
1052
+ "loss": 0.3378,
1053
+ "step": 745
1054
+ },
1055
+ {
1056
+ "epoch": 4.032258064516129,
1057
+ "grad_norm": 0.9894293546676636,
1058
+ "learning_rate": 3.8924731182795695e-05,
1059
+ "loss": 0.334,
1060
+ "step": 750
1061
+ },
1062
+ {
1063
+ "epoch": 4.059139784946237,
1064
+ "grad_norm": 0.6852840185165405,
1065
+ "learning_rate": 3.78494623655914e-05,
1066
+ "loss": 0.3291,
1067
+ "step": 755
1068
+ },
1069
+ {
1070
+ "epoch": 4.086021505376344,
1071
+ "grad_norm": 0.9194632768630981,
1072
+ "learning_rate": 3.67741935483871e-05,
1073
+ "loss": 0.346,
1074
+ "step": 760
1075
+ },
1076
+ {
1077
+ "epoch": 4.112903225806452,
1078
+ "grad_norm": 0.7930384278297424,
1079
+ "learning_rate": 3.56989247311828e-05,
1080
+ "loss": 0.3765,
1081
+ "step": 765
1082
+ },
1083
+ {
1084
+ "epoch": 4.139784946236559,
1085
+ "grad_norm": 0.6173940896987915,
1086
+ "learning_rate": 3.4623655913978497e-05,
1087
+ "loss": 0.2881,
1088
+ "step": 770
1089
+ },
1090
+ {
1091
+ "epoch": 4.166666666666667,
1092
+ "grad_norm": 1.0658806562423706,
1093
+ "learning_rate": 3.3548387096774195e-05,
1094
+ "loss": 0.3393,
1095
+ "step": 775
1096
+ },
1097
+ {
1098
+ "epoch": 4.193548387096774,
1099
+ "grad_norm": 0.9105071425437927,
1100
+ "learning_rate": 3.2473118279569894e-05,
1101
+ "loss": 0.2922,
1102
+ "step": 780
1103
+ },
1104
+ {
1105
+ "epoch": 4.220430107526882,
1106
+ "grad_norm": 0.7287763953208923,
1107
+ "learning_rate": 3.139784946236559e-05,
1108
+ "loss": 0.3196,
1109
+ "step": 785
1110
+ },
1111
+ {
1112
+ "epoch": 4.247311827956989,
1113
+ "grad_norm": 0.692533552646637,
1114
+ "learning_rate": 3.032258064516129e-05,
1115
+ "loss": 0.2845,
1116
+ "step": 790
1117
+ },
1118
+ {
1119
+ "epoch": 4.274193548387097,
1120
+ "grad_norm": 0.8192632794380188,
1121
+ "learning_rate": 2.924731182795699e-05,
1122
+ "loss": 0.2845,
1123
+ "step": 795
1124
+ },
1125
+ {
1126
+ "epoch": 4.301075268817204,
1127
+ "grad_norm": 0.9781789779663086,
1128
+ "learning_rate": 2.8172043010752692e-05,
1129
+ "loss": 0.3508,
1130
+ "step": 800
1131
+ },
1132
+ {
1133
+ "epoch": 4.327956989247312,
1134
+ "grad_norm": 0.6340736746788025,
1135
+ "learning_rate": 2.709677419354839e-05,
1136
+ "loss": 0.3522,
1137
+ "step": 805
1138
+ },
1139
+ {
1140
+ "epoch": 4.354838709677419,
1141
+ "grad_norm": 0.7697390913963318,
1142
+ "learning_rate": 2.6021505376344086e-05,
1143
+ "loss": 0.2812,
1144
+ "step": 810
1145
+ },
1146
+ {
1147
+ "epoch": 4.381720430107527,
1148
+ "grad_norm": 1.2623796463012695,
1149
+ "learning_rate": 2.4946236559139784e-05,
1150
+ "loss": 0.3291,
1151
+ "step": 815
1152
+ },
1153
+ {
1154
+ "epoch": 4.408602150537634,
1155
+ "grad_norm": 0.5058888792991638,
1156
+ "learning_rate": 2.3870967741935486e-05,
1157
+ "loss": 0.2865,
1158
+ "step": 820
1159
+ },
1160
+ {
1161
+ "epoch": 4.435483870967742,
1162
+ "grad_norm": 0.6579411029815674,
1163
+ "learning_rate": 2.2795698924731185e-05,
1164
+ "loss": 0.314,
1165
+ "step": 825
1166
+ },
1167
+ {
1168
+ "epoch": 4.462365591397849,
1169
+ "grad_norm": 0.794482946395874,
1170
+ "learning_rate": 2.172043010752688e-05,
1171
+ "loss": 0.3424,
1172
+ "step": 830
1173
+ },
1174
+ {
1175
+ "epoch": 4.489247311827957,
1176
+ "grad_norm": 0.7736402750015259,
1177
+ "learning_rate": 2.0645161290322582e-05,
1178
+ "loss": 0.3459,
1179
+ "step": 835
1180
+ },
1181
+ {
1182
+ "epoch": 4.516129032258064,
1183
+ "grad_norm": 0.8923993706703186,
1184
+ "learning_rate": 1.956989247311828e-05,
1185
+ "loss": 0.3199,
1186
+ "step": 840
1187
+ },
1188
+ {
1189
+ "epoch": 4.543010752688172,
1190
+ "grad_norm": 0.6480904221534729,
1191
+ "learning_rate": 1.849462365591398e-05,
1192
+ "loss": 0.3114,
1193
+ "step": 845
1194
+ },
1195
+ {
1196
+ "epoch": 4.56989247311828,
1197
+ "grad_norm": 0.7036060094833374,
1198
+ "learning_rate": 1.741935483870968e-05,
1199
+ "loss": 0.2878,
1200
+ "step": 850
1201
+ },
1202
+ {
1203
+ "epoch": 4.596774193548387,
1204
+ "grad_norm": 0.9546545743942261,
1205
+ "learning_rate": 1.6344086021505377e-05,
1206
+ "loss": 0.3354,
1207
+ "step": 855
1208
+ },
1209
+ {
1210
+ "epoch": 4.623655913978495,
1211
+ "grad_norm": 0.8179369568824768,
1212
+ "learning_rate": 1.5268817204301076e-05,
1213
+ "loss": 0.3203,
1214
+ "step": 860
1215
+ },
1216
+ {
1217
+ "epoch": 4.650537634408602,
1218
+ "grad_norm": 0.7543355822563171,
1219
+ "learning_rate": 1.4193548387096774e-05,
1220
+ "loss": 0.3204,
1221
+ "step": 865
1222
+ },
1223
+ {
1224
+ "epoch": 4.67741935483871,
1225
+ "grad_norm": 1.0491559505462646,
1226
+ "learning_rate": 1.3118279569892475e-05,
1227
+ "loss": 0.3376,
1228
+ "step": 870
1229
+ },
1230
+ {
1231
+ "epoch": 4.704301075268817,
1232
+ "grad_norm": 0.6792603135108948,
1233
+ "learning_rate": 1.2043010752688173e-05,
1234
+ "loss": 0.297,
1235
+ "step": 875
1236
+ },
1237
+ {
1238
+ "epoch": 4.731182795698925,
1239
+ "grad_norm": 0.711181640625,
1240
+ "learning_rate": 1.0967741935483872e-05,
1241
+ "loss": 0.32,
1242
+ "step": 880
1243
+ },
1244
+ {
1245
+ "epoch": 4.758064516129032,
1246
+ "grad_norm": 0.7287176847457886,
1247
+ "learning_rate": 9.89247311827957e-06,
1248
+ "loss": 0.3097,
1249
+ "step": 885
1250
+ },
1251
+ {
1252
+ "epoch": 4.78494623655914,
1253
+ "grad_norm": 0.8330285549163818,
1254
+ "learning_rate": 8.81720430107527e-06,
1255
+ "loss": 0.3242,
1256
+ "step": 890
1257
+ },
1258
+ {
1259
+ "epoch": 4.811827956989247,
1260
+ "grad_norm": 0.6423484086990356,
1261
+ "learning_rate": 7.741935483870968e-06,
1262
+ "loss": 0.3087,
1263
+ "step": 895
1264
+ },
1265
+ {
1266
+ "epoch": 4.838709677419355,
1267
+ "grad_norm": 1.1993571519851685,
1268
+ "learning_rate": 6.666666666666667e-06,
1269
+ "loss": 0.3257,
1270
+ "step": 900
1271
+ },
1272
+ {
1273
+ "epoch": 4.865591397849462,
1274
+ "grad_norm": 0.711089551448822,
1275
+ "learning_rate": 5.591397849462366e-06,
1276
+ "loss": 0.2974,
1277
+ "step": 905
1278
+ },
1279
+ {
1280
+ "epoch": 4.89247311827957,
1281
+ "grad_norm": 0.7673178315162659,
1282
+ "learning_rate": 4.516129032258065e-06,
1283
+ "loss": 0.3056,
1284
+ "step": 910
1285
+ },
1286
+ {
1287
+ "epoch": 4.919354838709677,
1288
+ "grad_norm": 0.5399670004844666,
1289
+ "learning_rate": 3.4408602150537635e-06,
1290
+ "loss": 0.2649,
1291
+ "step": 915
1292
+ },
1293
+ {
1294
+ "epoch": 4.946236559139785,
1295
+ "grad_norm": 0.7354225516319275,
1296
+ "learning_rate": 2.3655913978494625e-06,
1297
+ "loss": 0.3402,
1298
+ "step": 920
1299
+ },
1300
+ {
1301
+ "epoch": 4.973118279569892,
1302
+ "grad_norm": 0.9460377097129822,
1303
+ "learning_rate": 1.2903225806451614e-06,
1304
+ "loss": 0.3072,
1305
+ "step": 925
1306
+ },
1307
+ {
1308
+ "epoch": 5.0,
1309
+ "grad_norm": 0.9830946326255798,
1310
+ "learning_rate": 2.1505376344086022e-07,
1311
+ "loss": 0.2995,
1312
+ "step": 930
1313
+ }
1314
+ ],
1315
+ "logging_steps": 5,
1316
+ "max_steps": 930,
1317
+ "num_input_tokens_seen": 0,
1318
+ "num_train_epochs": 5,
1319
+ "save_steps": 500,
1320
+ "stateful_callbacks": {
1321
+ "TrainerControl": {
1322
+ "args": {
1323
+ "should_epoch_stop": false,
1324
+ "should_evaluate": false,
1325
+ "should_log": false,
1326
+ "should_save": true,
1327
+ "should_training_stop": true
1328
+ },
1329
+ "attributes": {}
1330
+ }
1331
+ },
1332
+ "total_flos": 975376566190080.0,
1333
+ "train_batch_size": 4,
1334
+ "trial_name": null,
1335
+ "trial_params": null
1336
+ }
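The trainer state above ends with the final logging entry at step 930 (epoch 5.0). The logged learning rates follow a linear decay to zero over the 930 optimizer steps; the sketch below re-derives them from the JSON as a sanity check. It is not part of the uploaded files, and the base learning rate of 2e-4 is an assumption inferred from the logged schedule (e.g. 2e-4 / 930 ≈ 2.15e-7 at the last step), not a value read from this commit.

# Minimal sketch (not part of the upload): inspect the trainer_state.json above.
import json

with open("student_lora_finetuned/checkpoint-930/trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]      # entries logged every 5 steps, as shown above
max_steps = state["max_steps"]      # 930
base_lr = 2e-4                      # assumption: inferred from lr(step 930) * 930

for entry in history[-3:]:
    step = entry["step"]
    # linear decay; the logged LR corresponds to the most recent optimizer step
    expected = base_lr * (max_steps - step + 1) / max_steps
    print(step, entry["loss"], entry["learning_rate"], round(expected, 10))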
student_lora_finetuned/checkpoint-930/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7651964235f11f3c6cc8169952914ae8667be060baf51461c65e20e2daa85a88
3
+ size 5304
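The three lines above are only the git-lfs pointer for training_args.bin; the binary itself is a pickled TrainingArguments object. A minimal sketch for inspecting it once the LFS object has been fetched is shown below; it assumes transformers is installed so the pickle can be resolved, and passes weights_only=False because the file is not a plain tensor dict.

# Sketch (not part of the upload): read the serialized TrainingArguments.
import torch

args = torch.load(
    "student_lora_finetuned/checkpoint-930/training_args.bin",
    weights_only=False,  # pickled TrainingArguments, not a state dict
)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)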
student_lora_finetuned/checkpoint-930/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
student_lora_finetuned/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
1
+ {
2
+ "cls_token": {
3
+ "content": "[CLS]",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "mask_token": {
10
+ "content": "[MASK]",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "[PAD]",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "sep_token": {
24
+ "content": "[SEP]",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "unk_token": {
31
+ "content": "[UNK]",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ }
37
+ }
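The map above is the standard uncased-BERT special token set, so the student tokenizer exposes [CLS]/[SEP]/[PAD]/[MASK]/[UNK] once loaded. A small check, assuming the repository is available locally under the paths used in this commit:

# Sketch (not part of the upload): verify the special tokens registered above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("student_lora_finetuned")
print(tok.special_tokens_map)
# expected to contain [UNK], [SEP], [PAD], [CLS], [MASK]
print(tok.pad_token_id, tok.cls_token_id, tok.sep_token_id, tok.mask_token_id)
# expected: 0 101 102 103, matching added_tokens_decoder in tokenizer_config.json below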
student_lora_finetuned/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
student_lora_finetuned/tokenizer_config.json ADDED
@@ -0,0 +1,65 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "100": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "101": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "102": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "103": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": true,
45
+ "cls_token": "[CLS]",
46
+ "do_basic_tokenize": true,
47
+ "do_lower_case": true,
48
+ "extra_special_tokens": {},
49
+ "mask_token": "[MASK]",
50
+ "max_length": 512,
51
+ "model_max_length": 1000000000000000019884624838656,
52
+ "never_split": null,
53
+ "pad_to_multiple_of": null,
54
+ "pad_token": "[PAD]",
55
+ "pad_token_type_id": 0,
56
+ "padding_side": "right",
57
+ "sep_token": "[SEP]",
58
+ "stride": 0,
59
+ "strip_accents": null,
60
+ "tokenize_chinese_chars": true,
61
+ "tokenizer_class": "BertTokenizer",
62
+ "truncation_side": "right",
63
+ "truncation_strategy": "longest_first",
64
+ "unk_token": "[UNK]"
65
+ }
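tokenizer_config.json above records a lowercasing BertTokenizer with right-side padding, longest_first truncation, and a stored max_length of 512. A usage sketch under those settings (paths as in this commit; the example sentence is arbitrary):

# Sketch (not part of the upload): tokenize with the recorded settings.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("student_lora_finetuned")
batch = tok(
    ["An example sentence for the distilled student model."],
    padding="max_length",   # pads on the right, per padding_side in the config
    truncation=True,
    max_length=512,         # matches max_length recorded in tokenizer_config.json
    return_tensors="pt",
)
print(batch["input_ids"].shape)  # torch.Size([1, 512])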
student_lora_finetuned/vocab.txt ADDED
The diff for this file is too large to render. See raw diff