SirajRLX committed on
Commit 85c5958 · verified · 1 Parent(s): 4d338a0

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +1 -0
  2. sft_qwen_14B_v2/best_adapter/README.md +207 -0
  3. sft_qwen_14B_v2/best_adapter/adapter_config.json +43 -0
  4. sft_qwen_14B_v2/best_adapter/adapter_model.safetensors +3 -0
  5. sft_qwen_14B_v2/best_adapter/training_args.bin +3 -0
  6. sft_qwen_14B_v2/checkpoints/checkpoint-1000/README.md +207 -0
  7. sft_qwen_14B_v2/checkpoints/checkpoint-1000/adapter_config.json +43 -0
  8. sft_qwen_14B_v2/checkpoints/checkpoint-1000/adapter_model.safetensors +3 -0
  9. sft_qwen_14B_v2/checkpoints/checkpoint-1000/optimizer.pt +3 -0
  10. sft_qwen_14B_v2/checkpoints/checkpoint-1000/rng_state.pth +3 -0
  11. sft_qwen_14B_v2/checkpoints/checkpoint-1000/scheduler.pt +3 -0
  12. sft_qwen_14B_v2/checkpoints/checkpoint-1000/trainer_state.json +3623 -0
  13. sft_qwen_14B_v2/checkpoints/checkpoint-1000/training_args.bin +3 -0
  14. sft_qwen_14B_v2/checkpoints/checkpoint-1500/README.md +207 -0
  15. sft_qwen_14B_v2/checkpoints/checkpoint-1500/adapter_config.json +43 -0
  16. sft_qwen_14B_v2/checkpoints/checkpoint-1500/adapter_model.safetensors +3 -0
  17. sft_qwen_14B_v2/checkpoints/checkpoint-1500/optimizer.pt +3 -0
  18. sft_qwen_14B_v2/checkpoints/checkpoint-1500/rng_state.pth +3 -0
  19. sft_qwen_14B_v2/checkpoints/checkpoint-1500/scheduler.pt +3 -0
  20. sft_qwen_14B_v2/checkpoints/checkpoint-1500/trainer_state.json +0 -0
  21. sft_qwen_14B_v2/checkpoints/checkpoint-1500/training_args.bin +3 -0
  22. sft_qwen_14B_v2/checkpoints/checkpoint-2000/README.md +207 -0
  23. sft_qwen_14B_v2/checkpoints/checkpoint-2000/adapter_config.json +43 -0
  24. sft_qwen_14B_v2/checkpoints/checkpoint-2000/adapter_model.safetensors +3 -0
  25. sft_qwen_14B_v2/checkpoints/checkpoint-2000/optimizer.pt +3 -0
  26. sft_qwen_14B_v2/checkpoints/checkpoint-2000/rng_state.pth +3 -0
  27. sft_qwen_14B_v2/checkpoints/checkpoint-2000/scheduler.pt +3 -0
  28. sft_qwen_14B_v2/checkpoints/checkpoint-2000/trainer_state.json +0 -0
  29. sft_qwen_14B_v2/checkpoints/checkpoint-2000/training_args.bin +3 -0
  30. sft_qwen_14B_v2/checkpoints/checkpoint-2500/README.md +207 -0
  31. sft_qwen_14B_v2/checkpoints/checkpoint-2500/adapter_config.json +43 -0
  32. sft_qwen_14B_v2/checkpoints/checkpoint-2500/adapter_model.safetensors +3 -0
  33. sft_qwen_14B_v2/checkpoints/checkpoint-2500/optimizer.pt +3 -0
  34. sft_qwen_14B_v2/checkpoints/checkpoint-2500/rng_state.pth +3 -0
  35. sft_qwen_14B_v2/checkpoints/checkpoint-2500/scheduler.pt +3 -0
  36. sft_qwen_14B_v2/checkpoints/checkpoint-2500/trainer_state.json +0 -0
  37. sft_qwen_14B_v2/checkpoints/checkpoint-2500/training_args.bin +3 -0
  38. sft_qwen_14B_v2/checkpoints/checkpoint-3000/README.md +207 -0
  39. sft_qwen_14B_v2/checkpoints/checkpoint-3000/adapter_config.json +43 -0
  40. sft_qwen_14B_v2/checkpoints/checkpoint-3000/adapter_model.safetensors +3 -0
  41. sft_qwen_14B_v2/checkpoints/checkpoint-3000/optimizer.pt +3 -0
  42. sft_qwen_14B_v2/checkpoints/checkpoint-3000/rng_state.pth +3 -0
  43. sft_qwen_14B_v2/checkpoints/checkpoint-3000/scheduler.pt +3 -0
  44. sft_qwen_14B_v2/checkpoints/checkpoint-3000/trainer_state.json +0 -0
  45. sft_qwen_14B_v2/checkpoints/checkpoint-3000/training_args.bin +3 -0
  46. sft_qwen_14B_v2/checkpoints/checkpoint-3500/README.md +207 -0
  47. sft_qwen_14B_v2/checkpoints/checkpoint-3500/adapter_config.json +43 -0
  48. sft_qwen_14B_v2/checkpoints/checkpoint-3500/adapter_model.safetensors +3 -0
  49. sft_qwen_14B_v2/checkpoints/checkpoint-3500/optimizer.pt +3 -0
  50. sft_qwen_14B_v2/checkpoints/checkpoint-3500/rng_state.pth +3 -0
.gitattributes CHANGED
@@ -54,3 +54,4 @@ wandb/run-20251226_155650-wbzoafvt/run-wbzoafvt.wandb filter=lfs diff=lfs merge=
54
  sft_devstral_24B_v2/wandb/run-20251226_180613-i1cmzyri/run-i1cmzyri.wandb filter=lfs diff=lfs merge=lfs -text
55
  sft_devstral_24B_v2/wandb/run-20251226_180702-oordmylf/run-oordmylf.wandb filter=lfs diff=lfs merge=lfs -text
56
  sft_devstral_24B_v2/wandb/run-20251226_180808-ny9q48hd/run-ny9q48hd.wandb filter=lfs diff=lfs merge=lfs -text
57
+ sft_qwen_14B_v2/wandb/run-20251226_181544-upub1jan/run-upub1jan.wandb filter=lfs diff=lfs merge=lfs -text
sft_qwen_14B_v2/best_adapter/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: Models/Qwen2.5-Coder-14B-CPT
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:Models/Qwen2.5-Coder-14B-CPT
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
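The "How to Get Started with the Model" section of this card is still a placeholder. Below is a minimal loading sketch with PEFT, assuming the base model path recorded in adapter_config.json and the adapter folder uploaded in this commit are both available locally; the paths are illustrative, not confirmed by this page.

```python
# Minimal sketch (assumptions: local copies of the CPT base model and of the
# sft_qwen_14B_v2/best_adapter folder from this commit).
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_path = "Models/Qwen2.5-Coder-14B-CPT"     # base_model_name_or_path in adapter_config.json
adapter_path = "sft_qwen_14B_v2/best_adapter"  # adapter weights uploaded in this commit

tokenizer = AutoTokenizer.from_pretrained(base_path)
base_model = AutoModelForCausalLM.from_pretrained(
    base_path, torch_dtype="auto", device_map="auto"
)
model = PeftModel.from_pretrained(base_model, adapter_path)  # attach the LoRA weights
model.eval()

prompt = "def fibonacci(n):"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```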
sft_qwen_14B_v2/best_adapter/adapter_config.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "Models/Qwen2.5-Coder-14B-CPT",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 64,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
+ "r": 32,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "o_proj",
33
+ "v_proj",
34
+ "k_proj",
35
+ "q_proj"
36
+ ],
37
+ "target_parameters": null,
38
+ "task_type": "CAUSAL_LM",
39
+ "trainable_token_indices": null,
40
+ "use_dora": false,
41
+ "use_qalora": false,
42
+ "use_rslora": false
43
+ }
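For reference, the same adapter settings expressed as a `peft.LoraConfig`; this is a sketch reconstructed only from the fields shown above (rank 32, alpha 64, dropout 0.05, LoRA applied to the attention projections).

```python
# Sketch of a LoraConfig mirroring the uploaded adapter_config.json.
from peft import LoraConfig

lora_config = LoraConfig(
    r=32,                       # "r" in adapter_config.json
    lora_alpha=64,              # effective scaling = lora_alpha / r = 2.0
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # attention projections only
)
```

Only the attention projections carry low-rank updates here, which is consistent with the ~200 MB adapter_model.safetensors pointers added elsewhere in this commit.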
sft_qwen_14B_v2/best_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c19a29b40a83cc48759a1d5af0d41a375dcc4f15306fcdc65ea10ce6044a6f47
3
+ size 201378736
sft_qwen_14B_v2/best_adapter/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afae4a8ce1391f149eb00b14eed8f891c715e892ea546bf754d22db2c2bc5969
3
+ size 4792
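The adapter_model.safetensors and training_args.bin entries above are git-lfs pointer stubs rather than the binaries themselves. Below is a sketch of fetching the real adapter weights from the Hub and checking them against the pointer; the repo id is a placeholder, since this page does not name the target repository.

```python
# Sketch: download the payload behind the adapter_model.safetensors pointer
# and verify it against the sha256/size recorded above.
import hashlib
import os

from huggingface_hub import hf_hub_download

REPO_ID = "your-username/your-repo"  # assumption: replace with the actual repository
FILENAME = "sft_qwen_14B_v2/best_adapter/adapter_model.safetensors"
EXPECTED_SHA256 = "c19a29b40a83cc48759a1d5af0d41a375dcc4f15306fcdc65ea10ce6044a6f47"
EXPECTED_SIZE = 201378736  # bytes, from the LFS pointer

local_path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)

digest = hashlib.sha256()
with open(local_path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(local_path) == EXPECTED_SIZE, "size mismatch"
assert digest.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
print("adapter_model.safetensors matches its LFS pointer")
```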
sft_qwen_14B_v2/checkpoints/checkpoint-1000/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: Models/Qwen2.5-Coder-14B-CPT
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:Models/Qwen2.5-Coder-14B-CPT
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
sft_qwen_14B_v2/checkpoints/checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "Models/Qwen2.5-Coder-14B-CPT",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 64,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
+ "r": 32,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "o_proj",
33
+ "v_proj",
34
+ "k_proj",
35
+ "q_proj"
36
+ ],
37
+ "target_parameters": null,
38
+ "task_type": "CAUSAL_LM",
39
+ "trainable_token_indices": null,
40
+ "use_dora": false,
41
+ "use_qalora": false,
42
+ "use_rslora": false
43
+ }
sft_qwen_14B_v2/checkpoints/checkpoint-1000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38ac379064d40f8514d7d5e379578e8d466f2ce3582db82555b1f1c5e8c2db6d
3
+ size 201378736
sft_qwen_14B_v2/checkpoints/checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7467ef9a63aa9e5adc3a95a80cee071ebf3381f166d2e2b850ba12d849fac9c0
3
+ size 402976786
sft_qwen_14B_v2/checkpoints/checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f37d067203ec01fd39a90eb184de470feca9355b97ee387b61a8f71801819194
3
+ size 14244
sft_qwen_14B_v2/checkpoints/checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0acd749c3b8c2670557a4f5e96ac98dd3da877521e179bc1412a007138308dc8
3
+ size 1064
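The next file, trainer_state.json, records the training log for this run: periodic training-loss records, an eval_loss record every 100 steps (eval_steps is 100 in the file), and the best checkpoint found so far; only part of it fits in this limited view. Below is a minimal sketch for summarising a local copy of such a file, shown before the listing that follows.

```python
# Sketch: read a local copy of trainer_state.json and summarise the run.
import json

with open("sft_qwen_14B_v2/checkpoints/checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

# Periodic evaluation records carry an "eval_loss" key; training records do not.
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
for step, loss in eval_points:
    print(f"step {step:>5}: eval_loss = {loss:.4f}")

print("best checkpoint:", state["best_model_checkpoint"])
print("best metric    :", state["best_metric"])  # 0.9487... for this run, per the file
```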
sft_qwen_14B_v2/checkpoints/checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,3623 @@
1
+ {
2
+ "best_global_step": 1000,
3
+ "best_metric": 0.9487298727035522,
4
+ "best_model_checkpoint": "task2file/sft_qwen_14B_v2/checkpoints/checkpoint-1000",
5
+ "epoch": 0.4219409282700422,
6
+ "eval_steps": 100,
7
+ "global_step": 1000,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.0008438818565400844,
14
+ "grad_norm": 0.5386583805084229,
15
+ "learning_rate": 1.7574692442882248e-07,
16
+ "loss": 1.6941628456115723,
17
+ "step": 2
18
+ },
19
+ {
20
+ "epoch": 0.0016877637130801688,
21
+ "grad_norm": 0.5477277636528015,
22
+ "learning_rate": 5.272407732864675e-07,
23
+ "loss": 1.7132279872894287,
24
+ "step": 4
25
+ },
26
+ {
27
+ "epoch": 0.002531645569620253,
28
+ "grad_norm": 0.5390765070915222,
29
+ "learning_rate": 8.787346221441126e-07,
30
+ "loss": 1.641180396080017,
31
+ "step": 6
32
+ },
33
+ {
34
+ "epoch": 0.0033755274261603376,
35
+ "grad_norm": 0.5023683905601501,
36
+ "learning_rate": 1.2302284710017575e-06,
37
+ "loss": 1.5616240501403809,
38
+ "step": 8
39
+ },
40
+ {
41
+ "epoch": 0.004219409282700422,
42
+ "grad_norm": 0.4899154603481293,
43
+ "learning_rate": 1.5817223198594026e-06,
44
+ "loss": 1.572033405303955,
45
+ "step": 10
46
+ },
47
+ {
48
+ "epoch": 0.005063291139240506,
49
+ "grad_norm": 0.5239788293838501,
50
+ "learning_rate": 1.9332161687170474e-06,
51
+ "loss": 1.6242921352386475,
52
+ "step": 12
53
+ },
54
+ {
55
+ "epoch": 0.00590717299578059,
56
+ "grad_norm": 0.5172926783561707,
57
+ "learning_rate": 2.2847100175746925e-06,
58
+ "loss": 1.6800041198730469,
59
+ "step": 14
60
+ },
61
+ {
62
+ "epoch": 0.006751054852320675,
63
+ "grad_norm": 0.5539224743843079,
64
+ "learning_rate": 2.6362038664323376e-06,
65
+ "loss": 1.6450834274291992,
66
+ "step": 16
67
+ },
68
+ {
69
+ "epoch": 0.007594936708860759,
70
+ "grad_norm": 0.5255337953567505,
71
+ "learning_rate": 2.9876977152899827e-06,
72
+ "loss": 1.6673263311386108,
73
+ "step": 18
74
+ },
75
+ {
76
+ "epoch": 0.008438818565400843,
77
+ "grad_norm": 0.5074548721313477,
78
+ "learning_rate": 3.3391915641476277e-06,
79
+ "loss": 1.531802773475647,
80
+ "step": 20
81
+ },
82
+ {
83
+ "epoch": 0.009282700421940928,
84
+ "grad_norm": 0.4160279333591461,
85
+ "learning_rate": 3.6906854130052724e-06,
86
+ "loss": 1.599354863166809,
87
+ "step": 22
88
+ },
89
+ {
90
+ "epoch": 0.010126582278481013,
91
+ "grad_norm": 0.5716474652290344,
92
+ "learning_rate": 4.0421792618629174e-06,
93
+ "loss": 1.6700962781906128,
94
+ "step": 24
95
+ },
96
+ {
97
+ "epoch": 0.010970464135021098,
98
+ "grad_norm": 0.5148899555206299,
99
+ "learning_rate": 4.3936731107205625e-06,
100
+ "loss": 1.66217839717865,
101
+ "step": 26
102
+ },
103
+ {
104
+ "epoch": 0.01181434599156118,
105
+ "grad_norm": 0.575722336769104,
106
+ "learning_rate": 4.7451669595782076e-06,
107
+ "loss": 1.6692266464233398,
108
+ "step": 28
109
+ },
110
+ {
111
+ "epoch": 0.012658227848101266,
112
+ "grad_norm": 0.5345953106880188,
113
+ "learning_rate": 5.096660808435853e-06,
114
+ "loss": 1.5518689155578613,
115
+ "step": 30
116
+ },
117
+ {
118
+ "epoch": 0.01350210970464135,
119
+ "grad_norm": 0.4462043344974518,
120
+ "learning_rate": 5.448154657293498e-06,
121
+ "loss": 1.5930007696151733,
122
+ "step": 32
123
+ },
124
+ {
125
+ "epoch": 0.014345991561181435,
126
+ "grad_norm": 0.5119605660438538,
127
+ "learning_rate": 5.799648506151143e-06,
128
+ "loss": 1.6069684028625488,
129
+ "step": 34
130
+ },
131
+ {
132
+ "epoch": 0.015189873417721518,
133
+ "grad_norm": 0.5328608751296997,
134
+ "learning_rate": 6.151142355008788e-06,
135
+ "loss": 1.5838109254837036,
136
+ "step": 36
137
+ },
138
+ {
139
+ "epoch": 0.016033755274261603,
140
+ "grad_norm": 0.5065920352935791,
141
+ "learning_rate": 6.502636203866433e-06,
142
+ "loss": 1.608130931854248,
143
+ "step": 38
144
+ },
145
+ {
146
+ "epoch": 0.016877637130801686,
147
+ "grad_norm": 0.4479359984397888,
148
+ "learning_rate": 6.854130052724078e-06,
149
+ "loss": 1.5942182540893555,
150
+ "step": 40
151
+ },
152
+ {
153
+ "epoch": 0.017721518987341773,
154
+ "grad_norm": 0.42844903469085693,
155
+ "learning_rate": 7.205623901581722e-06,
156
+ "loss": 1.6441553831100464,
157
+ "step": 42
158
+ },
159
+ {
160
+ "epoch": 0.018565400843881856,
161
+ "grad_norm": 0.476630836725235,
162
+ "learning_rate": 7.557117750439367e-06,
163
+ "loss": 1.6068111658096313,
164
+ "step": 44
165
+ },
166
+ {
167
+ "epoch": 0.019409282700421943,
168
+ "grad_norm": 0.4532654881477356,
169
+ "learning_rate": 7.908611599297012e-06,
170
+ "loss": 1.6618021726608276,
171
+ "step": 46
172
+ },
173
+ {
174
+ "epoch": 0.020253164556962026,
175
+ "grad_norm": 0.3701118230819702,
176
+ "learning_rate": 8.260105448154657e-06,
177
+ "loss": 1.4730033874511719,
178
+ "step": 48
179
+ },
180
+ {
181
+ "epoch": 0.02109704641350211,
182
+ "grad_norm": 0.38471561670303345,
183
+ "learning_rate": 8.611599297012302e-06,
184
+ "loss": 1.4828267097473145,
185
+ "step": 50
186
+ },
187
+ {
188
+ "epoch": 0.021940928270042195,
189
+ "grad_norm": 0.3602336347103119,
190
+ "learning_rate": 8.963093145869948e-06,
191
+ "loss": 1.3877452611923218,
192
+ "step": 52
193
+ },
194
+ {
195
+ "epoch": 0.02278481012658228,
196
+ "grad_norm": 0.40318572521209717,
197
+ "learning_rate": 9.314586994727593e-06,
198
+ "loss": 1.49052894115448,
199
+ "step": 54
200
+ },
201
+ {
202
+ "epoch": 0.02362869198312236,
203
+ "grad_norm": 0.3223826587200165,
204
+ "learning_rate": 9.666080843585238e-06,
205
+ "loss": 1.4912524223327637,
206
+ "step": 56
207
+ },
208
+ {
209
+ "epoch": 0.024472573839662448,
210
+ "grad_norm": 0.3873065114021301,
211
+ "learning_rate": 1.0017574692442883e-05,
212
+ "loss": 1.526674509048462,
213
+ "step": 58
214
+ },
215
+ {
216
+ "epoch": 0.02531645569620253,
217
+ "grad_norm": 0.410159707069397,
218
+ "learning_rate": 1.0369068541300528e-05,
219
+ "loss": 1.4480271339416504,
220
+ "step": 60
221
+ },
222
+ {
223
+ "epoch": 0.026160337552742614,
224
+ "grad_norm": 0.3632003962993622,
225
+ "learning_rate": 1.0720562390158173e-05,
226
+ "loss": 1.4222990274429321,
227
+ "step": 62
228
+ },
229
+ {
230
+ "epoch": 0.0270042194092827,
231
+ "grad_norm": 0.33118435740470886,
232
+ "learning_rate": 1.1072056239015818e-05,
233
+ "loss": 1.387171745300293,
234
+ "step": 64
235
+ },
236
+ {
237
+ "epoch": 0.027848101265822784,
238
+ "grad_norm": 0.3301764726638794,
239
+ "learning_rate": 1.1423550087873463e-05,
240
+ "loss": 1.3523777723312378,
241
+ "step": 66
242
+ },
243
+ {
244
+ "epoch": 0.02869198312236287,
245
+ "grad_norm": 0.34342435002326965,
246
+ "learning_rate": 1.1775043936731108e-05,
247
+ "loss": 1.4515162706375122,
248
+ "step": 68
249
+ },
250
+ {
251
+ "epoch": 0.029535864978902954,
252
+ "grad_norm": 0.3243122100830078,
253
+ "learning_rate": 1.2126537785588753e-05,
254
+ "loss": 1.3509243726730347,
255
+ "step": 70
256
+ },
257
+ {
258
+ "epoch": 0.030379746835443037,
259
+ "grad_norm": 0.3450150787830353,
260
+ "learning_rate": 1.2478031634446398e-05,
261
+ "loss": 1.4936245679855347,
262
+ "step": 72
263
+ },
264
+ {
265
+ "epoch": 0.031223628691983123,
266
+ "grad_norm": 0.38912028074264526,
267
+ "learning_rate": 1.2829525483304042e-05,
268
+ "loss": 1.3419109582901,
269
+ "step": 74
270
+ },
271
+ {
272
+ "epoch": 0.032067510548523206,
273
+ "grad_norm": 0.3019310235977173,
274
+ "learning_rate": 1.3181019332161687e-05,
275
+ "loss": 1.4284154176712036,
276
+ "step": 76
277
+ },
278
+ {
279
+ "epoch": 0.03291139240506329,
280
+ "grad_norm": 0.37803682684898376,
281
+ "learning_rate": 1.3532513181019332e-05,
282
+ "loss": 1.4256561994552612,
283
+ "step": 78
284
+ },
285
+ {
286
+ "epoch": 0.03375527426160337,
287
+ "grad_norm": 0.34191736578941345,
288
+ "learning_rate": 1.3884007029876977e-05,
289
+ "loss": 1.3256909847259521,
290
+ "step": 80
291
+ },
292
+ {
293
+ "epoch": 0.03459915611814346,
294
+ "grad_norm": 0.35242700576782227,
295
+ "learning_rate": 1.4235500878734624e-05,
296
+ "loss": 1.2710685729980469,
297
+ "step": 82
298
+ },
299
+ {
300
+ "epoch": 0.035443037974683546,
301
+ "grad_norm": 0.38094228506088257,
302
+ "learning_rate": 1.4586994727592269e-05,
303
+ "loss": 1.253411889076233,
304
+ "step": 84
305
+ },
306
+ {
307
+ "epoch": 0.036286919831223625,
308
+ "grad_norm": 0.36837366223335266,
309
+ "learning_rate": 1.4938488576449914e-05,
310
+ "loss": 1.3064342737197876,
311
+ "step": 86
312
+ },
313
+ {
314
+ "epoch": 0.03713080168776371,
315
+ "grad_norm": 0.3443569242954254,
316
+ "learning_rate": 1.5289982425307557e-05,
317
+ "loss": 1.293562412261963,
318
+ "step": 88
319
+ },
320
+ {
321
+ "epoch": 0.0379746835443038,
322
+ "grad_norm": 0.3799338936805725,
323
+ "learning_rate": 1.5641476274165202e-05,
324
+ "loss": 1.3382648229599,
325
+ "step": 90
326
+ },
327
+ {
328
+ "epoch": 0.038818565400843885,
329
+ "grad_norm": 0.40501922369003296,
330
+ "learning_rate": 1.599297012302285e-05,
331
+ "loss": 1.3925724029541016,
332
+ "step": 92
333
+ },
334
+ {
335
+ "epoch": 0.039662447257383965,
336
+ "grad_norm": 0.4419630467891693,
337
+ "learning_rate": 1.6344463971880492e-05,
338
+ "loss": 1.357171893119812,
339
+ "step": 94
340
+ },
341
+ {
342
+ "epoch": 0.04050632911392405,
343
+ "grad_norm": 0.3619817793369293,
344
+ "learning_rate": 1.6695957820738137e-05,
345
+ "loss": 1.3029985427856445,
346
+ "step": 96
347
+ },
348
+ {
349
+ "epoch": 0.04135021097046414,
350
+ "grad_norm": 0.4851357340812683,
351
+ "learning_rate": 1.7047451669595782e-05,
352
+ "loss": 1.3498191833496094,
353
+ "step": 98
354
+ },
355
+ {
356
+ "epoch": 0.04219409282700422,
357
+ "grad_norm": 0.418658584356308,
358
+ "learning_rate": 1.7398945518453427e-05,
359
+ "loss": 1.185287356376648,
360
+ "step": 100
361
+ },
362
+ {
363
+ "epoch": 0.04219409282700422,
364
+ "eval_loss": 1.2979938983917236,
365
+ "eval_runtime": 682.1979,
366
+ "eval_samples_per_second": 3.089,
367
+ "eval_steps_per_second": 3.089,
368
+ "step": 100
369
+ },
370
+ {
371
+ "epoch": 0.043037974683544304,
372
+ "grad_norm": 0.4464418888092041,
373
+ "learning_rate": 1.7750439367311073e-05,
374
+ "loss": 1.2217272520065308,
375
+ "step": 102
376
+ },
377
+ {
378
+ "epoch": 0.04388185654008439,
379
+ "grad_norm": 0.4706237316131592,
380
+ "learning_rate": 1.8101933216168718e-05,
381
+ "loss": 1.2052050828933716,
382
+ "step": 104
383
+ },
384
+ {
385
+ "epoch": 0.04472573839662447,
386
+ "grad_norm": 0.46394404768943787,
387
+ "learning_rate": 1.8453427065026363e-05,
388
+ "loss": 1.221343994140625,
389
+ "step": 106
390
+ },
391
+ {
392
+ "epoch": 0.04556962025316456,
393
+ "grad_norm": 0.4726889431476593,
394
+ "learning_rate": 1.8804920913884008e-05,
395
+ "loss": 1.2387475967407227,
396
+ "step": 108
397
+ },
398
+ {
399
+ "epoch": 0.046413502109704644,
400
+ "grad_norm": 0.42130985856056213,
401
+ "learning_rate": 1.9156414762741653e-05,
402
+ "loss": 1.2851309776306152,
403
+ "step": 110
404
+ },
405
+ {
406
+ "epoch": 0.04725738396624472,
407
+ "grad_norm": 0.4504576623439789,
408
+ "learning_rate": 1.9507908611599298e-05,
409
+ "loss": 1.2753145694732666,
410
+ "step": 112
411
+ },
412
+ {
413
+ "epoch": 0.04810126582278481,
414
+ "grad_norm": 0.396085262298584,
415
+ "learning_rate": 1.9859402460456943e-05,
416
+ "loss": 1.2427717447280884,
417
+ "step": 114
418
+ },
419
+ {
420
+ "epoch": 0.048945147679324896,
421
+ "grad_norm": 0.5106491446495056,
422
+ "learning_rate": 2.0210896309314588e-05,
423
+ "loss": 1.2943825721740723,
424
+ "step": 116
425
+ },
426
+ {
427
+ "epoch": 0.049789029535864976,
428
+ "grad_norm": 0.42351317405700684,
429
+ "learning_rate": 2.0562390158172233e-05,
430
+ "loss": 1.263301134109497,
431
+ "step": 118
432
+ },
433
+ {
434
+ "epoch": 0.05063291139240506,
435
+ "grad_norm": 0.4403539299964905,
436
+ "learning_rate": 2.0913884007029878e-05,
437
+ "loss": 1.2647849321365356,
438
+ "step": 120
439
+ },
440
+ {
441
+ "epoch": 0.05147679324894515,
442
+ "grad_norm": 0.5260752439498901,
443
+ "learning_rate": 2.1265377855887523e-05,
444
+ "loss": 1.2351393699645996,
445
+ "step": 122
446
+ },
447
+ {
448
+ "epoch": 0.05232067510548523,
449
+ "grad_norm": 0.44978851079940796,
450
+ "learning_rate": 2.1616871704745168e-05,
451
+ "loss": 1.0384471416473389,
452
+ "step": 124
453
+ },
454
+ {
455
+ "epoch": 0.053164556962025315,
456
+ "grad_norm": 0.47732362151145935,
457
+ "learning_rate": 2.1968365553602813e-05,
458
+ "loss": 1.1518068313598633,
459
+ "step": 126
460
+ },
461
+ {
462
+ "epoch": 0.0540084388185654,
463
+ "grad_norm": 0.5473551750183105,
464
+ "learning_rate": 2.231985940246046e-05,
465
+ "loss": 1.2264912128448486,
466
+ "step": 128
467
+ },
468
+ {
469
+ "epoch": 0.05485232067510549,
470
+ "grad_norm": 0.4473855197429657,
471
+ "learning_rate": 2.2671353251318103e-05,
472
+ "loss": 1.1615246534347534,
473
+ "step": 130
474
+ },
475
+ {
476
+ "epoch": 0.05569620253164557,
477
+ "grad_norm": 0.5980377197265625,
478
+ "learning_rate": 2.302284710017575e-05,
479
+ "loss": 1.1334880590438843,
480
+ "step": 132
481
+ },
482
+ {
483
+ "epoch": 0.056540084388185655,
484
+ "grad_norm": 0.5987792015075684,
485
+ "learning_rate": 2.3374340949033394e-05,
486
+ "loss": 1.1546804904937744,
487
+ "step": 134
488
+ },
489
+ {
490
+ "epoch": 0.05738396624472574,
491
+ "grad_norm": 0.45355498790740967,
492
+ "learning_rate": 2.372583479789104e-05,
493
+ "loss": 1.194953441619873,
494
+ "step": 136
495
+ },
496
+ {
497
+ "epoch": 0.05822784810126582,
498
+ "grad_norm": 0.5373698472976685,
499
+ "learning_rate": 2.4077328646748684e-05,
500
+ "loss": 1.1067466735839844,
501
+ "step": 138
502
+ },
503
+ {
504
+ "epoch": 0.05907172995780591,
505
+ "grad_norm": 0.48734328150749207,
506
+ "learning_rate": 2.442882249560633e-05,
507
+ "loss": 1.188468098640442,
508
+ "step": 140
509
+ },
510
+ {
511
+ "epoch": 0.059915611814345994,
512
+ "grad_norm": 0.4692173898220062,
513
+ "learning_rate": 2.478031634446397e-05,
514
+ "loss": 1.1624362468719482,
515
+ "step": 142
516
+ },
517
+ {
518
+ "epoch": 0.060759493670886074,
519
+ "grad_norm": 0.532554030418396,
520
+ "learning_rate": 2.513181019332162e-05,
521
+ "loss": 1.0978907346725464,
522
+ "step": 144
523
+ },
524
+ {
525
+ "epoch": 0.06160337552742616,
526
+ "grad_norm": 0.5853802561759949,
527
+ "learning_rate": 2.5483304042179264e-05,
528
+ "loss": 1.2030781507492065,
529
+ "step": 146
530
+ },
531
+ {
532
+ "epoch": 0.06244725738396625,
533
+ "grad_norm": 0.5061611533164978,
534
+ "learning_rate": 2.583479789103691e-05,
535
+ "loss": 1.082366943359375,
536
+ "step": 148
537
+ },
538
+ {
539
+ "epoch": 0.06329113924050633,
540
+ "grad_norm": 0.49426141381263733,
541
+ "learning_rate": 2.6186291739894554e-05,
542
+ "loss": 1.10564386844635,
543
+ "step": 150
544
+ },
545
+ {
546
+ "epoch": 0.06413502109704641,
547
+ "grad_norm": 0.5846618413925171,
548
+ "learning_rate": 2.6537785588752196e-05,
549
+ "loss": 1.1992807388305664,
550
+ "step": 152
551
+ },
552
+ {
553
+ "epoch": 0.06497890295358649,
554
+ "grad_norm": 0.5517552495002747,
555
+ "learning_rate": 2.6889279437609844e-05,
556
+ "loss": 1.1757566928863525,
557
+ "step": 154
558
+ },
559
+ {
560
+ "epoch": 0.06582278481012659,
561
+ "grad_norm": 0.5667305588722229,
562
+ "learning_rate": 2.7240773286467486e-05,
563
+ "loss": 1.0548783540725708,
564
+ "step": 156
565
+ },
566
+ {
567
+ "epoch": 0.06666666666666667,
568
+ "grad_norm": 0.6760414242744446,
569
+ "learning_rate": 2.7592267135325134e-05,
570
+ "loss": 1.184364914894104,
571
+ "step": 158
572
+ },
573
+ {
574
+ "epoch": 0.06751054852320675,
575
+ "grad_norm": 0.5261430740356445,
576
+ "learning_rate": 2.7943760984182776e-05,
577
+ "loss": 1.1945042610168457,
578
+ "step": 160
579
+ },
580
+ {
581
+ "epoch": 0.06835443037974684,
582
+ "grad_norm": 0.6155015230178833,
583
+ "learning_rate": 2.8295254833040425e-05,
584
+ "loss": 1.2021973133087158,
585
+ "step": 162
586
+ },
587
+ {
588
+ "epoch": 0.06919831223628692,
589
+ "grad_norm": 0.6131619215011597,
590
+ "learning_rate": 2.8646748681898066e-05,
591
+ "loss": 1.144123911857605,
592
+ "step": 164
593
+ },
594
+ {
595
+ "epoch": 0.070042194092827,
596
+ "grad_norm": 0.5749185681343079,
597
+ "learning_rate": 2.8998242530755715e-05,
598
+ "loss": 1.1329256296157837,
599
+ "step": 166
600
+ },
601
+ {
602
+ "epoch": 0.07088607594936709,
603
+ "grad_norm": 0.5243118405342102,
604
+ "learning_rate": 2.9349736379613356e-05,
605
+ "loss": 1.0892387628555298,
606
+ "step": 168
607
+ },
608
+ {
609
+ "epoch": 0.07172995780590717,
610
+ "grad_norm": 0.7190104722976685,
611
+ "learning_rate": 2.9701230228471005e-05,
612
+ "loss": 1.163260817527771,
613
+ "step": 170
614
+ },
615
+ {
616
+ "epoch": 0.07257383966244725,
617
+ "grad_norm": 0.5486982464790344,
618
+ "learning_rate": 3.0052724077328647e-05,
619
+ "loss": 1.0880777835845947,
620
+ "step": 172
621
+ },
622
+ {
623
+ "epoch": 0.07341772151898734,
624
+ "grad_norm": 0.5020889043807983,
625
+ "learning_rate": 3.0404217926186295e-05,
626
+ "loss": 1.0433368682861328,
627
+ "step": 174
628
+ },
629
+ {
630
+ "epoch": 0.07426160337552742,
631
+ "grad_norm": 0.47329774498939514,
632
+ "learning_rate": 3.075571177504394e-05,
633
+ "loss": 1.0528991222381592,
634
+ "step": 176
635
+ },
636
+ {
637
+ "epoch": 0.0751054852320675,
638
+ "grad_norm": 0.6635547876358032,
639
+ "learning_rate": 3.110720562390158e-05,
640
+ "loss": 1.1627811193466187,
641
+ "step": 178
642
+ },
643
+ {
644
+ "epoch": 0.0759493670886076,
645
+ "grad_norm": 0.5624618530273438,
646
+ "learning_rate": 3.145869947275923e-05,
647
+ "loss": 1.084869384765625,
648
+ "step": 180
649
+ },
650
+ {
651
+ "epoch": 0.07679324894514768,
652
+ "grad_norm": 0.6029536724090576,
653
+ "learning_rate": 3.181019332161687e-05,
654
+ "loss": 1.2227671146392822,
655
+ "step": 182
656
+ },
657
+ {
658
+ "epoch": 0.07763713080168777,
659
+ "grad_norm": 0.930959939956665,
660
+ "learning_rate": 3.216168717047452e-05,
661
+ "loss": 1.0955452919006348,
662
+ "step": 184
663
+ },
664
+ {
665
+ "epoch": 0.07848101265822785,
666
+ "grad_norm": 0.5326952338218689,
667
+ "learning_rate": 3.251318101933216e-05,
668
+ "loss": 1.0640798807144165,
669
+ "step": 186
670
+ },
671
+ {
672
+ "epoch": 0.07932489451476793,
673
+ "grad_norm": 0.5484727621078491,
674
+ "learning_rate": 3.286467486818981e-05,
675
+ "loss": 1.0700589418411255,
676
+ "step": 188
677
+ },
678
+ {
679
+ "epoch": 0.08016877637130802,
680
+ "grad_norm": 0.605273425579071,
681
+ "learning_rate": 3.3216168717047456e-05,
682
+ "loss": 1.1593081951141357,
683
+ "step": 190
684
+ },
685
+ {
686
+ "epoch": 0.0810126582278481,
687
+ "grad_norm": 0.5704394578933716,
688
+ "learning_rate": 3.35676625659051e-05,
689
+ "loss": 1.1617076396942139,
690
+ "step": 192
691
+ },
692
+ {
693
+ "epoch": 0.08185654008438818,
694
+ "grad_norm": 0.5929452180862427,
695
+ "learning_rate": 3.3919156414762746e-05,
696
+ "loss": 1.1346839666366577,
697
+ "step": 194
698
+ },
699
+ {
700
+ "epoch": 0.08270042194092828,
701
+ "grad_norm": 0.5624077916145325,
702
+ "learning_rate": 3.427065026362039e-05,
703
+ "loss": 1.0934710502624512,
704
+ "step": 196
705
+ },
706
+ {
707
+ "epoch": 0.08354430379746836,
708
+ "grad_norm": 0.6717425584793091,
709
+ "learning_rate": 3.4622144112478036e-05,
710
+ "loss": 1.1810534000396729,
711
+ "step": 198
712
+ },
713
+ {
714
+ "epoch": 0.08438818565400844,
715
+ "grad_norm": 0.5120199918746948,
716
+ "learning_rate": 3.4973637961335674e-05,
717
+ "loss": 1.1525514125823975,
718
+ "step": 200
719
+ },
720
+ {
721
+ "epoch": 0.08438818565400844,
722
+ "eval_loss": 1.142486810684204,
723
+ "eval_runtime": 668.2356,
724
+ "eval_samples_per_second": 3.153,
725
+ "eval_steps_per_second": 3.153,
726
+ "step": 200
727
+ },
728
+ {
729
+ "epoch": 0.08523206751054853,
730
+ "grad_norm": 0.5144487023353577,
731
+ "learning_rate": 3.5325131810193326e-05,
732
+ "loss": 1.0243735313415527,
733
+ "step": 202
734
+ },
735
+ {
736
+ "epoch": 0.08607594936708861,
737
+ "grad_norm": 0.6325069069862366,
738
+ "learning_rate": 3.5676625659050964e-05,
739
+ "loss": 1.118743896484375,
740
+ "step": 204
741
+ },
742
+ {
743
+ "epoch": 0.08691983122362869,
744
+ "grad_norm": 0.5501633882522583,
745
+ "learning_rate": 3.6028119507908616e-05,
746
+ "loss": 1.0380504131317139,
747
+ "step": 206
748
+ },
749
+ {
750
+ "epoch": 0.08776371308016878,
751
+ "grad_norm": 0.6133899688720703,
752
+ "learning_rate": 3.6379613356766254e-05,
753
+ "loss": 0.9837555885314941,
754
+ "step": 208
755
+ },
756
+ {
757
+ "epoch": 0.08860759493670886,
758
+ "grad_norm": 0.5799810886383057,
759
+ "learning_rate": 3.6731107205623906e-05,
760
+ "loss": 1.090720295906067,
761
+ "step": 210
762
+ },
763
+ {
764
+ "epoch": 0.08945147679324894,
765
+ "grad_norm": 0.6039511561393738,
766
+ "learning_rate": 3.7082601054481544e-05,
767
+ "loss": 1.120232343673706,
768
+ "step": 212
769
+ },
770
+ {
771
+ "epoch": 0.09029535864978903,
772
+ "grad_norm": 0.5983024835586548,
773
+ "learning_rate": 3.7434094903339196e-05,
774
+ "loss": 1.096949815750122,
775
+ "step": 214
776
+ },
777
+ {
778
+ "epoch": 0.09113924050632911,
779
+ "grad_norm": 0.5641079545021057,
780
+ "learning_rate": 3.7785588752196835e-05,
781
+ "loss": 1.1226298809051514,
782
+ "step": 216
783
+ },
784
+ {
785
+ "epoch": 0.0919831223628692,
786
+ "grad_norm": 0.655717134475708,
787
+ "learning_rate": 3.8137082601054486e-05,
788
+ "loss": 1.1260643005371094,
789
+ "step": 218
790
+ },
791
+ {
792
+ "epoch": 0.09282700421940929,
793
+ "grad_norm": 0.6111898422241211,
794
+ "learning_rate": 3.848857644991213e-05,
795
+ "loss": 1.0777709484100342,
796
+ "step": 220
797
+ },
798
+ {
799
+ "epoch": 0.09367088607594937,
800
+ "grad_norm": 0.6821302771568298,
801
+ "learning_rate": 3.884007029876977e-05,
802
+ "loss": 1.10588800907135,
803
+ "step": 222
804
+ },
805
+ {
806
+ "epoch": 0.09451476793248945,
807
+ "grad_norm": 0.693175733089447,
808
+ "learning_rate": 3.919156414762742e-05,
809
+ "loss": 1.1498671770095825,
810
+ "step": 224
811
+ },
812
+ {
813
+ "epoch": 0.09535864978902954,
814
+ "grad_norm": 0.5288166403770447,
815
+ "learning_rate": 3.954305799648506e-05,
816
+ "loss": 1.0587562322616577,
817
+ "step": 226
818
+ },
819
+ {
820
+ "epoch": 0.09620253164556962,
821
+ "grad_norm": 0.6882867813110352,
822
+ "learning_rate": 3.989455184534271e-05,
823
+ "loss": 1.1107512712478638,
824
+ "step": 228
825
+ },
826
+ {
827
+ "epoch": 0.0970464135021097,
828
+ "grad_norm": 0.5834154486656189,
829
+ "learning_rate": 4.024604569420035e-05,
830
+ "loss": 1.020510196685791,
831
+ "step": 230
832
+ },
833
+ {
834
+ "epoch": 0.09789029535864979,
835
+ "grad_norm": 0.7157064080238342,
836
+ "learning_rate": 4.0597539543058e-05,
837
+ "loss": 1.0642449855804443,
838
+ "step": 232
839
+ },
840
+ {
841
+ "epoch": 0.09873417721518987,
842
+ "grad_norm": 0.6530708074569702,
843
+ "learning_rate": 4.094903339191564e-05,
844
+ "loss": 1.0359872579574585,
845
+ "step": 234
846
+ },
847
+ {
848
+ "epoch": 0.09957805907172995,
849
+ "grad_norm": 0.6329686045646667,
850
+ "learning_rate": 4.130052724077329e-05,
851
+ "loss": 1.050504446029663,
852
+ "step": 236
853
+ },
854
+ {
855
+ "epoch": 0.10042194092827005,
856
+ "grad_norm": 0.6597026586532593,
857
+ "learning_rate": 4.165202108963093e-05,
858
+ "loss": 1.2621175050735474,
859
+ "step": 238
860
+ },
861
+ {
862
+ "epoch": 0.10126582278481013,
863
+ "grad_norm": 0.6195225119590759,
864
+ "learning_rate": 4.200351493848858e-05,
865
+ "loss": 1.1218310594558716,
866
+ "step": 240
867
+ },
868
+ {
869
+ "epoch": 0.1021097046413502,
870
+ "grad_norm": 0.6764137744903564,
871
+ "learning_rate": 4.235500878734622e-05,
872
+ "loss": 1.1250728368759155,
873
+ "step": 242
874
+ },
875
+ {
876
+ "epoch": 0.1029535864978903,
877
+ "grad_norm": 0.552363395690918,
878
+ "learning_rate": 4.270650263620387e-05,
879
+ "loss": 1.028212308883667,
880
+ "step": 244
881
+ },
882
+ {
883
+ "epoch": 0.10379746835443038,
884
+ "grad_norm": 0.5620495676994324,
885
+ "learning_rate": 4.305799648506151e-05,
886
+ "loss": 1.0425450801849365,
887
+ "step": 246
888
+ },
889
+ {
890
+ "epoch": 0.10464135021097046,
891
+ "grad_norm": 0.6860032081604004,
892
+ "learning_rate": 4.3409490333919156e-05,
893
+ "loss": 1.144278883934021,
894
+ "step": 248
895
+ },
896
+ {
897
+ "epoch": 0.10548523206751055,
898
+ "grad_norm": 0.6033259034156799,
899
+ "learning_rate": 4.37609841827768e-05,
900
+ "loss": 1.1223982572555542,
901
+ "step": 250
902
+ },
903
+ {
904
+ "epoch": 0.10632911392405063,
905
+ "grad_norm": 0.6292146444320679,
906
+ "learning_rate": 4.4112478031634446e-05,
907
+ "loss": 1.1609960794448853,
908
+ "step": 252
909
+ },
910
+ {
911
+ "epoch": 0.10717299578059072,
912
+ "grad_norm": 0.7982883453369141,
913
+ "learning_rate": 4.44639718804921e-05,
914
+ "loss": 1.063547968864441,
915
+ "step": 254
916
+ },
917
+ {
918
+ "epoch": 0.1080168776371308,
919
+ "grad_norm": 0.7719110250473022,
920
+ "learning_rate": 4.4815465729349736e-05,
921
+ "loss": 1.0719804763793945,
922
+ "step": 256
923
+ },
924
+ {
925
+ "epoch": 0.10886075949367088,
926
+ "grad_norm": 0.6101011633872986,
927
+ "learning_rate": 4.516695957820739e-05,
928
+ "loss": 1.0778400897979736,
929
+ "step": 258
930
+ },
931
+ {
932
+ "epoch": 0.10970464135021098,
933
+ "grad_norm": 0.7300994396209717,
934
+ "learning_rate": 4.5518453427065026e-05,
935
+ "loss": 1.2129558324813843,
936
+ "step": 260
937
+ },
938
+ {
939
+ "epoch": 0.11054852320675106,
940
+ "grad_norm": 0.8348747491836548,
941
+ "learning_rate": 4.586994727592268e-05,
942
+ "loss": 1.221714735031128,
943
+ "step": 262
944
+ },
945
+ {
946
+ "epoch": 0.11139240506329114,
947
+ "grad_norm": 0.5445612072944641,
948
+ "learning_rate": 4.6221441124780316e-05,
949
+ "loss": 1.0187978744506836,
950
+ "step": 264
951
+ },
952
+ {
953
+ "epoch": 0.11223628691983123,
954
+ "grad_norm": 0.6230319738388062,
955
+ "learning_rate": 4.657293497363797e-05,
956
+ "loss": 1.096561312675476,
957
+ "step": 266
958
+ },
959
+ {
960
+ "epoch": 0.11308016877637131,
961
+ "grad_norm": 0.6231237649917603,
962
+ "learning_rate": 4.6924428822495606e-05,
963
+ "loss": 1.089842438697815,
964
+ "step": 268
965
+ },
966
+ {
967
+ "epoch": 0.11392405063291139,
968
+ "grad_norm": 0.7178627252578735,
969
+ "learning_rate": 4.727592267135325e-05,
970
+ "loss": 1.0696645975112915,
971
+ "step": 270
972
+ },
973
+ {
974
+ "epoch": 0.11476793248945148,
975
+ "grad_norm": 0.6895854473114014,
976
+ "learning_rate": 4.7627416520210896e-05,
977
+ "loss": 1.0511361360549927,
978
+ "step": 272
979
+ },
980
+ {
981
+ "epoch": 0.11561181434599156,
982
+ "grad_norm": 0.6046878695487976,
983
+ "learning_rate": 4.797891036906854e-05,
984
+ "loss": 1.1373958587646484,
985
+ "step": 274
986
+ },
987
+ {
988
+ "epoch": 0.11645569620253164,
989
+ "grad_norm": 0.6524552702903748,
990
+ "learning_rate": 4.833040421792619e-05,
991
+ "loss": 1.0734186172485352,
992
+ "step": 276
993
+ },
994
+ {
995
+ "epoch": 0.11729957805907174,
996
+ "grad_norm": 0.6331019997596741,
997
+ "learning_rate": 4.868189806678383e-05,
998
+ "loss": 1.123913049697876,
999
+ "step": 278
1000
+ },
1001
+ {
1002
+ "epoch": 0.11814345991561181,
1003
+ "grad_norm": 0.5919018983840942,
1004
+ "learning_rate": 4.903339191564148e-05,
1005
+ "loss": 1.0635710954666138,
1006
+ "step": 280
1007
+ },
1008
+ {
1009
+ "epoch": 0.1189873417721519,
1010
+ "grad_norm": 0.6067633032798767,
1011
+ "learning_rate": 4.938488576449912e-05,
1012
+ "loss": 1.0429247617721558,
1013
+ "step": 282
1014
+ },
1015
+ {
1016
+ "epoch": 0.11983122362869199,
1017
+ "grad_norm": 0.6583750247955322,
1018
+ "learning_rate": 4.9736379613356774e-05,
1019
+ "loss": 1.1397464275360107,
1020
+ "step": 284
1021
+ },
1022
+ {
1023
+ "epoch": 0.12067510548523207,
1024
+ "grad_norm": 0.6200069785118103,
1025
+ "learning_rate": 5.008787346221442e-05,
1026
+ "loss": 1.0590803623199463,
1027
+ "step": 286
1028
+ },
1029
+ {
1030
+ "epoch": 0.12151898734177215,
1031
+ "grad_norm": 0.6798665523529053,
1032
+ "learning_rate": 5.0439367311072064e-05,
1033
+ "loss": 1.1318789720535278,
1034
+ "step": 288
1035
+ },
1036
+ {
1037
+ "epoch": 0.12236286919831224,
1038
+ "grad_norm": 0.7508794069290161,
1039
+ "learning_rate": 5.07908611599297e-05,
1040
+ "loss": 1.0934956073760986,
1041
+ "step": 290
1042
+ },
1043
+ {
1044
+ "epoch": 0.12320675105485232,
1045
+ "grad_norm": 0.6901452541351318,
1046
+ "learning_rate": 5.114235500878735e-05,
1047
+ "loss": 1.163407802581787,
1048
+ "step": 292
1049
+ },
1050
+ {
1051
+ "epoch": 0.1240506329113924,
1052
+ "grad_norm": 0.6423285603523254,
1053
+ "learning_rate": 5.1493848857644985e-05,
1054
+ "loss": 1.09059476852417,
1055
+ "step": 294
1056
+ },
1057
+ {
1058
+ "epoch": 0.1248945147679325,
1059
+ "grad_norm": 0.6839275360107422,
1060
+ "learning_rate": 5.1845342706502644e-05,
1061
+ "loss": 1.0690211057662964,
1062
+ "step": 296
1063
+ },
1064
+ {
1065
+ "epoch": 0.1257383966244726,
1066
+ "grad_norm": 0.6350128054618835,
1067
+ "learning_rate": 5.219683655536028e-05,
1068
+ "loss": 0.982322096824646,
1069
+ "step": 298
1070
+ },
1071
+ {
1072
+ "epoch": 0.12658227848101267,
1073
+ "grad_norm": 0.7136530876159668,
1074
+ "learning_rate": 5.254833040421793e-05,
1075
+ "loss": 1.1132930517196655,
1076
+ "step": 300
1077
+ },
1078
+ {
+ "epoch": 0.12658227848101267,
+ "eval_loss": 1.0952109098434448,
+ "eval_runtime": 677.0652,
+ "eval_samples_per_second": 3.112,
+ "eval_steps_per_second": 3.112,
+ "step": 300
+ },
1086
+ {
1087
+ "epoch": 0.12742616033755275,
1088
+ "grad_norm": 0.7339721322059631,
1089
+ "learning_rate": 5.289982425307557e-05,
1090
+ "loss": 0.973595917224884,
1091
+ "step": 302
1092
+ },
1093
+ {
1094
+ "epoch": 0.12827004219409283,
1095
+ "grad_norm": 0.5941481590270996,
1096
+ "learning_rate": 5.3251318101933224e-05,
1097
+ "loss": 0.9819849729537964,
1098
+ "step": 304
1099
+ },
1100
+ {
1101
+ "epoch": 0.1291139240506329,
1102
+ "grad_norm": 0.7153938412666321,
1103
+ "learning_rate": 5.360281195079086e-05,
1104
+ "loss": 1.0315470695495605,
1105
+ "step": 306
1106
+ },
1107
+ {
1108
+ "epoch": 0.12995780590717299,
1109
+ "grad_norm": 0.5167180299758911,
1110
+ "learning_rate": 5.395430579964851e-05,
1111
+ "loss": 0.9492001533508301,
1112
+ "step": 308
1113
+ },
1114
+ {
1115
+ "epoch": 0.1308016877637131,
1116
+ "grad_norm": 0.6055944561958313,
1117
+ "learning_rate": 5.430579964850615e-05,
1118
+ "loss": 1.0156209468841553,
1119
+ "step": 310
1120
+ },
1121
+ {
1122
+ "epoch": 0.13164556962025317,
1123
+ "grad_norm": 0.7662386298179626,
1124
+ "learning_rate": 5.4657293497363805e-05,
1125
+ "loss": 1.1791651248931885,
1126
+ "step": 312
1127
+ },
1128
+ {
1129
+ "epoch": 0.13248945147679325,
1130
+ "grad_norm": 0.6065546274185181,
1131
+ "learning_rate": 5.500878734622145e-05,
1132
+ "loss": 1.0009297132492065,
1133
+ "step": 314
1134
+ },
1135
+ {
1136
+ "epoch": 0.13333333333333333,
1137
+ "grad_norm": 0.604225754737854,
1138
+ "learning_rate": 5.536028119507909e-05,
1139
+ "loss": 1.0208244323730469,
1140
+ "step": 316
1141
+ },
1142
+ {
1143
+ "epoch": 0.1341772151898734,
1144
+ "grad_norm": 0.6186763048171997,
1145
+ "learning_rate": 5.571177504393673e-05,
1146
+ "loss": 0.9968416690826416,
1147
+ "step": 318
1148
+ },
1149
+ {
1150
+ "epoch": 0.1350210970464135,
1151
+ "grad_norm": 0.7100363969802856,
1152
+ "learning_rate": 5.606326889279437e-05,
1153
+ "loss": 0.9540256857872009,
1154
+ "step": 320
1155
+ },
1156
+ {
1157
+ "epoch": 0.1358649789029536,
1158
+ "grad_norm": 0.6979711055755615,
1159
+ "learning_rate": 5.641476274165203e-05,
1160
+ "loss": 1.0631953477859497,
1161
+ "step": 322
1162
+ },
1163
+ {
1164
+ "epoch": 0.13670886075949368,
1165
+ "grad_norm": 0.6237109303474426,
1166
+ "learning_rate": 5.676625659050967e-05,
1167
+ "loss": 1.0170501470565796,
1168
+ "step": 324
1169
+ },
1170
+ {
1171
+ "epoch": 0.13755274261603376,
1172
+ "grad_norm": 0.6525548696517944,
1173
+ "learning_rate": 5.711775043936731e-05,
1174
+ "loss": 1.0715603828430176,
1175
+ "step": 326
1176
+ },
1177
+ {
1178
+ "epoch": 0.13839662447257384,
1179
+ "grad_norm": 0.6869221329689026,
1180
+ "learning_rate": 5.746924428822495e-05,
1181
+ "loss": 1.0111541748046875,
1182
+ "step": 328
1183
+ },
1184
+ {
1185
+ "epoch": 0.13924050632911392,
1186
+ "grad_norm": 0.553188145160675,
1187
+ "learning_rate": 5.782073813708261e-05,
1188
+ "loss": 1.0311682224273682,
1189
+ "step": 330
1190
+ },
1191
+ {
1192
+ "epoch": 0.140084388185654,
1193
+ "grad_norm": 0.6760852932929993,
1194
+ "learning_rate": 5.817223198594025e-05,
1195
+ "loss": 1.0213634967803955,
1196
+ "step": 332
1197
+ },
1198
+ {
1199
+ "epoch": 0.1409282700421941,
1200
+ "grad_norm": 0.5907419919967651,
1201
+ "learning_rate": 5.8523725834797894e-05,
1202
+ "loss": 0.9748594164848328,
1203
+ "step": 334
1204
+ },
1205
+ {
1206
+ "epoch": 0.14177215189873418,
1207
+ "grad_norm": 0.7044920921325684,
1208
+ "learning_rate": 5.887521968365554e-05,
1209
+ "loss": 1.05863356590271,
1210
+ "step": 336
1211
+ },
1212
+ {
1213
+ "epoch": 0.14261603375527426,
1214
+ "grad_norm": 0.679073691368103,
1215
+ "learning_rate": 5.922671353251318e-05,
1216
+ "loss": 1.1341127157211304,
1217
+ "step": 338
1218
+ },
1219
+ {
1220
+ "epoch": 0.14345991561181434,
1221
+ "grad_norm": 0.7676237225532532,
1222
+ "learning_rate": 5.957820738137083e-05,
1223
+ "loss": 0.9540836215019226,
1224
+ "step": 340
1225
+ },
1226
+ {
1227
+ "epoch": 0.14430379746835442,
1228
+ "grad_norm": 0.6313899755477905,
1229
+ "learning_rate": 5.9929701230228474e-05,
1230
+ "loss": 1.0585911273956299,
1231
+ "step": 342
1232
+ },
1233
+ {
1234
+ "epoch": 0.1451476793248945,
1235
+ "grad_norm": 0.7123099565505981,
1236
+ "learning_rate": 6.028119507908612e-05,
1237
+ "loss": 1.0760118961334229,
1238
+ "step": 344
1239
+ },
1240
+ {
1241
+ "epoch": 0.1459915611814346,
1242
+ "grad_norm": 0.585935652256012,
1243
+ "learning_rate": 6.063268892794376e-05,
1244
+ "loss": 1.036866307258606,
1245
+ "step": 346
1246
+ },
1247
+ {
1248
+ "epoch": 0.1468354430379747,
1249
+ "grad_norm": 0.5643263459205627,
1250
+ "learning_rate": 6.0984182776801416e-05,
1251
+ "loss": 1.0242938995361328,
1252
+ "step": 348
1253
+ },
1254
+ {
1255
+ "epoch": 0.14767932489451477,
1256
+ "grad_norm": 0.626761794090271,
1257
+ "learning_rate": 6.133567662565906e-05,
1258
+ "loss": 1.0497376918792725,
1259
+ "step": 350
1260
+ },
1261
+ {
1262
+ "epoch": 0.14852320675105485,
1263
+ "grad_norm": 0.5106956958770752,
1264
+ "learning_rate": 6.16871704745167e-05,
1265
+ "loss": 0.9811885356903076,
1266
+ "step": 352
1267
+ },
1268
+ {
1269
+ "epoch": 0.14936708860759493,
1270
+ "grad_norm": 0.6948089003562927,
1271
+ "learning_rate": 6.203866432337434e-05,
1272
+ "loss": 1.0715330839157104,
1273
+ "step": 354
1274
+ },
1275
+ {
1276
+ "epoch": 0.150210970464135,
1277
+ "grad_norm": 0.699713945388794,
1278
+ "learning_rate": 6.239015817223199e-05,
1279
+ "loss": 1.0405226945877075,
1280
+ "step": 356
1281
+ },
1282
+ {
1283
+ "epoch": 0.15105485232067511,
1284
+ "grad_norm": 0.6437667012214661,
1285
+ "learning_rate": 6.274165202108964e-05,
1286
+ "loss": 1.0490930080413818,
1287
+ "step": 358
1288
+ },
1289
+ {
1290
+ "epoch": 0.1518987341772152,
1291
+ "grad_norm": 0.6952699422836304,
1292
+ "learning_rate": 6.309314586994728e-05,
1293
+ "loss": 0.9267548322677612,
1294
+ "step": 360
1295
+ },
1296
+ {
1297
+ "epoch": 0.15274261603375527,
1298
+ "grad_norm": 0.6713186502456665,
1299
+ "learning_rate": 6.344463971880492e-05,
1300
+ "loss": 1.0427420139312744,
1301
+ "step": 362
1302
+ },
1303
+ {
1304
+ "epoch": 0.15358649789029535,
1305
+ "grad_norm": 0.6750379800796509,
1306
+ "learning_rate": 6.379613356766257e-05,
1307
+ "loss": 1.048950433731079,
1308
+ "step": 364
1309
+ },
1310
+ {
1311
+ "epoch": 0.15443037974683543,
1312
+ "grad_norm": 0.6053379774093628,
1313
+ "learning_rate": 6.414762741652022e-05,
1314
+ "loss": 1.0156004428863525,
1315
+ "step": 366
1316
+ },
1317
+ {
1318
+ "epoch": 0.15527426160337554,
1319
+ "grad_norm": 0.8063633441925049,
1320
+ "learning_rate": 6.449912126537786e-05,
1321
+ "loss": 1.0020819902420044,
1322
+ "step": 368
1323
+ },
1324
+ {
1325
+ "epoch": 0.15611814345991562,
1326
+ "grad_norm": 0.8027494549751282,
1327
+ "learning_rate": 6.48506151142355e-05,
1328
+ "loss": 1.055633783340454,
1329
+ "step": 370
1330
+ },
1331
+ {
1332
+ "epoch": 0.1569620253164557,
1333
+ "grad_norm": 0.6580121517181396,
1334
+ "learning_rate": 6.520210896309315e-05,
1335
+ "loss": 1.0149940252304077,
1336
+ "step": 372
1337
+ },
1338
+ {
1339
+ "epoch": 0.15780590717299578,
1340
+ "grad_norm": 0.6561233997344971,
1341
+ "learning_rate": 6.55536028119508e-05,
1342
+ "loss": 0.9769611954689026,
1343
+ "step": 374
1344
+ },
1345
+ {
1346
+ "epoch": 0.15864978902953586,
1347
+ "grad_norm": 0.6444346308708191,
1348
+ "learning_rate": 6.590509666080844e-05,
1349
+ "loss": 0.9099349975585938,
1350
+ "step": 376
1351
+ },
1352
+ {
1353
+ "epoch": 0.15949367088607594,
1354
+ "grad_norm": 0.5879359245300293,
1355
+ "learning_rate": 6.625659050966608e-05,
1356
+ "loss": 1.0797548294067383,
1357
+ "step": 378
1358
+ },
1359
+ {
1360
+ "epoch": 0.16033755274261605,
1361
+ "grad_norm": 0.6994144916534424,
1362
+ "learning_rate": 6.660808435852373e-05,
1363
+ "loss": 1.0336791276931763,
1364
+ "step": 380
1365
+ },
1366
+ {
1367
+ "epoch": 0.16118143459915613,
1368
+ "grad_norm": 0.6128669381141663,
1369
+ "learning_rate": 6.695957820738138e-05,
1370
+ "loss": 1.018118143081665,
1371
+ "step": 382
1372
+ },
1373
+ {
1374
+ "epoch": 0.1620253164556962,
1375
+ "grad_norm": 1.0237540006637573,
1376
+ "learning_rate": 6.731107205623902e-05,
1377
+ "loss": 1.1405497789382935,
1378
+ "step": 384
1379
+ },
1380
+ {
1381
+ "epoch": 0.16286919831223629,
1382
+ "grad_norm": 0.6091578006744385,
1383
+ "learning_rate": 6.766256590509666e-05,
1384
+ "loss": 1.0314189195632935,
1385
+ "step": 386
1386
+ },
1387
+ {
1388
+ "epoch": 0.16371308016877636,
1389
+ "grad_norm": 0.5916037559509277,
1390
+ "learning_rate": 6.801405975395431e-05,
1391
+ "loss": 0.9564052820205688,
1392
+ "step": 388
1393
+ },
1394
+ {
1395
+ "epoch": 0.16455696202531644,
1396
+ "grad_norm": 0.771653950214386,
1397
+ "learning_rate": 6.836555360281195e-05,
1398
+ "loss": 1.0023859739303589,
1399
+ "step": 390
1400
+ },
1401
+ {
1402
+ "epoch": 0.16540084388185655,
1403
+ "grad_norm": 0.654658317565918,
1404
+ "learning_rate": 6.87170474516696e-05,
1405
+ "loss": 1.07024085521698,
1406
+ "step": 392
1407
+ },
1408
+ {
1409
+ "epoch": 0.16624472573839663,
1410
+ "grad_norm": 0.6611968874931335,
1411
+ "learning_rate": 6.906854130052724e-05,
1412
+ "loss": 1.0552500486373901,
1413
+ "step": 394
1414
+ },
1415
+ {
1416
+ "epoch": 0.1670886075949367,
1417
+ "grad_norm": 0.6955893039703369,
1418
+ "learning_rate": 6.942003514938489e-05,
1419
+ "loss": 1.0562875270843506,
1420
+ "step": 396
1421
+ },
1422
+ {
1423
+ "epoch": 0.1679324894514768,
1424
+ "grad_norm": 0.6666058301925659,
1425
+ "learning_rate": 6.977152899824253e-05,
1426
+ "loss": 0.9850592017173767,
1427
+ "step": 398
1428
+ },
1429
+ {
1430
+ "epoch": 0.16877637130801687,
1431
+ "grad_norm": 0.6131711006164551,
1432
+ "learning_rate": 7.012302284710018e-05,
1433
+ "loss": 1.0077755451202393,
1434
+ "step": 400
1435
+ },
1436
+ {
+ "epoch": 0.16877637130801687,
+ "eval_loss": 1.0625108480453491,
+ "eval_runtime": 691.0068,
+ "eval_samples_per_second": 3.049,
+ "eval_steps_per_second": 3.049,
+ "step": 400
+ },
1444
+ {
1445
+ "epoch": 0.16962025316455695,
1446
+ "grad_norm": 0.6286499500274658,
1447
+ "learning_rate": 7.047451669595783e-05,
1448
+ "loss": 1.1012427806854248,
1449
+ "step": 402
1450
+ },
1451
+ {
1452
+ "epoch": 0.17046413502109706,
1453
+ "grad_norm": 0.6639351844787598,
1454
+ "learning_rate": 7.082601054481547e-05,
1455
+ "loss": 1.0379719734191895,
1456
+ "step": 404
1457
+ },
1458
+ {
1459
+ "epoch": 0.17130801687763714,
1460
+ "grad_norm": 0.750401496887207,
1461
+ "learning_rate": 7.117750439367311e-05,
1462
+ "loss": 1.031856656074524,
1463
+ "step": 406
1464
+ },
1465
+ {
1466
+ "epoch": 0.17215189873417722,
1467
+ "grad_norm": 0.8084847331047058,
1468
+ "learning_rate": 7.152899824253075e-05,
1469
+ "loss": 1.0493193864822388,
1470
+ "step": 408
1471
+ },
1472
+ {
1473
+ "epoch": 0.1729957805907173,
1474
+ "grad_norm": 0.7448462247848511,
1475
+ "learning_rate": 7.188049209138841e-05,
1476
+ "loss": 1.1012418270111084,
1477
+ "step": 410
1478
+ },
1479
+ {
1480
+ "epoch": 0.17383966244725738,
1481
+ "grad_norm": 0.5841867923736572,
1482
+ "learning_rate": 7.223198594024605e-05,
1483
+ "loss": 0.9926692247390747,
1484
+ "step": 412
1485
+ },
1486
+ {
1487
+ "epoch": 0.17468354430379746,
1488
+ "grad_norm": 0.7125606536865234,
1489
+ "learning_rate": 7.258347978910369e-05,
1490
+ "loss": 1.0588877201080322,
1491
+ "step": 414
1492
+ },
1493
+ {
1494
+ "epoch": 0.17552742616033756,
1495
+ "grad_norm": 0.5750942230224609,
1496
+ "learning_rate": 7.293497363796134e-05,
1497
+ "loss": 1.038270354270935,
1498
+ "step": 416
1499
+ },
1500
+ {
1501
+ "epoch": 0.17637130801687764,
1502
+ "grad_norm": 0.565444827079773,
1503
+ "learning_rate": 7.328646748681899e-05,
1504
+ "loss": 0.9843021035194397,
1505
+ "step": 418
1506
+ },
1507
+ {
1508
+ "epoch": 0.17721518987341772,
1509
+ "grad_norm": 0.5825693011283875,
1510
+ "learning_rate": 7.363796133567663e-05,
1511
+ "loss": 1.0731632709503174,
1512
+ "step": 420
1513
+ },
1514
+ {
1515
+ "epoch": 0.1780590717299578,
1516
+ "grad_norm": 0.6267391443252563,
1517
+ "learning_rate": 7.398945518453427e-05,
1518
+ "loss": 1.0061273574829102,
1519
+ "step": 422
1520
+ },
1521
+ {
1522
+ "epoch": 0.17890295358649788,
1523
+ "grad_norm": 0.6621372103691101,
1524
+ "learning_rate": 7.434094903339192e-05,
1525
+ "loss": 1.0461612939834595,
1526
+ "step": 424
1527
+ },
1528
+ {
1529
+ "epoch": 0.17974683544303796,
1530
+ "grad_norm": 0.6635435223579407,
1531
+ "learning_rate": 7.469244288224957e-05,
1532
+ "loss": 0.9789207577705383,
1533
+ "step": 426
1534
+ },
1535
+ {
1536
+ "epoch": 0.18059071729957807,
1537
+ "grad_norm": 0.6342346668243408,
1538
+ "learning_rate": 7.504393673110721e-05,
1539
+ "loss": 1.0527069568634033,
1540
+ "step": 428
1541
+ },
1542
+ {
1543
+ "epoch": 0.18143459915611815,
1544
+ "grad_norm": 0.6762149930000305,
1545
+ "learning_rate": 7.539543057996485e-05,
1546
+ "loss": 0.9708702564239502,
1547
+ "step": 430
1548
+ },
1549
+ {
1550
+ "epoch": 0.18227848101265823,
1551
+ "grad_norm": 0.7073282599449158,
1552
+ "learning_rate": 7.57469244288225e-05,
1553
+ "loss": 1.0509834289550781,
1554
+ "step": 432
1555
+ },
1556
+ {
1557
+ "epoch": 0.1831223628691983,
1558
+ "grad_norm": 0.6917856931686401,
1559
+ "learning_rate": 7.609841827768014e-05,
1560
+ "loss": 1.0128819942474365,
1561
+ "step": 434
1562
+ },
1563
+ {
1564
+ "epoch": 0.1839662447257384,
1565
+ "grad_norm": 0.5574942231178284,
1566
+ "learning_rate": 7.644991212653779e-05,
1567
+ "loss": 0.989395797252655,
1568
+ "step": 436
1569
+ },
1570
+ {
1571
+ "epoch": 0.1848101265822785,
1572
+ "grad_norm": 0.640765905380249,
1573
+ "learning_rate": 7.680140597539543e-05,
1574
+ "loss": 0.9846042990684509,
1575
+ "step": 438
1576
+ },
1577
+ {
1578
+ "epoch": 0.18565400843881857,
1579
+ "grad_norm": 0.6699127554893494,
1580
+ "learning_rate": 7.715289982425308e-05,
1581
+ "loss": 1.0344442129135132,
1582
+ "step": 440
1583
+ },
1584
+ {
1585
+ "epoch": 0.18649789029535865,
1586
+ "grad_norm": 0.6164930462837219,
1587
+ "learning_rate": 7.750439367311072e-05,
1588
+ "loss": 1.0179373025894165,
1589
+ "step": 442
1590
+ },
1591
+ {
1592
+ "epoch": 0.18734177215189873,
1593
+ "grad_norm": 0.6880720853805542,
1594
+ "learning_rate": 7.785588752196837e-05,
1595
+ "loss": 1.0518895387649536,
1596
+ "step": 444
1597
+ },
1598
+ {
1599
+ "epoch": 0.1881856540084388,
1600
+ "grad_norm": 0.6501413583755493,
1601
+ "learning_rate": 7.820738137082601e-05,
1602
+ "loss": 1.0442606210708618,
1603
+ "step": 446
1604
+ },
1605
+ {
1606
+ "epoch": 0.1890295358649789,
1607
+ "grad_norm": 0.6076085567474365,
1608
+ "learning_rate": 7.855887521968366e-05,
1609
+ "loss": 0.9828442335128784,
1610
+ "step": 448
1611
+ },
1612
+ {
1613
+ "epoch": 0.189873417721519,
1614
+ "grad_norm": 0.6418202519416809,
1615
+ "learning_rate": 7.89103690685413e-05,
1616
+ "loss": 1.0573710203170776,
1617
+ "step": 450
1618
+ },
1619
+ {
1620
+ "epoch": 0.19071729957805908,
1621
+ "grad_norm": 0.7055076360702515,
1622
+ "learning_rate": 7.926186291739895e-05,
1623
+ "loss": 1.0216103792190552,
1624
+ "step": 452
1625
+ },
1626
+ {
1627
+ "epoch": 0.19156118143459916,
1628
+ "grad_norm": 0.5668330192565918,
1629
+ "learning_rate": 7.961335676625659e-05,
1630
+ "loss": 0.9837722778320312,
1631
+ "step": 454
1632
+ },
1633
+ {
1634
+ "epoch": 0.19240506329113924,
1635
+ "grad_norm": 0.6419380307197571,
1636
+ "learning_rate": 7.996485061511424e-05,
1637
+ "loss": 1.0003894567489624,
1638
+ "step": 456
1639
+ },
1640
+ {
1641
+ "epoch": 0.19324894514767932,
1642
+ "grad_norm": 0.5949198007583618,
1643
+ "learning_rate": 8.031634446397188e-05,
1644
+ "loss": 1.0609031915664673,
1645
+ "step": 458
1646
+ },
1647
+ {
1648
+ "epoch": 0.1940928270042194,
1649
+ "grad_norm": 0.7032039761543274,
1650
+ "learning_rate": 8.066783831282952e-05,
1651
+ "loss": 1.0543403625488281,
1652
+ "step": 460
1653
+ },
1654
+ {
1655
+ "epoch": 0.1949367088607595,
1656
+ "grad_norm": 0.5775868892669678,
1657
+ "learning_rate": 8.101933216168718e-05,
1658
+ "loss": 0.9819303154945374,
1659
+ "step": 462
1660
+ },
1661
+ {
1662
+ "epoch": 0.19578059071729959,
1663
+ "grad_norm": 0.9301062226295471,
1664
+ "learning_rate": 8.137082601054482e-05,
1665
+ "loss": 1.0542067289352417,
1666
+ "step": 464
1667
+ },
1668
+ {
1669
+ "epoch": 0.19662447257383966,
1670
+ "grad_norm": 0.6193217039108276,
1671
+ "learning_rate": 8.172231985940246e-05,
1672
+ "loss": 0.9966341257095337,
1673
+ "step": 466
1674
+ },
1675
+ {
1676
+ "epoch": 0.19746835443037974,
1677
+ "grad_norm": 0.6286146640777588,
1678
+ "learning_rate": 8.20738137082601e-05,
1679
+ "loss": 1.0474121570587158,
1680
+ "step": 468
1681
+ },
1682
+ {
1683
+ "epoch": 0.19831223628691982,
1684
+ "grad_norm": 0.7418972253799438,
1685
+ "learning_rate": 8.242530755711776e-05,
1686
+ "loss": 0.9549239277839661,
1687
+ "step": 470
1688
+ },
1689
+ {
1690
+ "epoch": 0.1991561181434599,
1691
+ "grad_norm": 0.6122808456420898,
1692
+ "learning_rate": 8.27768014059754e-05,
1693
+ "loss": 1.0191338062286377,
1694
+ "step": 472
1695
+ },
1696
+ {
1697
+ "epoch": 0.2,
1698
+ "grad_norm": 0.6375362277030945,
1699
+ "learning_rate": 8.312829525483304e-05,
1700
+ "loss": 1.0987539291381836,
1701
+ "step": 474
1702
+ },
1703
+ {
1704
+ "epoch": 0.2008438818565401,
1705
+ "grad_norm": 0.6459513306617737,
1706
+ "learning_rate": 8.347978910369068e-05,
1707
+ "loss": 1.0369136333465576,
1708
+ "step": 476
1709
+ },
1710
+ {
1711
+ "epoch": 0.20168776371308017,
1712
+ "grad_norm": 0.7029640674591064,
1713
+ "learning_rate": 8.383128295254833e-05,
1714
+ "loss": 1.0582096576690674,
1715
+ "step": 478
1716
+ },
1717
+ {
1718
+ "epoch": 0.20253164556962025,
1719
+ "grad_norm": 0.6345387697219849,
1720
+ "learning_rate": 8.418277680140598e-05,
1721
+ "loss": 1.022916316986084,
1722
+ "step": 480
1723
+ },
1724
+ {
1725
+ "epoch": 0.20337552742616033,
1726
+ "grad_norm": 0.5764590501785278,
1727
+ "learning_rate": 8.453427065026362e-05,
1728
+ "loss": 0.973024308681488,
1729
+ "step": 482
1730
+ },
1731
+ {
1732
+ "epoch": 0.2042194092827004,
1733
+ "grad_norm": 0.5884482860565186,
1734
+ "learning_rate": 8.488576449912127e-05,
1735
+ "loss": 1.0292812585830688,
1736
+ "step": 484
1737
+ },
1738
+ {
1739
+ "epoch": 0.20506329113924052,
1740
+ "grad_norm": 0.616357147693634,
1741
+ "learning_rate": 8.523725834797891e-05,
1742
+ "loss": 1.0083447694778442,
1743
+ "step": 486
1744
+ },
1745
+ {
1746
+ "epoch": 0.2059071729957806,
1747
+ "grad_norm": 0.7671196460723877,
1748
+ "learning_rate": 8.558875219683656e-05,
1749
+ "loss": 0.9936985373497009,
1750
+ "step": 488
1751
+ },
1752
+ {
1753
+ "epoch": 0.20675105485232068,
1754
+ "grad_norm": 0.6197299957275391,
1755
+ "learning_rate": 8.59402460456942e-05,
1756
+ "loss": 1.051513910293579,
1757
+ "step": 490
1758
+ },
1759
+ {
1760
+ "epoch": 0.20759493670886076,
1761
+ "grad_norm": 0.6912890672683716,
1762
+ "learning_rate": 8.629173989455185e-05,
1763
+ "loss": 0.9474978446960449,
1764
+ "step": 492
1765
+ },
1766
+ {
1767
+ "epoch": 0.20843881856540084,
1768
+ "grad_norm": 0.6941592693328857,
1769
+ "learning_rate": 8.664323374340949e-05,
1770
+ "loss": 1.0671660900115967,
1771
+ "step": 494
1772
+ },
1773
+ {
1774
+ "epoch": 0.20928270042194091,
1775
+ "grad_norm": 0.5889528393745422,
1776
+ "learning_rate": 8.699472759226714e-05,
1777
+ "loss": 1.0020159482955933,
1778
+ "step": 496
1779
+ },
1780
+ {
1781
+ "epoch": 0.21012658227848102,
1782
+ "grad_norm": 0.6478549838066101,
1783
+ "learning_rate": 8.734622144112478e-05,
1784
+ "loss": 1.0165860652923584,
1785
+ "step": 498
1786
+ },
1787
+ {
1788
+ "epoch": 0.2109704641350211,
1789
+ "grad_norm": 0.6018255949020386,
1790
+ "learning_rate": 8.769771528998243e-05,
1791
+ "loss": 0.8798263072967529,
1792
+ "step": 500
1793
+ },
1794
+ {
+ "epoch": 0.2109704641350211,
+ "eval_loss": 1.042096495628357,
+ "eval_runtime": 692.4361,
+ "eval_samples_per_second": 3.043,
+ "eval_steps_per_second": 3.043,
+ "step": 500
+ },
1802
+ {
1803
+ "epoch": 0.21181434599156118,
1804
+ "grad_norm": 0.578990638256073,
1805
+ "learning_rate": 8.804920913884007e-05,
1806
+ "loss": 1.092096209526062,
1807
+ "step": 502
1808
+ },
1809
+ {
1810
+ "epoch": 0.21265822784810126,
1811
+ "grad_norm": 0.6597883701324463,
1812
+ "learning_rate": 8.840070298769771e-05,
1813
+ "loss": 1.0413451194763184,
1814
+ "step": 504
1815
+ },
1816
+ {
1817
+ "epoch": 0.21350210970464134,
1818
+ "grad_norm": 0.6660305261611938,
1819
+ "learning_rate": 8.875219683655536e-05,
1820
+ "loss": 1.0073142051696777,
1821
+ "step": 506
1822
+ },
1823
+ {
1824
+ "epoch": 0.21434599156118145,
1825
+ "grad_norm": 0.6283115148544312,
1826
+ "learning_rate": 8.910369068541301e-05,
1827
+ "loss": 1.0319768190383911,
1828
+ "step": 508
1829
+ },
1830
+ {
1831
+ "epoch": 0.21518987341772153,
1832
+ "grad_norm": 0.6257343292236328,
1833
+ "learning_rate": 8.945518453427065e-05,
1834
+ "loss": 1.0046353340148926,
1835
+ "step": 510
1836
+ },
1837
+ {
1838
+ "epoch": 0.2160337552742616,
1839
+ "grad_norm": 0.5530875325202942,
1840
+ "learning_rate": 8.980667838312829e-05,
1841
+ "loss": 0.9169099926948547,
1842
+ "step": 512
1843
+ },
1844
+ {
1845
+ "epoch": 0.2168776371308017,
1846
+ "grad_norm": 0.5369633436203003,
1847
+ "learning_rate": 9.015817223198594e-05,
1848
+ "loss": 0.9081505537033081,
1849
+ "step": 514
1850
+ },
1851
+ {
1852
+ "epoch": 0.21772151898734177,
1853
+ "grad_norm": 0.6618232131004333,
1854
+ "learning_rate": 9.05096660808436e-05,
1855
+ "loss": 0.9165045022964478,
1856
+ "step": 516
1857
+ },
1858
+ {
1859
+ "epoch": 0.21856540084388185,
1860
+ "grad_norm": 0.600666344165802,
1861
+ "learning_rate": 9.086115992970123e-05,
1862
+ "loss": 0.91348797082901,
1863
+ "step": 518
1864
+ },
1865
+ {
1866
+ "epoch": 0.21940928270042195,
1867
+ "grad_norm": 0.5919831991195679,
1868
+ "learning_rate": 9.121265377855887e-05,
1869
+ "loss": 1.006508469581604,
1870
+ "step": 520
1871
+ },
1872
+ {
1873
+ "epoch": 0.22025316455696203,
1874
+ "grad_norm": 0.688058614730835,
1875
+ "learning_rate": 9.156414762741654e-05,
1876
+ "loss": 1.0013236999511719,
1877
+ "step": 522
1878
+ },
1879
+ {
1880
+ "epoch": 0.2210970464135021,
1881
+ "grad_norm": 0.6721227765083313,
1882
+ "learning_rate": 9.191564147627418e-05,
1883
+ "loss": 1.0909923315048218,
1884
+ "step": 524
1885
+ },
1886
+ {
1887
+ "epoch": 0.2219409282700422,
1888
+ "grad_norm": 0.5987313389778137,
1889
+ "learning_rate": 9.226713532513181e-05,
1890
+ "loss": 1.0117096900939941,
1891
+ "step": 526
1892
+ },
1893
+ {
1894
+ "epoch": 0.22278481012658227,
1895
+ "grad_norm": 0.6191489696502686,
1896
+ "learning_rate": 9.261862917398945e-05,
1897
+ "loss": 1.0153647661209106,
1898
+ "step": 528
1899
+ },
1900
+ {
1901
+ "epoch": 0.22362869198312235,
1902
+ "grad_norm": 0.6821563243865967,
1903
+ "learning_rate": 9.29701230228471e-05,
1904
+ "loss": 0.9649755954742432,
1905
+ "step": 530
1906
+ },
1907
+ {
1908
+ "epoch": 0.22447257383966246,
1909
+ "grad_norm": 1.760398268699646,
1910
+ "learning_rate": 9.332161687170476e-05,
1911
+ "loss": 0.8673232197761536,
1912
+ "step": 532
1913
+ },
1914
+ {
1915
+ "epoch": 0.22531645569620254,
1916
+ "grad_norm": 0.6670058369636536,
1917
+ "learning_rate": 9.36731107205624e-05,
1918
+ "loss": 0.9942440986633301,
1919
+ "step": 534
1920
+ },
1921
+ {
1922
+ "epoch": 0.22616033755274262,
1923
+ "grad_norm": 0.7345916032791138,
1924
+ "learning_rate": 9.402460456942003e-05,
1925
+ "loss": 1.0364389419555664,
1926
+ "step": 536
1927
+ },
1928
+ {
1929
+ "epoch": 0.2270042194092827,
1930
+ "grad_norm": 0.5946128964424133,
1931
+ "learning_rate": 9.437609841827768e-05,
1932
+ "loss": 0.9314924478530884,
1933
+ "step": 538
1934
+ },
1935
+ {
1936
+ "epoch": 0.22784810126582278,
1937
+ "grad_norm": 0.5800848603248596,
1938
+ "learning_rate": 9.472759226713534e-05,
1939
+ "loss": 1.0694862604141235,
1940
+ "step": 540
1941
+ },
1942
+ {
1943
+ "epoch": 0.22869198312236286,
1944
+ "grad_norm": 0.6712192893028259,
1945
+ "learning_rate": 9.507908611599297e-05,
1946
+ "loss": 1.03531014919281,
1947
+ "step": 542
1948
+ },
1949
+ {
1950
+ "epoch": 0.22953586497890296,
1951
+ "grad_norm": 0.5641416311264038,
1952
+ "learning_rate": 9.543057996485063e-05,
1953
+ "loss": 0.9795235991477966,
1954
+ "step": 544
1955
+ },
1956
+ {
1957
+ "epoch": 0.23037974683544304,
1958
+ "grad_norm": 0.50412517786026,
1959
+ "learning_rate": 9.578207381370826e-05,
1960
+ "loss": 0.9641494750976562,
1961
+ "step": 546
1962
+ },
1963
+ {
1964
+ "epoch": 0.23122362869198312,
1965
+ "grad_norm": 0.579118549823761,
1966
+ "learning_rate": 9.61335676625659e-05,
1967
+ "loss": 0.9375281929969788,
1968
+ "step": 548
1969
+ },
1970
+ {
1971
+ "epoch": 0.2320675105485232,
1972
+ "grad_norm": 0.5888341665267944,
1973
+ "learning_rate": 9.648506151142355e-05,
1974
+ "loss": 0.9414046406745911,
1975
+ "step": 550
1976
+ },
1977
+ {
1978
+ "epoch": 0.23291139240506328,
1979
+ "grad_norm": 0.5595056414604187,
1980
+ "learning_rate": 9.68365553602812e-05,
1981
+ "loss": 0.9005617499351501,
1982
+ "step": 552
1983
+ },
1984
+ {
1985
+ "epoch": 0.23375527426160336,
1986
+ "grad_norm": 0.6605326533317566,
1987
+ "learning_rate": 9.718804920913884e-05,
1988
+ "loss": 1.0283968448638916,
1989
+ "step": 554
1990
+ },
1991
+ {
1992
+ "epoch": 0.23459915611814347,
1993
+ "grad_norm": 0.5657313466072083,
1994
+ "learning_rate": 9.753954305799648e-05,
1995
+ "loss": 1.0058249235153198,
1996
+ "step": 556
1997
+ },
1998
+ {
1999
+ "epoch": 0.23544303797468355,
2000
+ "grad_norm": 0.5433364510536194,
2001
+ "learning_rate": 9.789103690685413e-05,
2002
+ "loss": 0.9835494756698608,
2003
+ "step": 558
2004
+ },
2005
+ {
2006
+ "epoch": 0.23628691983122363,
2007
+ "grad_norm": 0.6129802465438843,
2008
+ "learning_rate": 9.824253075571179e-05,
2009
+ "loss": 1.054532527923584,
2010
+ "step": 560
2011
+ },
2012
+ {
2013
+ "epoch": 0.2371308016877637,
2014
+ "grad_norm": 0.6496239304542542,
2015
+ "learning_rate": 9.859402460456942e-05,
2016
+ "loss": 1.0240973234176636,
2017
+ "step": 562
2018
+ },
2019
+ {
2020
+ "epoch": 0.2379746835443038,
2021
+ "grad_norm": 0.6380873918533325,
2022
+ "learning_rate": 9.894551845342706e-05,
2023
+ "loss": 1.0229179859161377,
2024
+ "step": 564
2025
+ },
2026
+ {
2027
+ "epoch": 0.23881856540084387,
2028
+ "grad_norm": 0.6151993870735168,
2029
+ "learning_rate": 9.929701230228471e-05,
2030
+ "loss": 1.0111570358276367,
2031
+ "step": 566
2032
+ },
2033
+ {
2034
+ "epoch": 0.23966244725738398,
2035
+ "grad_norm": 0.5727584958076477,
2036
+ "learning_rate": 9.964850615114237e-05,
2037
+ "loss": 0.9450829029083252,
2038
+ "step": 568
2039
+ },
2040
+ {
2041
+ "epoch": 0.24050632911392406,
2042
+ "grad_norm": 0.6620725989341736,
2043
+ "learning_rate": 0.0001,
2044
+ "loss": 0.9800319075584412,
2045
+ "step": 570
2046
+ },
2047
+ {
2048
+ "epoch": 0.24135021097046414,
2049
+ "grad_norm": 0.6151163578033447,
2050
+ "learning_rate": 0.00010035149384885764,
2051
+ "loss": 0.9757438898086548,
2052
+ "step": 572
2053
+ },
2054
+ {
2055
+ "epoch": 0.24219409282700421,
2056
+ "grad_norm": 0.5672140717506409,
2057
+ "learning_rate": 0.0001007029876977153,
2058
+ "loss": 0.9104921817779541,
2059
+ "step": 574
2060
+ },
2061
+ {
2062
+ "epoch": 0.2430379746835443,
2063
+ "grad_norm": 0.5697256326675415,
2064
+ "learning_rate": 0.00010105448154657293,
2065
+ "loss": 1.1027376651763916,
2066
+ "step": 576
2067
+ },
2068
+ {
2069
+ "epoch": 0.2438818565400844,
2070
+ "grad_norm": 0.5590381622314453,
2071
+ "learning_rate": 0.00010140597539543057,
2072
+ "loss": 1.0055404901504517,
2073
+ "step": 578
2074
+ },
2075
+ {
2076
+ "epoch": 0.24472573839662448,
2077
+ "grad_norm": 0.5518567562103271,
2078
+ "learning_rate": 0.00010175746924428824,
2079
+ "loss": 1.020835518836975,
2080
+ "step": 580
2081
+ },
2082
+ {
2083
+ "epoch": 0.24556962025316456,
2084
+ "grad_norm": 0.6338496208190918,
2085
+ "learning_rate": 0.00010210896309314588,
2086
+ "loss": 0.9528344869613647,
2087
+ "step": 582
2088
+ },
2089
+ {
2090
+ "epoch": 0.24641350210970464,
2091
+ "grad_norm": 0.6497329473495483,
2092
+ "learning_rate": 0.00010246045694200353,
2093
+ "loss": 1.0088670253753662,
2094
+ "step": 584
2095
+ },
2096
+ {
2097
+ "epoch": 0.24725738396624472,
2098
+ "grad_norm": 0.49888095259666443,
2099
+ "learning_rate": 0.00010281195079086117,
2100
+ "loss": 0.9961200952529907,
2101
+ "step": 586
2102
+ },
2103
+ {
2104
+ "epoch": 0.2481012658227848,
2105
+ "grad_norm": 0.5680158734321594,
2106
+ "learning_rate": 0.0001031634446397188,
2107
+ "loss": 0.9635610580444336,
2108
+ "step": 588
2109
+ },
2110
+ {
2111
+ "epoch": 0.2489451476793249,
2112
+ "grad_norm": 0.658168375492096,
2113
+ "learning_rate": 0.00010351493848857646,
2114
+ "loss": 0.9392287135124207,
2115
+ "step": 590
2116
+ },
2117
+ {
2118
+ "epoch": 0.249789029535865,
2119
+ "grad_norm": 0.618262767791748,
2120
+ "learning_rate": 0.0001038664323374341,
2121
+ "loss": 0.9600516557693481,
2122
+ "step": 592
2123
+ },
2124
+ {
2125
+ "epoch": 0.25063291139240507,
2126
+ "grad_norm": 0.6003909111022949,
2127
+ "learning_rate": 0.00010421792618629173,
2128
+ "loss": 1.005476713180542,
2129
+ "step": 594
2130
+ },
2131
+ {
2132
+ "epoch": 0.2514767932489452,
2133
+ "grad_norm": 0.5437078475952148,
2134
+ "learning_rate": 0.00010456942003514938,
2135
+ "loss": 0.9523017406463623,
2136
+ "step": 596
2137
+ },
2138
+ {
2139
+ "epoch": 0.2523206751054852,
2140
+ "grad_norm": 0.5524541735649109,
2141
+ "learning_rate": 0.00010492091388400705,
2142
+ "loss": 0.9526668787002563,
2143
+ "step": 598
2144
+ },
2145
+ {
2146
+ "epoch": 0.25316455696202533,
2147
+ "grad_norm": 0.679504930973053,
2148
+ "learning_rate": 0.00010527240773286469,
2149
+ "loss": 1.019660472869873,
2150
+ "step": 600
2151
+ },
2152
+ {
+ "epoch": 0.25316455696202533,
+ "eval_loss": 1.0193854570388794,
+ "eval_runtime": 677.9523,
+ "eval_samples_per_second": 3.108,
+ "eval_steps_per_second": 3.108,
+ "step": 600
+ },
2160
+ {
2161
+ "epoch": 0.2540084388185654,
2162
+ "grad_norm": 0.5646136999130249,
2163
+ "learning_rate": 0.00010562390158172233,
2164
+ "loss": 0.9910882711410522,
2165
+ "step": 602
2166
+ },
2167
+ {
2168
+ "epoch": 0.2548523206751055,
2169
+ "grad_norm": 0.5238093137741089,
2170
+ "learning_rate": 0.00010597539543057998,
2171
+ "loss": 0.9616432785987854,
2172
+ "step": 604
2173
+ },
2174
+ {
2175
+ "epoch": 0.25569620253164554,
2176
+ "grad_norm": 0.7483857274055481,
2177
+ "learning_rate": 0.00010632688927943762,
2178
+ "loss": 1.0078275203704834,
2179
+ "step": 606
2180
+ },
2181
+ {
2182
+ "epoch": 0.25654008438818565,
2183
+ "grad_norm": 0.578948974609375,
2184
+ "learning_rate": 0.00010667838312829525,
2185
+ "loss": 0.9827103018760681,
2186
+ "step": 608
2187
+ },
2188
+ {
2189
+ "epoch": 0.25738396624472576,
2190
+ "grad_norm": 0.5525906085968018,
2191
+ "learning_rate": 0.00010702987697715289,
2192
+ "loss": 1.0423277616500854,
2193
+ "step": 610
2194
+ },
2195
+ {
2196
+ "epoch": 0.2582278481012658,
2197
+ "grad_norm": 0.6721326112747192,
2198
+ "learning_rate": 0.00010738137082601054,
2199
+ "loss": 0.9561693072319031,
2200
+ "step": 612
2201
+ },
2202
+ {
2203
+ "epoch": 0.2590717299578059,
2204
+ "grad_norm": 0.5701051354408264,
2205
+ "learning_rate": 0.00010773286467486821,
2206
+ "loss": 0.9602992534637451,
2207
+ "step": 614
2208
+ },
2209
+ {
2210
+ "epoch": 0.25991561181434597,
2211
+ "grad_norm": 0.6349860429763794,
2212
+ "learning_rate": 0.00010808435852372585,
2213
+ "loss": 1.1422650814056396,
2214
+ "step": 616
2215
+ },
2216
+ {
2217
+ "epoch": 0.2607594936708861,
2218
+ "grad_norm": 0.5496085286140442,
2219
+ "learning_rate": 0.00010843585237258349,
2220
+ "loss": 0.9762773513793945,
2221
+ "step": 618
2222
+ },
2223
+ {
2224
+ "epoch": 0.2616033755274262,
2225
+ "grad_norm": 0.6080722808837891,
2226
+ "learning_rate": 0.00010878734622144114,
2227
+ "loss": 1.0133616924285889,
2228
+ "step": 620
2229
+ },
2230
+ {
2231
+ "epoch": 0.26244725738396624,
2232
+ "grad_norm": 0.5450218915939331,
2233
+ "learning_rate": 0.00010913884007029878,
2234
+ "loss": 0.9385587573051453,
2235
+ "step": 622
2236
+ },
2237
+ {
2238
+ "epoch": 0.26329113924050634,
2239
+ "grad_norm": 0.592106819152832,
2240
+ "learning_rate": 0.00010949033391915641,
2241
+ "loss": 0.9359989762306213,
2242
+ "step": 624
2243
+ },
2244
+ {
2245
+ "epoch": 0.2641350210970464,
2246
+ "grad_norm": 0.6449427604675293,
2247
+ "learning_rate": 0.00010984182776801407,
2248
+ "loss": 1.0266027450561523,
2249
+ "step": 626
2250
+ },
2251
+ {
2252
+ "epoch": 0.2649789029535865,
2253
+ "grad_norm": 0.538299560546875,
2254
+ "learning_rate": 0.0001101933216168717,
2255
+ "loss": 0.9303187131881714,
2256
+ "step": 628
2257
+ },
2258
+ {
2259
+ "epoch": 0.26582278481012656,
2260
+ "grad_norm": 0.546316921710968,
2261
+ "learning_rate": 0.00011054481546572934,
2262
+ "loss": 0.9368857145309448,
2263
+ "step": 630
2264
+ },
2265
+ {
2266
+ "epoch": 0.26666666666666666,
2267
+ "grad_norm": 0.5818730592727661,
2268
+ "learning_rate": 0.00011089630931458701,
2269
+ "loss": 0.9573145508766174,
2270
+ "step": 632
2271
+ },
2272
+ {
2273
+ "epoch": 0.26751054852320677,
2274
+ "grad_norm": 0.5958262085914612,
2275
+ "learning_rate": 0.00011124780316344465,
2276
+ "loss": 0.9345449805259705,
2277
+ "step": 634
2278
+ },
2279
+ {
2280
+ "epoch": 0.2683544303797468,
2281
+ "grad_norm": 0.6259077787399292,
2282
+ "learning_rate": 0.0001115992970123023,
2283
+ "loss": 1.0906590223312378,
2284
+ "step": 636
2285
+ },
2286
+ {
2287
+ "epoch": 0.26919831223628693,
2288
+ "grad_norm": 0.589672863483429,
2289
+ "learning_rate": 0.00011195079086115994,
2290
+ "loss": 1.0757447481155396,
2291
+ "step": 638
2292
+ },
2293
+ {
2294
+ "epoch": 0.270042194092827,
2295
+ "grad_norm": 0.5714080333709717,
2296
+ "learning_rate": 0.00011230228471001758,
2297
+ "loss": 0.9310855269432068,
2298
+ "step": 640
2299
+ },
2300
+ {
2301
+ "epoch": 0.2708860759493671,
2302
+ "grad_norm": 0.45342639088630676,
2303
+ "learning_rate": 0.00011265377855887523,
2304
+ "loss": 0.9276360273361206,
2305
+ "step": 642
2306
+ },
2307
+ {
2308
+ "epoch": 0.2717299578059072,
2309
+ "grad_norm": 0.6386750340461731,
2310
+ "learning_rate": 0.00011300527240773287,
2311
+ "loss": 1.084719181060791,
2312
+ "step": 644
2313
+ },
2314
+ {
2315
+ "epoch": 0.27257383966244725,
2316
+ "grad_norm": 0.6446163654327393,
2317
+ "learning_rate": 0.0001133567662565905,
2318
+ "loss": 0.9763918519020081,
2319
+ "step": 646
2320
+ },
2321
+ {
2322
+ "epoch": 0.27341772151898736,
2323
+ "grad_norm": 0.5925686359405518,
2324
+ "learning_rate": 0.00011370826010544816,
2325
+ "loss": 0.9517921805381775,
2326
+ "step": 648
2327
+ },
2328
+ {
2329
+ "epoch": 0.2742616033755274,
2330
+ "grad_norm": 0.5399773716926575,
2331
+ "learning_rate": 0.00011405975395430582,
2332
+ "loss": 1.0587927103042603,
2333
+ "step": 650
2334
+ },
2335
+ {
2336
+ "epoch": 0.2751054852320675,
2337
+ "grad_norm": 0.5872456431388855,
2338
+ "learning_rate": 0.00011441124780316346,
2339
+ "loss": 0.883341908454895,
2340
+ "step": 652
2341
+ },
2342
+ {
2343
+ "epoch": 0.2759493670886076,
2344
+ "grad_norm": 0.5574564337730408,
2345
+ "learning_rate": 0.0001147627416520211,
2346
+ "loss": 1.0306891202926636,
2347
+ "step": 654
2348
+ },
2349
+ {
2350
+ "epoch": 0.2767932489451477,
2351
+ "grad_norm": 0.47789013385772705,
2352
+ "learning_rate": 0.00011511423550087874,
2353
+ "loss": 0.8814032077789307,
2354
+ "step": 656
2355
+ },
2356
+ {
2357
+ "epoch": 0.2776371308016878,
2358
+ "grad_norm": 0.5565530061721802,
2359
+ "learning_rate": 0.00011546572934973639,
2360
+ "loss": 0.9460552334785461,
2361
+ "step": 658
2362
+ },
2363
+ {
2364
+ "epoch": 0.27848101265822783,
2365
+ "grad_norm": 0.5299761295318604,
2366
+ "learning_rate": 0.00011581722319859403,
2367
+ "loss": 0.9475110769271851,
2368
+ "step": 660
2369
+ },
2370
+ {
2371
+ "epoch": 0.27932489451476794,
2372
+ "grad_norm": 0.6503344178199768,
2373
+ "learning_rate": 0.00011616871704745166,
2374
+ "loss": 1.0630913972854614,
2375
+ "step": 662
2376
+ },
2377
+ {
2378
+ "epoch": 0.280168776371308,
2379
+ "grad_norm": 0.5794585943222046,
2380
+ "learning_rate": 0.00011652021089630932,
2381
+ "loss": 0.9389138221740723,
2382
+ "step": 664
2383
+ },
2384
+ {
2385
+ "epoch": 0.2810126582278481,
2386
+ "grad_norm": 0.5762867331504822,
2387
+ "learning_rate": 0.00011687170474516695,
2388
+ "loss": 0.8934136033058167,
2389
+ "step": 666
2390
+ },
2391
+ {
2392
+ "epoch": 0.2818565400843882,
2393
+ "grad_norm": 0.6565435528755188,
2394
+ "learning_rate": 0.00011722319859402462,
2395
+ "loss": 1.1072614192962646,
2396
+ "step": 668
2397
+ },
2398
+ {
2399
+ "epoch": 0.28270042194092826,
2400
+ "grad_norm": 0.5819830298423767,
2401
+ "learning_rate": 0.00011757469244288226,
2402
+ "loss": 1.0501434803009033,
2403
+ "step": 670
2404
+ },
2405
+ {
2406
+ "epoch": 0.28354430379746837,
2407
+ "grad_norm": 0.6071487069129944,
2408
+ "learning_rate": 0.00011792618629173991,
2409
+ "loss": 0.9880793690681458,
2410
+ "step": 672
2411
+ },
2412
+ {
2413
+ "epoch": 0.2843881856540084,
2414
+ "grad_norm": 0.5765058398246765,
2415
+ "learning_rate": 0.00011827768014059755,
2416
+ "loss": 0.9670693874359131,
2417
+ "step": 674
2418
+ },
2419
+ {
2420
+ "epoch": 0.2852320675105485,
2421
+ "grad_norm": 0.5245351791381836,
2422
+ "learning_rate": 0.00011862917398945519,
2423
+ "loss": 0.9602360725402832,
2424
+ "step": 676
2425
+ },
2426
+ {
2427
+ "epoch": 0.28607594936708863,
2428
+ "grad_norm": 0.6189922094345093,
2429
+ "learning_rate": 0.00011898066783831282,
2430
+ "loss": 0.9684560894966125,
2431
+ "step": 678
2432
+ },
2433
+ {
2434
+ "epoch": 0.2869198312236287,
2435
+ "grad_norm": 0.6138690710067749,
2436
+ "learning_rate": 0.00011933216168717048,
2437
+ "loss": 0.9465792775154114,
2438
+ "step": 680
2439
+ },
2440
+ {
2441
+ "epoch": 0.2877637130801688,
2442
+ "grad_norm": 0.5371595621109009,
2443
+ "learning_rate": 0.00011968365553602812,
2444
+ "loss": 0.8495944738388062,
2445
+ "step": 682
2446
+ },
2447
+ {
2448
+ "epoch": 0.28860759493670884,
2449
+ "grad_norm": 0.5549944639205933,
2450
+ "learning_rate": 0.00012003514938488578,
2451
+ "loss": 0.9663267135620117,
2452
+ "step": 684
2453
+ },
2454
+ {
2455
+ "epoch": 0.28945147679324895,
2456
+ "grad_norm": 0.6484189033508301,
2457
+ "learning_rate": 0.00012038664323374342,
2458
+ "loss": 0.9736058712005615,
2459
+ "step": 686
2460
+ },
2461
+ {
2462
+ "epoch": 0.290295358649789,
2463
+ "grad_norm": 0.540351390838623,
2464
+ "learning_rate": 0.00012073813708260107,
2465
+ "loss": 1.0591845512390137,
2466
+ "step": 688
2467
+ },
2468
+ {
2469
+ "epoch": 0.2911392405063291,
2470
+ "grad_norm": 0.5657922029495239,
2471
+ "learning_rate": 0.00012108963093145871,
2472
+ "loss": 0.944908618927002,
2473
+ "step": 690
2474
+ },
2475
+ {
2476
+ "epoch": 0.2919831223628692,
2477
+ "grad_norm": 0.6040505170822144,
2478
+ "learning_rate": 0.00012144112478031635,
2479
+ "loss": 1.0018219947814941,
2480
+ "step": 692
2481
+ },
2482
+ {
2483
+ "epoch": 0.29282700421940927,
2484
+ "grad_norm": 0.5435477495193481,
2485
+ "learning_rate": 0.000121792618629174,
2486
+ "loss": 1.0351502895355225,
2487
+ "step": 694
2488
+ },
2489
+ {
2490
+ "epoch": 0.2936708860759494,
2491
+ "grad_norm": 0.5712518692016602,
2492
+ "learning_rate": 0.00012214411247803164,
2493
+ "loss": 0.9935672283172607,
2494
+ "step": 696
2495
+ },
2496
+ {
2497
+ "epoch": 0.29451476793248943,
2498
+ "grad_norm": 0.6138222813606262,
2499
+ "learning_rate": 0.00012249560632688928,
2500
+ "loss": 1.0165108442306519,
2501
+ "step": 698
2502
+ },
2503
+ {
2504
+ "epoch": 0.29535864978902954,
2505
+ "grad_norm": 0.4495212435722351,
2506
+ "learning_rate": 0.00012284710017574691,
2507
+ "loss": 0.9334425926208496,
2508
+ "step": 700
2509
+ },
2510
+ {
+ "epoch": 0.29535864978902954,
+ "eval_loss": 0.996929407119751,
+ "eval_runtime": 668.6398,
+ "eval_samples_per_second": 3.151,
+ "eval_steps_per_second": 3.151,
+ "step": 700
+ },
2518
+ {
2519
+ "epoch": 0.29620253164556964,
2520
+ "grad_norm": 0.5321539044380188,
2521
+ "learning_rate": 0.00012319859402460458,
2522
+ "loss": 1.0516537427902222,
2523
+ "step": 702
2524
+ },
2525
+ {
2526
+ "epoch": 0.2970464135021097,
2527
+ "grad_norm": 0.5716516971588135,
2528
+ "learning_rate": 0.00012355008787346222,
2529
+ "loss": 0.9387198686599731,
2530
+ "step": 704
2531
+ },
2532
+ {
2533
+ "epoch": 0.2978902953586498,
2534
+ "grad_norm": 0.5617920160293579,
2535
+ "learning_rate": 0.00012390158172231988,
2536
+ "loss": 0.99737948179245,
2537
+ "step": 706
2538
+ },
2539
+ {
2540
+ "epoch": 0.29873417721518986,
2541
+ "grad_norm": 0.4922899007797241,
2542
+ "learning_rate": 0.00012425307557117752,
2543
+ "loss": 0.9955025911331177,
2544
+ "step": 708
2545
+ },
2546
+ {
2547
+ "epoch": 0.29957805907172996,
2548
+ "grad_norm": 0.543501615524292,
2549
+ "learning_rate": 0.00012460456942003516,
2550
+ "loss": 0.9124280214309692,
2551
+ "step": 710
2552
+ },
2553
+ {
2554
+ "epoch": 0.30042194092827,
2555
+ "grad_norm": 0.49590054154396057,
2556
+ "learning_rate": 0.0001249560632688928,
2557
+ "loss": 0.9820216298103333,
2558
+ "step": 712
2559
+ },
2560
+ {
2561
+ "epoch": 0.3012658227848101,
2562
+ "grad_norm": 0.5984305739402771,
2563
+ "learning_rate": 0.00012530755711775044,
2564
+ "loss": 1.0152074098587036,
2565
+ "step": 714
2566
+ },
2567
+ {
2568
+ "epoch": 0.30210970464135023,
2569
+ "grad_norm": 0.9343504905700684,
2570
+ "learning_rate": 0.00012565905096660807,
2571
+ "loss": 1.0577725172042847,
2572
+ "step": 716
2573
+ },
2574
+ {
2575
+ "epoch": 0.3029535864978903,
2576
+ "grad_norm": 0.5118702054023743,
2577
+ "learning_rate": 0.0001260105448154657,
2578
+ "loss": 0.9830358028411865,
2579
+ "step": 718
2580
+ },
2581
+ {
2582
+ "epoch": 0.3037974683544304,
2583
+ "grad_norm": 0.4940392076969147,
2584
+ "learning_rate": 0.00012636203866432338,
2585
+ "loss": 0.9466043710708618,
2586
+ "step": 720
2587
+ },
2588
+ {
2589
+ "epoch": 0.30464135021097044,
2590
+ "grad_norm": 0.5965693593025208,
2591
+ "learning_rate": 0.00012671353251318104,
2592
+ "loss": 1.015270709991455,
2593
+ "step": 722
2594
+ },
2595
+ {
2596
+ "epoch": 0.30548523206751055,
2597
+ "grad_norm": 0.5020529627799988,
2598
+ "learning_rate": 0.00012706502636203868,
2599
+ "loss": 0.9703927636146545,
2600
+ "step": 724
2601
+ },
2602
+ {
2603
+ "epoch": 0.30632911392405066,
2604
+ "grad_norm": 0.6067010164260864,
2605
+ "learning_rate": 0.00012741652021089632,
2606
+ "loss": 1.0255526304244995,
2607
+ "step": 726
2608
+ },
2609
+ {
2610
+ "epoch": 0.3071729957805907,
2611
+ "grad_norm": 0.5931884050369263,
2612
+ "learning_rate": 0.00012776801405975396,
2613
+ "loss": 0.9335633516311646,
2614
+ "step": 728
2615
+ },
2616
+ {
2617
+ "epoch": 0.3080168776371308,
2618
+ "grad_norm": 0.5938752293586731,
2619
+ "learning_rate": 0.0001281195079086116,
2620
+ "loss": 1.0921578407287598,
2621
+ "step": 730
2622
+ },
2623
+ {
2624
+ "epoch": 0.30886075949367087,
2625
+ "grad_norm": 0.49728086590766907,
2626
+ "learning_rate": 0.00012847100175746923,
2627
+ "loss": 0.963066041469574,
2628
+ "step": 732
2629
+ },
2630
+ {
2631
+ "epoch": 0.309704641350211,
2632
+ "grad_norm": 0.5452080965042114,
2633
+ "learning_rate": 0.0001288224956063269,
2634
+ "loss": 0.9513075351715088,
2635
+ "step": 734
2636
+ },
2637
+ {
2638
+ "epoch": 0.3105485232067511,
2639
+ "grad_norm": 0.5497731566429138,
2640
+ "learning_rate": 0.00012917398945518454,
2641
+ "loss": 0.8576077222824097,
2642
+ "step": 736
2643
+ },
2644
+ {
2645
+ "epoch": 0.31139240506329113,
2646
+ "grad_norm": 0.5580397248268127,
2647
+ "learning_rate": 0.0001295254833040422,
2648
+ "loss": 0.9542577862739563,
2649
+ "step": 738
2650
+ },
2651
+ {
2652
+ "epoch": 0.31223628691983124,
2653
+ "grad_norm": 0.5890427827835083,
2654
+ "learning_rate": 0.00012987697715289984,
2655
+ "loss": 0.8992732167243958,
2656
+ "step": 740
2657
+ },
2658
+ {
2659
+ "epoch": 0.3130801687763713,
2660
+ "grad_norm": 0.5942965745925903,
2661
+ "learning_rate": 0.00013022847100175748,
2662
+ "loss": 1.0322896242141724,
2663
+ "step": 742
2664
+ },
2665
+ {
2666
+ "epoch": 0.3139240506329114,
2667
+ "grad_norm": 0.6341713070869446,
2668
+ "learning_rate": 0.00013057996485061512,
2669
+ "loss": 0.9217103719711304,
2670
+ "step": 744
2671
+ },
2672
+ {
2673
+ "epoch": 0.31476793248945145,
2674
+ "grad_norm": 0.5294105410575867,
2675
+ "learning_rate": 0.00013093145869947276,
2676
+ "loss": 0.951789915561676,
2677
+ "step": 746
2678
+ },
2679
+ {
2680
+ "epoch": 0.31561181434599156,
2681
+ "grad_norm": 0.6372058391571045,
2682
+ "learning_rate": 0.0001312829525483304,
2683
+ "loss": 0.9459875226020813,
2684
+ "step": 748
2685
+ },
2686
+ {
2687
+ "epoch": 0.31645569620253167,
2688
+ "grad_norm": 0.5979796648025513,
2689
+ "learning_rate": 0.00013163444639718806,
2690
+ "loss": 0.9626097679138184,
2691
+ "step": 750
2692
+ },
2693
+ {
2694
+ "epoch": 0.3172995780590717,
2695
+ "grad_norm": 0.5682399868965149,
2696
+ "learning_rate": 0.0001319859402460457,
2697
+ "loss": 1.0261781215667725,
2698
+ "step": 752
2699
+ },
2700
+ {
2701
+ "epoch": 0.3181434599156118,
2702
+ "grad_norm": 0.5349125266075134,
2703
+ "learning_rate": 0.00013233743409490336,
2704
+ "loss": 0.9319828152656555,
2705
+ "step": 754
2706
+ },
2707
+ {
2708
+ "epoch": 0.3189873417721519,
2709
+ "grad_norm": 0.6093934178352356,
2710
+ "learning_rate": 0.000132688927943761,
2711
+ "loss": 0.9216550588607788,
2712
+ "step": 756
2713
+ },
2714
+ {
2715
+ "epoch": 0.319831223628692,
2716
+ "grad_norm": 0.5188612341880798,
2717
+ "learning_rate": 0.00013304042179261864,
2718
+ "loss": 0.901739776134491,
2719
+ "step": 758
2720
+ },
2721
+ {
2722
+ "epoch": 0.3206751054852321,
2723
+ "grad_norm": 0.5877130627632141,
2724
+ "learning_rate": 0.00013339191564147628,
2725
+ "loss": 1.0362589359283447,
2726
+ "step": 760
2727
+ },
2728
+ {
2729
+ "epoch": 0.32151898734177214,
2730
+ "grad_norm": 0.5542771816253662,
2731
+ "learning_rate": 0.00013374340949033392,
2732
+ "loss": 0.8787116408348083,
2733
+ "step": 762
2734
+ },
2735
+ {
2736
+ "epoch": 0.32236286919831225,
2737
+ "grad_norm": 0.5084902048110962,
2738
+ "learning_rate": 0.00013409490333919156,
2739
+ "loss": 0.9237037301063538,
2740
+ "step": 764
2741
+ },
2742
+ {
2743
+ "epoch": 0.3232067510548523,
2744
+ "grad_norm": 0.5461528301239014,
2745
+ "learning_rate": 0.00013444639718804922,
2746
+ "loss": 1.0150731801986694,
2747
+ "step": 766
2748
+ },
2749
+ {
2750
+ "epoch": 0.3240506329113924,
2751
+ "grad_norm": 0.53483647108078,
2752
+ "learning_rate": 0.00013479789103690686,
2753
+ "loss": 0.8985214829444885,
2754
+ "step": 768
2755
+ },
2756
+ {
2757
+ "epoch": 0.32489451476793246,
2758
+ "grad_norm": 0.5580531358718872,
2759
+ "learning_rate": 0.0001351493848857645,
2760
+ "loss": 1.0225775241851807,
2761
+ "step": 770
2762
+ },
2763
+ {
2764
+ "epoch": 0.32573839662447257,
2765
+ "grad_norm": 0.5203377604484558,
2766
+ "learning_rate": 0.00013550087873462216,
2767
+ "loss": 0.9571293592453003,
2768
+ "step": 772
2769
+ },
2770
+ {
2771
+ "epoch": 0.3265822784810127,
2772
+ "grad_norm": 0.5049671530723572,
2773
+ "learning_rate": 0.0001358523725834798,
2774
+ "loss": 1.0468909740447998,
2775
+ "step": 774
2776
+ },
2777
+ {
2778
+ "epoch": 0.32742616033755273,
2779
+ "grad_norm": 0.4723063111305237,
2780
+ "learning_rate": 0.00013620386643233744,
2781
+ "loss": 0.7743215560913086,
2782
+ "step": 776
2783
+ },
2784
+ {
2785
+ "epoch": 0.32827004219409284,
2786
+ "grad_norm": 0.6310980916023254,
2787
+ "learning_rate": 0.00013655536028119508,
2788
+ "loss": 1.021510362625122,
2789
+ "step": 778
2790
+ },
2791
+ {
2792
+ "epoch": 0.3291139240506329,
2793
+ "grad_norm": 0.47066664695739746,
2794
+ "learning_rate": 0.00013690685413005274,
2795
+ "loss": 0.9134382605552673,
2796
+ "step": 780
2797
+ },
2798
+ {
2799
+ "epoch": 0.329957805907173,
2800
+ "grad_norm": 0.5725092887878418,
2801
+ "learning_rate": 0.00013725834797891038,
2802
+ "loss": 0.9797834753990173,
2803
+ "step": 782
2804
+ },
2805
+ {
2806
+ "epoch": 0.3308016877637131,
2807
+ "grad_norm": 0.5139563083648682,
2808
+ "learning_rate": 0.00013760984182776802,
2809
+ "loss": 0.9372621178627014,
2810
+ "step": 784
2811
+ },
2812
+ {
2813
+ "epoch": 0.33164556962025316,
2814
+ "grad_norm": 0.5275821685791016,
2815
+ "learning_rate": 0.00013796133567662566,
2816
+ "loss": 0.9528245329856873,
2817
+ "step": 786
2818
+ },
2819
+ {
2820
+ "epoch": 0.33248945147679326,
2821
+ "grad_norm": 0.5702582001686096,
2822
+ "learning_rate": 0.0001383128295254833,
2823
+ "loss": 0.9750176072120667,
2824
+ "step": 788
2825
+ },
2826
+ {
2827
+ "epoch": 0.3333333333333333,
2828
+ "grad_norm": 0.5281293392181396,
2829
+ "learning_rate": 0.00013866432337434096,
2830
+ "loss": 0.9412306547164917,
2831
+ "step": 790
2832
+ },
2833
+ {
2834
+ "epoch": 0.3341772151898734,
2835
+ "grad_norm": 0.5578986406326294,
2836
+ "learning_rate": 0.0001390158172231986,
2837
+ "loss": 0.997580885887146,
2838
+ "step": 792
2839
+ },
2840
+ {
2841
+ "epoch": 0.33502109704641353,
2842
+ "grad_norm": 0.50461345911026,
2843
+ "learning_rate": 0.00013936731107205624,
2844
+ "loss": 0.9082320928573608,
2845
+ "step": 794
2846
+ },
2847
+ {
2848
+ "epoch": 0.3358649789029536,
2849
+ "grad_norm": 0.5258530378341675,
2850
+ "learning_rate": 0.0001397188049209139,
2851
+ "loss": 1.0082844495773315,
2852
+ "step": 796
2853
+ },
2854
+ {
2855
+ "epoch": 0.3367088607594937,
2856
+ "grad_norm": 0.5548169016838074,
2857
+ "learning_rate": 0.00014007029876977154,
2858
+ "loss": 0.9729003310203552,
2859
+ "step": 798
2860
+ },
2861
+ {
2862
+ "epoch": 0.33755274261603374,
2863
+ "grad_norm": 0.48601076006889343,
2864
+ "learning_rate": 0.00014042179261862918,
2865
+ "loss": 0.9099526405334473,
2866
+ "step": 800
2867
+ },
2868
+ {
+ "epoch": 0.33755274261603374,
+ "eval_loss": 0.9800403714179993,
+ "eval_runtime": 678.8306,
+ "eval_samples_per_second": 3.104,
+ "eval_steps_per_second": 3.104,
+ "step": 800
+ },
2876
+ {
2877
+ "epoch": 0.33839662447257385,
2878
+ "grad_norm": 0.5413158535957336,
2879
+ "learning_rate": 0.00014077328646748682,
2880
+ "loss": 0.8610644936561584,
2881
+ "step": 802
2882
+ },
2883
+ {
2884
+ "epoch": 0.3392405063291139,
2885
+ "grad_norm": 0.5147035717964172,
2886
+ "learning_rate": 0.00014112478031634446,
2887
+ "loss": 0.9584825038909912,
2888
+ "step": 804
2889
+ },
2890
+ {
2891
+ "epoch": 0.340084388185654,
2892
+ "grad_norm": 0.5931771397590637,
2893
+ "learning_rate": 0.0001414762741652021,
2894
+ "loss": 1.0142558813095093,
2895
+ "step": 806
2896
+ },
2897
+ {
2898
+ "epoch": 0.3409282700421941,
2899
+ "grad_norm": 0.5178377032279968,
2900
+ "learning_rate": 0.00014182776801405976,
2901
+ "loss": 1.0078763961791992,
2902
+ "step": 808
2903
+ },
2904
+ {
2905
+ "epoch": 0.34177215189873417,
2906
+ "grad_norm": 0.5453237295150757,
2907
+ "learning_rate": 0.0001421792618629174,
2908
+ "loss": 0.9107215404510498,
2909
+ "step": 810
2910
+ },
2911
+ {
2912
+ "epoch": 0.3426160337552743,
2913
+ "grad_norm": 0.5886152982711792,
2914
+ "learning_rate": 0.00014253075571177506,
2915
+ "loss": 0.9981362819671631,
2916
+ "step": 812
2917
+ },
2918
+ {
2919
+ "epoch": 0.3434599156118143,
2920
+ "grad_norm": 0.48040178418159485,
2921
+ "learning_rate": 0.0001428822495606327,
2922
+ "loss": 0.9636131525039673,
2923
+ "step": 814
2924
+ },
2925
+ {
2926
+ "epoch": 0.34430379746835443,
2927
+ "grad_norm": 0.5011753439903259,
2928
+ "learning_rate": 0.00014323374340949034,
2929
+ "loss": 0.9590586423873901,
2930
+ "step": 816
2931
+ },
2932
+ {
2933
+ "epoch": 0.34514767932489454,
2934
+ "grad_norm": 0.57858806848526,
2935
+ "learning_rate": 0.00014358523725834798,
2936
+ "loss": 0.978246808052063,
2937
+ "step": 818
2938
+ },
2939
+ {
2940
+ "epoch": 0.3459915611814346,
2941
+ "grad_norm": 0.46092939376831055,
2942
+ "learning_rate": 0.00014393673110720562,
2943
+ "loss": 0.8549934029579163,
2944
+ "step": 820
2945
+ },
2946
+ {
2947
+ "epoch": 0.3468354430379747,
2948
+ "grad_norm": 0.5756489038467407,
2949
+ "learning_rate": 0.00014428822495606326,
2950
+ "loss": 0.9771265387535095,
2951
+ "step": 822
2952
+ },
2953
+ {
2954
+ "epoch": 0.34767932489451475,
2955
+ "grad_norm": 0.5501731634140015,
2956
+ "learning_rate": 0.00014463971880492092,
2957
+ "loss": 0.8739748001098633,
2958
+ "step": 824
2959
+ },
2960
+ {
2961
+ "epoch": 0.34852320675105486,
2962
+ "grad_norm": 0.5451868176460266,
2963
+ "learning_rate": 0.0001449912126537786,
2964
+ "loss": 0.9129468202590942,
2965
+ "step": 826
2966
+ },
2967
+ {
2968
+ "epoch": 0.3493670886075949,
2969
+ "grad_norm": 0.4624619781970978,
2970
+ "learning_rate": 0.00014534270650263623,
2971
+ "loss": 0.9196704030036926,
2972
+ "step": 828
2973
+ },
2974
+ {
2975
+ "epoch": 0.350210970464135,
2976
+ "grad_norm": 0.520878791809082,
2977
+ "learning_rate": 0.00014569420035149386,
2978
+ "loss": 0.9976527690887451,
2979
+ "step": 830
2980
+ },
2981
+ {
2982
+ "epoch": 0.3510548523206751,
2983
+ "grad_norm": 0.4469171464443207,
2984
+ "learning_rate": 0.0001460456942003515,
2985
+ "loss": 0.7753443717956543,
2986
+ "step": 832
2987
+ },
2988
+ {
2989
+ "epoch": 0.3518987341772152,
2990
+ "grad_norm": 0.5105249881744385,
2991
+ "learning_rate": 0.00014639718804920914,
2992
+ "loss": 0.9584846496582031,
2993
+ "step": 834
2994
+ },
2995
+ {
2996
+ "epoch": 0.3527426160337553,
2997
+ "grad_norm": 0.5043913125991821,
2998
+ "learning_rate": 0.00014674868189806678,
2999
+ "loss": 0.9013500213623047,
3000
+ "step": 836
3001
+ },
3002
+ {
3003
+ "epoch": 0.35358649789029534,
3004
+ "grad_norm": 0.575850784778595,
3005
+ "learning_rate": 0.00014710017574692442,
3006
+ "loss": 0.9775562286376953,
3007
+ "step": 838
3008
+ },
3009
+ {
3010
+ "epoch": 0.35443037974683544,
3011
+ "grad_norm": 0.5128876566886902,
3012
+ "learning_rate": 0.00014745166959578208,
3013
+ "loss": 0.9278940558433533,
3014
+ "step": 840
3015
+ },
3016
+ {
3017
+ "epoch": 0.35527426160337555,
3018
+ "grad_norm": 0.5757885575294495,
3019
+ "learning_rate": 0.00014780316344463975,
3020
+ "loss": 1.0091488361358643,
3021
+ "step": 842
3022
+ },
3023
+ {
3024
+ "epoch": 0.3561181434599156,
3025
+ "grad_norm": 0.500934898853302,
3026
+ "learning_rate": 0.00014815465729349739,
3027
+ "loss": 0.9286836981773376,
3028
+ "step": 844
3029
+ },
3030
+ {
3031
+ "epoch": 0.3569620253164557,
3032
+ "grad_norm": 0.5220686197280884,
3033
+ "learning_rate": 0.00014850615114235502,
3034
+ "loss": 0.9484171867370605,
3035
+ "step": 846
3036
+ },
3037
+ {
3038
+ "epoch": 0.35780590717299576,
3039
+ "grad_norm": 0.5494697690010071,
3040
+ "learning_rate": 0.00014885764499121266,
3041
+ "loss": 1.0556397438049316,
3042
+ "step": 848
3043
+ },
3044
+ {
3045
+ "epoch": 0.35864978902953587,
3046
+ "grad_norm": 0.46633943915367126,
3047
+ "learning_rate": 0.0001492091388400703,
3048
+ "loss": 0.933089017868042,
3049
+ "step": 850
3050
+ },
3051
+ {
3052
+ "epoch": 0.3594936708860759,
3053
+ "grad_norm": 0.4864962697029114,
3054
+ "learning_rate": 0.00014956063268892794,
3055
+ "loss": 1.0016963481903076,
3056
+ "step": 852
3057
+ },
3058
+ {
3059
+ "epoch": 0.36033755274261603,
3060
+ "grad_norm": 0.5032764673233032,
3061
+ "learning_rate": 0.0001499121265377856,
3062
+ "loss": 0.9085348844528198,
3063
+ "step": 854
3064
+ },
3065
+ {
3066
+ "epoch": 0.36118143459915614,
3067
+ "grad_norm": 0.5424998998641968,
3068
+ "learning_rate": 0.00015026362038664324,
3069
+ "loss": 0.923502504825592,
3070
+ "step": 856
3071
+ },
3072
+ {
3073
+ "epoch": 0.3620253164556962,
3074
+ "grad_norm": 0.5181655287742615,
3075
+ "learning_rate": 0.00015061511423550088,
3076
+ "loss": 0.919174313545227,
3077
+ "step": 858
3078
+ },
3079
+ {
3080
+ "epoch": 0.3628691983122363,
3081
+ "grad_norm": 0.5129443407058716,
3082
+ "learning_rate": 0.00015096660808435855,
3083
+ "loss": 0.851981520652771,
3084
+ "step": 860
3085
+ },
3086
+ {
3087
+ "epoch": 0.36371308016877635,
3088
+ "grad_norm": 0.49540698528289795,
3089
+ "learning_rate": 0.00015131810193321618,
3090
+ "loss": 0.8633858561515808,
3091
+ "step": 862
3092
+ },
3093
+ {
3094
+ "epoch": 0.36455696202531646,
3095
+ "grad_norm": 0.4706701934337616,
3096
+ "learning_rate": 0.00015166959578207382,
3097
+ "loss": 0.9473677277565002,
3098
+ "step": 864
3099
+ },
3100
+ {
3101
+ "epoch": 0.36540084388185656,
3102
+ "grad_norm": 0.587704598903656,
3103
+ "learning_rate": 0.00015202108963093146,
3104
+ "loss": 0.942383885383606,
3105
+ "step": 866
3106
+ },
3107
+ {
3108
+ "epoch": 0.3662447257383966,
3109
+ "grad_norm": 0.5851273536682129,
3110
+ "learning_rate": 0.0001523725834797891,
3111
+ "loss": 0.9811251163482666,
3112
+ "step": 868
3113
+ },
3114
+ {
3115
+ "epoch": 0.3670886075949367,
3116
+ "grad_norm": 0.46357613801956177,
3117
+ "learning_rate": 0.00015272407732864676,
3118
+ "loss": 0.8151084184646606,
3119
+ "step": 870
3120
+ },
3121
+ {
3122
+ "epoch": 0.3679324894514768,
3123
+ "grad_norm": 0.47250670194625854,
3124
+ "learning_rate": 0.0001530755711775044,
3125
+ "loss": 0.9023333191871643,
3126
+ "step": 872
3127
+ },
3128
+ {
3129
+ "epoch": 0.3687763713080169,
3130
+ "grad_norm": 0.47510042786598206,
3131
+ "learning_rate": 0.00015342706502636204,
3132
+ "loss": 0.8758499622344971,
3133
+ "step": 874
3134
+ },
3135
+ {
3136
+ "epoch": 0.369620253164557,
3137
+ "grad_norm": 0.5687124729156494,
3138
+ "learning_rate": 0.00015377855887521968,
3139
+ "loss": 0.9837421774864197,
3140
+ "step": 876
3141
+ },
3142
+ {
3143
+ "epoch": 0.37046413502109704,
3144
+ "grad_norm": 0.49064236879348755,
3145
+ "learning_rate": 0.00015413005272407735,
3146
+ "loss": 0.9084216356277466,
3147
+ "step": 878
3148
+ },
3149
+ {
3150
+ "epoch": 0.37130801687763715,
3151
+ "grad_norm": 0.5288164615631104,
3152
+ "learning_rate": 0.00015448154657293498,
3153
+ "loss": 0.8425542712211609,
3154
+ "step": 880
3155
+ },
3156
+ {
3157
+ "epoch": 0.3721518987341772,
3158
+ "grad_norm": 0.5446951985359192,
3159
+ "learning_rate": 0.00015483304042179262,
3160
+ "loss": 1.006197452545166,
3161
+ "step": 882
3162
+ },
3163
+ {
3164
+ "epoch": 0.3729957805907173,
3165
+ "grad_norm": 0.46872642636299133,
3166
+ "learning_rate": 0.00015518453427065026,
3167
+ "loss": 0.9779444932937622,
3168
+ "step": 884
3169
+ },
3170
+ {
3171
+ "epoch": 0.37383966244725736,
3172
+ "grad_norm": 0.4852714240550995,
3173
+ "learning_rate": 0.00015553602811950793,
3174
+ "loss": 0.9099963307380676,
3175
+ "step": 886
3176
+ },
3177
+ {
3178
+ "epoch": 0.37468354430379747,
3179
+ "grad_norm": 0.5219841003417969,
3180
+ "learning_rate": 0.00015588752196836556,
3181
+ "loss": 0.9730570316314697,
3182
+ "step": 888
3183
+ },
3184
+ {
3185
+ "epoch": 0.3755274261603376,
3186
+ "grad_norm": 0.5258626341819763,
3187
+ "learning_rate": 0.0001562390158172232,
3188
+ "loss": 0.9304586052894592,
3189
+ "step": 890
3190
+ },
3191
+ {
3192
+ "epoch": 0.3763713080168776,
3193
+ "grad_norm": 0.4266716241836548,
3194
+ "learning_rate": 0.00015659050966608084,
3195
+ "loss": 0.8680716156959534,
3196
+ "step": 892
3197
+ },
3198
+ {
3199
+ "epoch": 0.37721518987341773,
3200
+ "grad_norm": 0.46361327171325684,
3201
+ "learning_rate": 0.0001569420035149385,
3202
+ "loss": 0.8428018093109131,
3203
+ "step": 894
3204
+ },
3205
+ {
3206
+ "epoch": 0.3780590717299578,
3207
+ "grad_norm": 0.5313687920570374,
3208
+ "learning_rate": 0.00015729349736379614,
3209
+ "loss": 0.8465750217437744,
3210
+ "step": 896
3211
+ },
3212
+ {
3213
+ "epoch": 0.3789029535864979,
3214
+ "grad_norm": 0.47166210412979126,
3215
+ "learning_rate": 0.00015764499121265378,
3216
+ "loss": 0.9120327234268188,
3217
+ "step": 898
3218
+ },
3219
+ {
3220
+ "epoch": 0.379746835443038,
3221
+ "grad_norm": 0.4647318720817566,
3222
+ "learning_rate": 0.00015799648506151145,
3223
+ "loss": 0.8950425982475281,
3224
+ "step": 900
3225
+ },
3226
+ {
3227
+ "epoch": 0.379746835443038,
3228
+ "eval_loss": 0.9643027186393738,
3229
+ "eval_runtime": 691.7929,
3230
+ "eval_samples_per_second": 3.046,
3231
+ "eval_steps_per_second": 3.046,
3232
+ "step": 900
3233
+ },
3234
+ {
3235
+ "epoch": 0.38059071729957805,
3236
+ "grad_norm": 0.5445119738578796,
3237
+ "learning_rate": 0.00015834797891036909,
3238
+ "loss": 0.908163845539093,
3239
+ "step": 902
3240
+ },
3241
+ {
3242
+ "epoch": 0.38143459915611816,
3243
+ "grad_norm": 0.4311858117580414,
3244
+ "learning_rate": 0.00015869947275922672,
3245
+ "loss": 0.8945821523666382,
3246
+ "step": 904
3247
+ },
3248
+ {
3249
+ "epoch": 0.3822784810126582,
3250
+ "grad_norm": 0.5590984225273132,
3251
+ "learning_rate": 0.00015905096660808436,
3252
+ "loss": 0.9478458762168884,
3253
+ "step": 906
3254
+ },
3255
+ {
3256
+ "epoch": 0.3831223628691983,
3257
+ "grad_norm": 0.5470241904258728,
3258
+ "learning_rate": 0.000159402460456942,
3259
+ "loss": 0.9259957671165466,
3260
+ "step": 908
3261
+ },
3262
+ {
3263
+ "epoch": 0.38396624472573837,
3264
+ "grad_norm": 0.5498791337013245,
3265
+ "learning_rate": 0.00015975395430579964,
3266
+ "loss": 0.8824930787086487,
3267
+ "step": 910
3268
+ },
3269
+ {
3270
+ "epoch": 0.3848101265822785,
3271
+ "grad_norm": 0.4779198467731476,
3272
+ "learning_rate": 0.0001601054481546573,
3273
+ "loss": 0.8842340707778931,
3274
+ "step": 912
3275
+ },
3276
+ {
3277
+ "epoch": 0.3856540084388186,
3278
+ "grad_norm": 0.5390620827674866,
3279
+ "learning_rate": 0.00016045694200351494,
3280
+ "loss": 0.92950040102005,
3281
+ "step": 914
3282
+ },
3283
+ {
3284
+ "epoch": 0.38649789029535864,
3285
+ "grad_norm": 0.505519688129425,
3286
+ "learning_rate": 0.0001608084358523726,
3287
+ "loss": 0.8420897126197815,
3288
+ "step": 916
3289
+ },
3290
+ {
3291
+ "epoch": 0.38734177215189874,
3292
+ "grad_norm": 0.4463907778263092,
3293
+ "learning_rate": 0.00016115992970123025,
3294
+ "loss": 0.8256624341011047,
3295
+ "step": 918
3296
+ },
3297
+ {
3298
+ "epoch": 0.3881856540084388,
3299
+ "grad_norm": 0.5321422219276428,
3300
+ "learning_rate": 0.00016151142355008788,
3301
+ "loss": 0.8701168298721313,
3302
+ "step": 920
3303
+ },
3304
+ {
3305
+ "epoch": 0.3890295358649789,
3306
+ "grad_norm": 0.4343073070049286,
3307
+ "learning_rate": 0.00016186291739894552,
3308
+ "loss": 0.9019309878349304,
3309
+ "step": 922
3310
+ },
3311
+ {
3312
+ "epoch": 0.389873417721519,
3313
+ "grad_norm": 0.5311984419822693,
3314
+ "learning_rate": 0.00016221441124780316,
3315
+ "loss": 0.8560551404953003,
3316
+ "step": 924
3317
+ },
3318
+ {
3319
+ "epoch": 0.39071729957805906,
3320
+ "grad_norm": 0.588691771030426,
3321
+ "learning_rate": 0.0001625659050966608,
3322
+ "loss": 0.9700050354003906,
3323
+ "step": 926
3324
+ },
3325
+ {
3326
+ "epoch": 0.39156118143459917,
3327
+ "grad_norm": 0.5425586104393005,
3328
+ "learning_rate": 0.00016291739894551844,
3329
+ "loss": 1.043768048286438,
3330
+ "step": 928
3331
+ },
3332
+ {
3333
+ "epoch": 0.3924050632911392,
3334
+ "grad_norm": 0.5228736996650696,
3335
+ "learning_rate": 0.0001632688927943761,
3336
+ "loss": 0.9501712918281555,
3337
+ "step": 930
3338
+ },
3339
+ {
3340
+ "epoch": 0.39324894514767933,
3341
+ "grad_norm": 0.48960360884666443,
3342
+ "learning_rate": 0.00016362038664323377,
3343
+ "loss": 0.9223058223724365,
3344
+ "step": 932
3345
+ },
3346
+ {
3347
+ "epoch": 0.39409282700421944,
3348
+ "grad_norm": 0.45204755663871765,
3349
+ "learning_rate": 0.0001639718804920914,
3350
+ "loss": 0.9692960977554321,
3351
+ "step": 934
3352
+ },
3353
+ {
3354
+ "epoch": 0.3949367088607595,
3355
+ "grad_norm": 0.5299274921417236,
3356
+ "learning_rate": 0.00016432337434094905,
3357
+ "loss": 0.9467466473579407,
3358
+ "step": 936
3359
+ },
3360
+ {
3361
+ "epoch": 0.3957805907172996,
3362
+ "grad_norm": 0.5607715249061584,
3363
+ "learning_rate": 0.00016467486818980668,
3364
+ "loss": 0.9118053317070007,
3365
+ "step": 938
3366
+ },
3367
+ {
3368
+ "epoch": 0.39662447257383965,
3369
+ "grad_norm": 0.5271831154823303,
3370
+ "learning_rate": 0.00016502636203866432,
3371
+ "loss": 0.9131460189819336,
3372
+ "step": 940
3373
+ },
3374
+ {
3375
+ "epoch": 0.39746835443037976,
3376
+ "grad_norm": 0.5075286030769348,
3377
+ "learning_rate": 0.00016537785588752196,
3378
+ "loss": 0.9358300566673279,
3379
+ "step": 942
3380
+ },
3381
+ {
3382
+ "epoch": 0.3983122362869198,
3383
+ "grad_norm": 0.515731155872345,
3384
+ "learning_rate": 0.00016572934973637963,
3385
+ "loss": 0.8908210396766663,
3386
+ "step": 944
3387
+ },
3388
+ {
3389
+ "epoch": 0.3991561181434599,
3390
+ "grad_norm": 0.4856977164745331,
3391
+ "learning_rate": 0.00016608084358523726,
3392
+ "loss": 0.9775290489196777,
3393
+ "step": 946
3394
+ },
3395
+ {
3396
+ "epoch": 0.4,
3397
+ "grad_norm": 0.48846355080604553,
3398
+ "learning_rate": 0.00016643233743409493,
3399
+ "loss": 0.8957490921020508,
3400
+ "step": 948
3401
+ },
3402
+ {
3403
+ "epoch": 0.4008438818565401,
3404
+ "grad_norm": 0.42990800738334656,
3405
+ "learning_rate": 0.00016678383128295257,
3406
+ "loss": 0.9036174416542053,
3407
+ "step": 950
3408
+ },
3409
+ {
3410
+ "epoch": 0.4016877637130802,
3411
+ "grad_norm": 0.49552062153816223,
3412
+ "learning_rate": 0.0001671353251318102,
3413
+ "loss": 0.991032600402832,
3414
+ "step": 952
3415
+ },
3416
+ {
3417
+ "epoch": 0.40253164556962023,
3418
+ "grad_norm": 0.4565040171146393,
3419
+ "learning_rate": 0.00016748681898066784,
3420
+ "loss": 0.823063313961029,
3421
+ "step": 954
3422
+ },
3423
+ {
3424
+ "epoch": 0.40337552742616034,
3425
+ "grad_norm": 0.4290153682231903,
3426
+ "learning_rate": 0.00016783831282952548,
3427
+ "loss": 0.8785063624382019,
3428
+ "step": 956
3429
+ },
3430
+ {
3431
+ "epoch": 0.40421940928270045,
3432
+ "grad_norm": 0.5419702529907227,
3433
+ "learning_rate": 0.00016818980667838312,
3434
+ "loss": 0.8763971924781799,
3435
+ "step": 958
3436
+ },
3437
+ {
3438
+ "epoch": 0.4050632911392405,
3439
+ "grad_norm": 0.5177501440048218,
3440
+ "learning_rate": 0.00016854130052724079,
3441
+ "loss": 0.9470553398132324,
3442
+ "step": 960
3443
+ },
3444
+ {
3445
+ "epoch": 0.4059071729957806,
3446
+ "grad_norm": 0.539725661277771,
3447
+ "learning_rate": 0.00016889279437609842,
3448
+ "loss": 0.9235025644302368,
3449
+ "step": 962
3450
+ },
3451
+ {
3452
+ "epoch": 0.40675105485232066,
3453
+ "grad_norm": 0.5324983596801758,
3454
+ "learning_rate": 0.0001692442882249561,
3455
+ "loss": 1.0248996019363403,
3456
+ "step": 964
3457
+ },
3458
+ {
3459
+ "epoch": 0.40759493670886077,
3460
+ "grad_norm": 0.4936407506465912,
3461
+ "learning_rate": 0.00016959578207381373,
3462
+ "loss": 0.9076873660087585,
3463
+ "step": 966
3464
+ },
3465
+ {
3466
+ "epoch": 0.4084388185654008,
3467
+ "grad_norm": 0.4960501194000244,
3468
+ "learning_rate": 0.00016994727592267137,
3469
+ "loss": 0.9162673950195312,
3470
+ "step": 968
3471
+ },
3472
+ {
3473
+ "epoch": 0.4092827004219409,
3474
+ "grad_norm": 0.45093682408332825,
3475
+ "learning_rate": 0.000170298769771529,
3476
+ "loss": 0.904100775718689,
3477
+ "step": 970
3478
+ },
3479
+ {
3480
+ "epoch": 0.41012658227848103,
3481
+ "grad_norm": 0.4560275077819824,
3482
+ "learning_rate": 0.00017065026362038664,
3483
+ "loss": 0.8633337020874023,
3484
+ "step": 972
3485
+ },
3486
+ {
3487
+ "epoch": 0.4109704641350211,
3488
+ "grad_norm": 0.44885000586509705,
3489
+ "learning_rate": 0.00017100175746924428,
3490
+ "loss": 0.8454209566116333,
3491
+ "step": 974
3492
+ },
3493
+ {
3494
+ "epoch": 0.4118143459915612,
3495
+ "grad_norm": 0.47251659631729126,
3496
+ "learning_rate": 0.00017135325131810195,
3497
+ "loss": 0.824730396270752,
3498
+ "step": 976
3499
+ },
3500
+ {
3501
+ "epoch": 0.41265822784810124,
3502
+ "grad_norm": 0.6597666144371033,
3503
+ "learning_rate": 0.00017170474516695959,
3504
+ "loss": 0.9496501684188843,
3505
+ "step": 978
3506
+ },
3507
+ {
3508
+ "epoch": 0.41350210970464135,
3509
+ "grad_norm": 0.39806297421455383,
3510
+ "learning_rate": 0.00017205623901581722,
3511
+ "loss": 0.9419087171554565,
3512
+ "step": 980
3513
+ },
3514
+ {
3515
+ "epoch": 0.41434599156118146,
3516
+ "grad_norm": 0.48231109976768494,
3517
+ "learning_rate": 0.0001724077328646749,
3518
+ "loss": 0.9182976484298706,
3519
+ "step": 982
3520
+ },
3521
+ {
3522
+ "epoch": 0.4151898734177215,
3523
+ "grad_norm": 0.5438776612281799,
3524
+ "learning_rate": 0.00017275922671353253,
3525
+ "loss": 0.9386967420578003,
3526
+ "step": 984
3527
+ },
3528
+ {
3529
+ "epoch": 0.4160337552742616,
3530
+ "grad_norm": 0.4959667921066284,
3531
+ "learning_rate": 0.00017311072056239017,
3532
+ "loss": 0.897849440574646,
3533
+ "step": 986
3534
+ },
3535
+ {
3536
+ "epoch": 0.41687763713080167,
3537
+ "grad_norm": 0.43533357977867126,
3538
+ "learning_rate": 0.0001734622144112478,
3539
+ "loss": 0.8776953816413879,
3540
+ "step": 988
3541
+ },
3542
+ {
3543
+ "epoch": 0.4177215189873418,
3544
+ "grad_norm": 0.47513946890830994,
3545
+ "learning_rate": 0.00017381370826010547,
3546
+ "loss": 0.9162989854812622,
3547
+ "step": 990
3548
+ },
3549
+ {
3550
+ "epoch": 0.41856540084388183,
3551
+ "grad_norm": 0.4907188415527344,
3552
+ "learning_rate": 0.0001741652021089631,
3553
+ "loss": 0.9482660889625549,
3554
+ "step": 992
3555
+ },
3556
+ {
3557
+ "epoch": 0.41940928270042194,
3558
+ "grad_norm": 0.44499966502189636,
3559
+ "learning_rate": 0.00017451669595782075,
3560
+ "loss": 0.8812930583953857,
3561
+ "step": 994
3562
+ },
3563
+ {
3564
+ "epoch": 0.42025316455696204,
3565
+ "grad_norm": 0.4535730481147766,
3566
+ "learning_rate": 0.00017486818980667838,
3567
+ "loss": 0.9439874887466431,
3568
+ "step": 996
3569
+ },
3570
+ {
3571
+ "epoch": 0.4210970464135021,
3572
+ "grad_norm": 0.5240745544433594,
3573
+ "learning_rate": 0.00017521968365553602,
3574
+ "loss": 0.8818395137786865,
3575
+ "step": 998
3576
+ },
3577
+ {
3578
+ "epoch": 0.4219409282700422,
3579
+ "grad_norm": 0.5301211476325989,
3580
+ "learning_rate": 0.0001755711775043937,
3581
+ "loss": 0.886186957359314,
3582
+ "step": 1000
3583
+ },
3584
+ {
3585
+ "epoch": 0.4219409282700422,
3586
+ "eval_loss": 0.9487298727035522,
3587
+ "eval_runtime": 689.4288,
3588
+ "eval_samples_per_second": 3.056,
3589
+ "eval_steps_per_second": 3.056,
3590
+ "step": 1000
3591
+ }
3592
+ ],
3593
+ "logging_steps": 2,
3594
+ "max_steps": 14220,
3595
+ "num_input_tokens_seen": 0,
3596
+ "num_train_epochs": 6,
3597
+ "save_steps": 500,
3598
+ "stateful_callbacks": {
3599
+ "EarlyStoppingCallback": {
3600
+ "args": {
3601
+ "early_stopping_patience": 5,
3602
+ "early_stopping_threshold": 0.001
3603
+ },
3604
+ "attributes": {
3605
+ "early_stopping_patience_counter": 0
3606
+ }
3607
+ },
3608
+ "TrainerControl": {
3609
+ "args": {
3610
+ "should_epoch_stop": false,
3611
+ "should_evaluate": false,
3612
+ "should_log": false,
3613
+ "should_save": true,
3614
+ "should_training_stop": false
3615
+ },
3616
+ "attributes": {}
3617
+ }
3618
+ },
3619
+ "total_flos": 5.979725639330796e+17,
3620
+ "train_batch_size": 1,
3621
+ "trial_name": null,
3622
+ "trial_params": null
3623
+ }
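Note: the trainer state above closes checkpoint-1000 with eval_loss improving from 0.9643 (step 900) to 0.9487 (step 1000), so the EarlyStoppingCallback patience counter stays at 0. Below is a minimal sketch of the training arguments these fields imply; the 100-step eval interval, the output path, and the `eval_strategy` spelling are assumptions, and a model plus datasets would still have to be passed to `Trainer`.

```python
# Hypothetical reconstruction from trainer_state.json; not the repo's actual training script.
from transformers import TrainingArguments, EarlyStoppingCallback

args = TrainingArguments(
    output_dir="sft_qwen_14B_v2/checkpoints",   # assumed output location
    per_device_train_batch_size=1,              # "train_batch_size": 1
    num_train_epochs=6,                         # "num_train_epochs": 6
    logging_steps=2,                            # "logging_steps": 2
    save_steps=500,                             # "save_steps": 500
    eval_steps=100,                             # assumed: evals logged at steps 900 and 1000
    eval_strategy="steps",                      # "evaluation_strategy" on older transformers
    save_strategy="steps",
    load_best_model_at_end=True,                # required for early stopping on eval_loss
    metric_for_best_model="eval_loss",
    greater_is_better=False,
)

# Matches the "EarlyStoppingCallback" args recorded under "stateful_callbacks".
early_stop = EarlyStoppingCallback(
    early_stopping_patience=5,
    early_stopping_threshold=0.001,
)
# Both objects would be handed to transformers.Trainer(..., args=args, callbacks=[early_stop]).
```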
sft_qwen_14B_v2/checkpoints/checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@

1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afae4a8ce1391f149eb00b14eed8f891c715e892ea546bf754d22db2c2bc5969
3
+ size 4792
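Note: the three lines above are a Git LFS pointer (spec version, SHA-256 object id, byte size) rather than the binary itself; the same pattern repeats for every `.bin`, `.pt`, `.pth`, and `.safetensors` file below. A sketch of resolving the real payloads with `huggingface_hub` follows; the repository id is a placeholder, not something recorded in this diff.

```python
# Download one checkpoint folder, letting the hub client resolve the LFS pointers.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="<namespace>/<repo-name>",  # placeholder: fill in the actual repo id
    allow_patterns=["sft_qwen_14B_v2/checkpoints/checkpoint-1000/*"],
)
print(local_dir)  # path of the local snapshot containing the resolved files
```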
sft_qwen_14B_v2/checkpoints/checkpoint-1500/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: Models/Qwen2.5-Coder-14B-CPT
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:Models/Qwen2.5-Coder-14B-CPT
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
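A minimal loading sketch in lieu of the placeholder above, assuming a local copy of the base model named in the front matter and this checkpoint's adapter folder; paths and generation settings are illustrative only.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = "Models/Qwen2.5-Coder-14B-CPT"                    # base_model from the front matter
adapter = "sft_qwen_14B_v2/checkpoints/checkpoint-1500"  # this checkpoint's LoRA adapter

tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base, device_map="auto")
model = PeftModel.from_pretrained(model, adapter)

inputs = tokenizer("def quicksort(arr):", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```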
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
sft_qwen_14B_v2/checkpoints/checkpoint-1500/adapter_config.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "Models/Qwen2.5-Coder-14B-CPT",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 64,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
+ "r": 32,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "o_proj",
33
+ "v_proj",
34
+ "k_proj",
35
+ "q_proj"
36
+ ],
37
+ "target_parameters": null,
38
+ "task_type": "CAUSAL_LM",
39
+ "trainable_token_indices": null,
40
+ "use_dora": false,
41
+ "use_qalora": false,
42
+ "use_rslora": false
43
+ }
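For reference, the same adapter settings expressed as a `peft.LoraConfig`; a sketch for illustration, not the repo's training code.

```python
from peft import LoraConfig

lora_config = LoraConfig(
    r=32,                                                     # "r": 32
    lora_alpha=64,                                            # "lora_alpha": 64
    lora_dropout=0.05,                                        # "lora_dropout": 0.05
    bias="none",                                              # "bias": "none"
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # attention projections
    task_type="CAUSAL_LM",
)
# Presumably passed to peft.get_peft_model(base_model, lora_config) when the adapter was created.
```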
sft_qwen_14B_v2/checkpoints/checkpoint-1500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72b8f86cccb14317675ea126399dc4e8a7f18c71b7076f438c3d7e9c62fda470
3
+ size 201378736
sft_qwen_14B_v2/checkpoints/checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90432c235116acced0b5039484a54c5317932455079b5db44009c97433b3a03b
3
+ size 402976786
sft_qwen_14B_v2/checkpoints/checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a22e5aeea526845f66de80330bef50d5eab9ae3204b490a24a0489bbbf8f66f2
3
+ size 14244
sft_qwen_14B_v2/checkpoints/checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c6d90f58358a00be3824aa6b916df4e44be43484ede05aad7a85da63345c506
3
+ size 1064
sft_qwen_14B_v2/checkpoints/checkpoint-1500/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
sft_qwen_14B_v2/checkpoints/checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afae4a8ce1391f149eb00b14eed8f891c715e892ea546bf754d22db2c2bc5969
3
+ size 4792
sft_qwen_14B_v2/checkpoints/checkpoint-2000/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: Models/Qwen2.5-Coder-14B-CPT
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:Models/Qwen2.5-Coder-14B-CPT
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
sft_qwen_14B_v2/checkpoints/checkpoint-2000/adapter_config.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "Models/Qwen2.5-Coder-14B-CPT",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 64,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
+ "r": 32,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "o_proj",
33
+ "v_proj",
34
+ "k_proj",
35
+ "q_proj"
36
+ ],
37
+ "target_parameters": null,
38
+ "task_type": "CAUSAL_LM",
39
+ "trainable_token_indices": null,
40
+ "use_dora": false,
41
+ "use_qalora": false,
42
+ "use_rslora": false
43
+ }
sft_qwen_14B_v2/checkpoints/checkpoint-2000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f36ab65082bb1805ada7a0d63595c09b088ed7d6feed08092d0234cd38ae0c0
3
+ size 201378736
sft_qwen_14B_v2/checkpoints/checkpoint-2000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d041d1505290b0d22cce08cd5ddc5af28c3ba276e2afbc492822b36858ef6b6e
3
+ size 402976786
sft_qwen_14B_v2/checkpoints/checkpoint-2000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86dda28e04ef063abc8ec80a126ae11e59bcc0222e7ffedfe8ffc3e09e5ff1d1
3
+ size 14244
sft_qwen_14B_v2/checkpoints/checkpoint-2000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:47ee9234e7cfe3ba106b70e03151eb5b4653fe797445e980958ee4b2951365df
3
+ size 1064
sft_qwen_14B_v2/checkpoints/checkpoint-2000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
sft_qwen_14B_v2/checkpoints/checkpoint-2000/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afae4a8ce1391f149eb00b14eed8f891c715e892ea546bf754d22db2c2bc5969
3
+ size 4792
sft_qwen_14B_v2/checkpoints/checkpoint-2500/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: Models/Qwen2.5-Coder-14B-CPT
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:Models/Qwen2.5-Coder-14B-CPT
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
sft_qwen_14B_v2/checkpoints/checkpoint-2500/adapter_config.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "Models/Qwen2.5-Coder-14B-CPT",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 64,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
+ "r": 32,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "o_proj",
33
+ "v_proj",
34
+ "k_proj",
35
+ "q_proj"
36
+ ],
37
+ "target_parameters": null,
38
+ "task_type": "CAUSAL_LM",
39
+ "trainable_token_indices": null,
40
+ "use_dora": false,
41
+ "use_qalora": false,
42
+ "use_rslora": false
43
+ }
sft_qwen_14B_v2/checkpoints/checkpoint-2500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a636e1d293ef747a9d4ba56f26013a4c53586a2f6ea38007f2f9ae4160279b10
3
+ size 201378736
sft_qwen_14B_v2/checkpoints/checkpoint-2500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c28f62d25813fcda2055109e78038dad11c58245a033d42ff1be2f0c8ace5d2
3
+ size 402976786
sft_qwen_14B_v2/checkpoints/checkpoint-2500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c52cacce7e167165387adaceb3af68d71fcfa2d3727df90524af91c93858d7af
3
+ size 14244
sft_qwen_14B_v2/checkpoints/checkpoint-2500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efc1d8a0a2b86aa25004510e00c98c486065b74b7c5a64dc37307cefa17ed157
3
+ size 1064
sft_qwen_14B_v2/checkpoints/checkpoint-2500/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
sft_qwen_14B_v2/checkpoints/checkpoint-2500/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afae4a8ce1391f149eb00b14eed8f891c715e892ea546bf754d22db2c2bc5969
3
+ size 4792
sft_qwen_14B_v2/checkpoints/checkpoint-3000/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: Models/Qwen2.5-Coder-14B-CPT
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:Models/Qwen2.5-Coder-14B-CPT
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
sft_qwen_14B_v2/checkpoints/checkpoint-3000/adapter_config.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "Models/Qwen2.5-Coder-14B-CPT",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 64,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
+ "r": 32,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "o_proj",
33
+ "v_proj",
34
+ "k_proj",
35
+ "q_proj"
36
+ ],
37
+ "target_parameters": null,
38
+ "task_type": "CAUSAL_LM",
39
+ "trainable_token_indices": null,
40
+ "use_dora": false,
41
+ "use_qalora": false,
42
+ "use_rslora": false
43
+ }
sft_qwen_14B_v2/checkpoints/checkpoint-3000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84541ff7632f88bb0f7123c3f0560425ec3bb43bb106c7a7e32e03063e130ed0
3
+ size 201378736
sft_qwen_14B_v2/checkpoints/checkpoint-3000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b13b0455977b17d0e21b9257bce6ddb3bc7913419710555f0c98d7f56663e413
3
+ size 402976786
sft_qwen_14B_v2/checkpoints/checkpoint-3000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0c19d6218afb9104df41c5b27e6edf90eb6e07a47e8b43e4f82ac1a8a44b8eb
3
+ size 14244
sft_qwen_14B_v2/checkpoints/checkpoint-3000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:608dbd365b0ca511f27dbb9deb12e1f149332571e34f6a5c04cfaf79a3d71f1f
3
+ size 1064
sft_qwen_14B_v2/checkpoints/checkpoint-3000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
sft_qwen_14B_v2/checkpoints/checkpoint-3000/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afae4a8ce1391f149eb00b14eed8f891c715e892ea546bf754d22db2c2bc5969
3
+ size 4792
sft_qwen_14B_v2/checkpoints/checkpoint-3500/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: Models/Qwen2.5-Coder-14B-CPT
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:Models/Qwen2.5-Coder-14B-CPT
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
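The "How to Get Started with the Model" section of these checkpoint READMEs is left as [More Information Needed]. As a hedged illustration only, and not the authors' documented procedure, the sketch below shows how one might load a checkpoint from this repository as a LoRA adapter on top of the base model named in the card; the paths, dtype, and prompt are assumptions.

```python
# Hypothetical usage sketch -- paths, dtype, and prompt are assumptions, not taken from the repo.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_path = "Models/Qwen2.5-Coder-14B-CPT"                     # base model from adapter_config.json
adapter_path = "sft_qwen_14B_v2/checkpoints/checkpoint-3500"   # any checkpoint dir, or best_adapter

tokenizer = AutoTokenizer.from_pretrained(base_path)
base_model = AutoModelForCausalLM.from_pretrained(
    base_path, torch_dtype=torch.bfloat16, device_map="auto"
)

# Attach the LoRA weights stored in adapter_model.safetensors.
model = PeftModel.from_pretrained(base_model, adapter_path)
model.eval()

prompt = "def quicksort(arr):"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```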
sft_qwen_14B_v2/checkpoints/checkpoint-3500/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alora_invocation_tokens": null,
+ "alpha_pattern": {},
+ "arrow_config": null,
+ "auto_mapping": null,
+ "base_model_name_or_path": "Models/Qwen2.5-Coder-14B-CPT",
+ "bias": "none",
+ "corda_config": null,
+ "ensure_weight_tying": false,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 64,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "peft_version": "0.18.0",
+ "qalora_group_size": 16,
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "v_proj",
+ "k_proj",
+ "q_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
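The adapter_config.json above records the LoRA setup used for these checkpoints: rank 32, alpha 64 (an effective scale of alpha/r = 2.0), dropout 0.05, and the attention projections q_proj, k_proj, v_proj, and o_proj as target modules. As a minimal sketch, assuming the base model is available at the local path named in the config, an equivalent `LoraConfig` could be declared in PEFT as follows; fields not shown (e.g. `qalora_group_size`, `alora_invocation_tokens`) are PEFT 0.18.0 defaults.

```python
# Hedged reconstruction of the key fields in adapter_config.json; not the authors' training script.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

lora_config = LoraConfig(
    r=32,                      # LoRA rank
    lora_alpha=64,             # scaling factor (64 / 32 = 2.0 effective scale)
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # attention projections only
    task_type="CAUSAL_LM",
)

# Assumed local path; wrapping the base model yields the same trainable-parameter layout.
base_model = AutoModelForCausalLM.from_pretrained("Models/Qwen2.5-Coder-14B-CPT")
model = get_peft_model(base_model, lora_config)
model.print_trainable_parameters()
```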
sft_qwen_14B_v2/checkpoints/checkpoint-3500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3197e2bedd953dc5715517e86b05e3920fa3d94bd95cca48ae271d35832d38a4
+ size 201378736
sft_qwen_14B_v2/checkpoints/checkpoint-3500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7d76ed9c500b950c64503cd5076327e5834eed723bf3f4de15ff79d8e544b02
+ size 402976786
sft_qwen_14B_v2/checkpoints/checkpoint-3500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93eebadcd407d6d286a888a8799c348021b397445be0c00df5150b9c54d0e62b
+ size 14244