SirajRLX committed on
Commit 61ac574 · verified · 1 Parent(s): 97d09eb

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See the raw diff for the full list.
Files changed (50)
  1. .gitattributes +9 -0
  2. cpt_qwen_14B/best_adapter/README.md +207 -0
  3. cpt_qwen_14B/best_adapter/adapter_config.json +43 -0
  4. cpt_qwen_14B/best_adapter/adapter_model.safetensors +3 -0
  5. cpt_qwen_14B/best_adapter/chat_template.jinja +54 -0
  6. cpt_qwen_14B/best_adapter/tokenizer.json +3 -0
  7. cpt_qwen_14B/best_adapter/tokenizer_config.json +29 -0
  8. cpt_qwen_14B/best_adapter/training_args.bin +3 -0
  9. cpt_qwen_14B/checkpoints/checkpoint-100/README.md +207 -0
  10. cpt_qwen_14B/checkpoints/checkpoint-100/adapter_config.json +43 -0
  11. cpt_qwen_14B/checkpoints/checkpoint-100/adapter_model.safetensors +3 -0
  12. cpt_qwen_14B/checkpoints/checkpoint-100/chat_template.jinja +54 -0
  13. cpt_qwen_14B/checkpoints/checkpoint-100/optimizer.pt +3 -0
  14. cpt_qwen_14B/checkpoints/checkpoint-100/rng_state.pth +3 -0
  15. cpt_qwen_14B/checkpoints/checkpoint-100/scheduler.pt +3 -0
  16. cpt_qwen_14B/checkpoints/checkpoint-100/tokenizer.json +3 -0
  17. cpt_qwen_14B/checkpoints/checkpoint-100/tokenizer_config.json +29 -0
  18. cpt_qwen_14B/checkpoints/checkpoint-100/trainer_state.json +750 -0
  19. cpt_qwen_14B/checkpoints/checkpoint-100/training_args.bin +3 -0
  20. cpt_qwen_14B/checkpoints/checkpoint-200/README.md +207 -0
  21. cpt_qwen_14B/checkpoints/checkpoint-200/adapter_config.json +43 -0
  22. cpt_qwen_14B/checkpoints/checkpoint-200/adapter_model.safetensors +3 -0
  23. cpt_qwen_14B/checkpoints/checkpoint-200/chat_template.jinja +54 -0
  24. cpt_qwen_14B/checkpoints/checkpoint-200/optimizer.pt +3 -0
  25. cpt_qwen_14B/checkpoints/checkpoint-200/rng_state.pth +3 -0
  26. cpt_qwen_14B/checkpoints/checkpoint-200/scheduler.pt +3 -0
  27. cpt_qwen_14B/checkpoints/checkpoint-200/tokenizer.json +3 -0
  28. cpt_qwen_14B/checkpoints/checkpoint-200/tokenizer_config.json +29 -0
  29. cpt_qwen_14B/checkpoints/checkpoint-200/trainer_state.json +1466 -0
  30. cpt_qwen_14B/checkpoints/checkpoint-200/training_args.bin +3 -0
  31. cpt_qwen_14B/checkpoints/checkpoint-300/README.md +207 -0
  32. cpt_qwen_14B/checkpoints/checkpoint-300/adapter_config.json +43 -0
  33. cpt_qwen_14B/checkpoints/checkpoint-300/adapter_model.safetensors +3 -0
  34. cpt_qwen_14B/checkpoints/checkpoint-300/chat_template.jinja +54 -0
  35. cpt_qwen_14B/checkpoints/checkpoint-300/optimizer.pt +3 -0
  36. cpt_qwen_14B/checkpoints/checkpoint-300/rng_state.pth +3 -0
  37. cpt_qwen_14B/checkpoints/checkpoint-300/scheduler.pt +3 -0
  38. cpt_qwen_14B/checkpoints/checkpoint-300/tokenizer.json +3 -0
  39. cpt_qwen_14B/checkpoints/checkpoint-300/tokenizer_config.json +29 -0
  40. cpt_qwen_14B/checkpoints/checkpoint-300/trainer_state.json +2182 -0
  41. cpt_qwen_14B/checkpoints/checkpoint-300/training_args.bin +3 -0
  42. cpt_qwen_14B/checkpoints/checkpoint-400/README.md +207 -0
  43. cpt_qwen_14B/checkpoints/checkpoint-400/adapter_config.json +43 -0
  44. cpt_qwen_14B/checkpoints/checkpoint-400/adapter_model.safetensors +3 -0
  45. cpt_qwen_14B/checkpoints/checkpoint-400/chat_template.jinja +54 -0
  46. cpt_qwen_14B/checkpoints/checkpoint-400/optimizer.pt +3 -0
  47. cpt_qwen_14B/checkpoints/checkpoint-400/rng_state.pth +3 -0
  48. cpt_qwen_14B/checkpoints/checkpoint-400/scheduler.pt +3 -0
  49. cpt_qwen_14B/checkpoints/checkpoint-400/tokenizer.json +3 -0
  50. cpt_qwen_14B/checkpoints/checkpoint-400/tokenizer_config.json +29 -0
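The commit message above notes the folder was pushed with huggingface_hub. A minimal, hedged sketch of how such an upload is typically issued (the target repo id below is a placeholder, not taken from this page):

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes a token is already configured via `huggingface-cli login` or HF_TOKEN

# Repo id is illustrative; the actual destination repo is not shown in this view.
api.upload_folder(
    folder_path="cpt_qwen_14B",
    path_in_repo="cpt_qwen_14B",
    repo_id="SirajRLX/<repo-name>",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```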
.gitattributes CHANGED
@@ -33,3 +33,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ cpt_qwen_14B/best_adapter/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ cpt_qwen_14B/checkpoints/checkpoint-100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ cpt_qwen_14B/checkpoints/checkpoint-200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ cpt_qwen_14B/checkpoints/checkpoint-300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ cpt_qwen_14B/checkpoints/checkpoint-400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ cpt_qwen_14B/checkpoints/checkpoint-500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ cpt_qwen_14B/checkpoints/checkpoint-600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ cpt_qwen_14B/checkpoints/checkpoint-656/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ cpt_qwen_14B/wandb/offline-run-20251223_125436-g6vlcw0j/run-g6vlcw0j.wandb filter=lfs diff=lfs merge=lfs -text
cpt_qwen_14B/best_adapter/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: /workspace/Models/Qwen2.5-Coder-14B
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:/workspace/Models/Qwen2.5-Coder-14B
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
cpt_qwen_14B/best_adapter/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "alora_invocation_tokens": null,
+   "alpha_pattern": {},
+   "arrow_config": null,
+   "auto_mapping": null,
+   "base_model_name_or_path": "/workspace/Models/Qwen2.5-Coder-14B",
+   "bias": "none",
+   "corda_config": null,
+   "ensure_weight_tying": false,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "peft_version": "0.18.0",
+   "qalora_group_size": 16,
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "o_proj",
+     "q_proj",
+     "v_proj"
+   ],
+   "target_parameters": null,
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
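The adapter_config.json above describes a rank-32 LoRA (lora_alpha 64, dropout 0.05) on the q/k/v/o attention projections of Qwen2.5-Coder-14B. A minimal sketch of attaching such an adapter with PEFT, assuming the base model path from the config is available locally (paths are otherwise placeholders):

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_path = "/workspace/Models/Qwen2.5-Coder-14B"  # base_model_name_or_path from adapter_config.json
adapter_path = "cpt_qwen_14B/best_adapter"         # adapter folder uploaded in this commit

# Load the frozen base model, then wrap it with the LoRA weights.
base = AutoModelForCausalLM.from_pretrained(base_path, torch_dtype="auto", device_map="auto")
model = PeftModel.from_pretrained(base, adapter_path)
tokenizer = AutoTokenizer.from_pretrained(adapter_path)

# Optional: fold the adapter into the base weights for adapter-free inference.
model = model.merge_and_unload()
```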
cpt_qwen_14B/best_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:732e678c9e22bba352641afc71ed5fc2394671dd0d66707e288224822a906558
+ size 201378736
cpt_qwen_14B/best_adapter/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
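The template above is the standard Qwen2.5 ChatML layout, with optional <tools>/<tool_call> blocks for function calling. A small sketch of rendering it through the tokenizer (the messages are made up for illustration; recent transformers releases pick up chat_template.jinja from the same folder):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("cpt_qwen_14B/best_adapter")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarise what a LoRA adapter is in one sentence."},
]

# Produces the <|im_start|>...<|im_end|> blocks and the trailing assistant header.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```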
cpt_qwen_14B/best_adapter/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fd169731d2cbde95e10bf356d66d5997fd885dd8dbb6fb4684da3f23b2585d8
+ size 11421892
cpt_qwen_14B/best_adapter/tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "add_prefix_space": false,
+   "backend": "tokenizers",
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "extra_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "is_local": true,
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
cpt_qwen_14B/best_adapter/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a501e145e724e2b92f08ecb0badd762f4f3a8472eb7b53a96edc24d2ff6150ce
+ size 5201
cpt_qwen_14B/checkpoints/checkpoint-100/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: /workspace/Models/Qwen2.5-Coder-14B
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:/workspace/Models/Qwen2.5-Coder-14B
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
cpt_qwen_14B/checkpoints/checkpoint-100/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "alora_invocation_tokens": null,
+   "alpha_pattern": {},
+   "arrow_config": null,
+   "auto_mapping": null,
+   "base_model_name_or_path": "/workspace/Models/Qwen2.5-Coder-14B",
+   "bias": "none",
+   "corda_config": null,
+   "ensure_weight_tying": false,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "peft_version": "0.18.0",
+   "qalora_group_size": 16,
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "q_proj",
+     "o_proj",
+     "k_proj"
+   ],
+   "target_parameters": null,
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
cpt_qwen_14B/checkpoints/checkpoint-100/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5eb9828020a915a338a62e297a70ad08859fd2caf23d051f1106384bd1013c18
+ size 201378736
cpt_qwen_14B/checkpoints/checkpoint-100/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
cpt_qwen_14B/checkpoints/checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7210a9536a1373d78b688acc34126df7e1e110a1466aba22b339760271bbd078
+ size 102698471
cpt_qwen_14B/checkpoints/checkpoint-100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f9c723f3b3ec93075e4df80d4b9fca594bf5084eeca69dfc4bee734176b2011
+ size 14645
cpt_qwen_14B/checkpoints/checkpoint-100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da9afcff37b2d4b9e726256840193d30b8c1205dbf5b286ebb18b671682f2424
+ size 1465
cpt_qwen_14B/checkpoints/checkpoint-100/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fd169731d2cbde95e10bf356d66d5997fd885dd8dbb6fb4684da3f23b2585d8
+ size 11421892
cpt_qwen_14B/checkpoints/checkpoint-100/tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "add_prefix_space": false,
+   "backend": "tokenizers",
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "extra_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "is_local": true,
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
cpt_qwen_14B/checkpoints/checkpoint-100/trainer_state.json ADDED
@@ -0,0 +1,750 @@
1
+ {
2
+ "best_global_step": 100,
3
+ "best_metric": 0.884428083896637,
4
+ "best_model_checkpoint": "runs/cpt_run_14b/checkpoints/checkpoint-100",
5
+ "epoch": 0.30569354222392053,
6
+ "eval_steps": 50,
7
+ "global_step": 100,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.003056935422239205,
14
+ "grad_norm": 0.06516239047050476,
15
+ "learning_rate": 0.0,
16
+ "loss": 1.138384461402893,
17
+ "step": 1
18
+ },
19
+ {
20
+ "epoch": 0.00611387084447841,
21
+ "grad_norm": 0.05343673378229141,
22
+ "learning_rate": 3.0303030303030305e-07,
23
+ "loss": 0.983342707157135,
24
+ "step": 2
25
+ },
26
+ {
27
+ "epoch": 0.009170806266717615,
28
+ "grad_norm": 0.05608418956398964,
29
+ "learning_rate": 6.060606060606061e-07,
30
+ "loss": 1.0762118101119995,
31
+ "step": 3
32
+ },
33
+ {
34
+ "epoch": 0.01222774168895682,
35
+ "grad_norm": 0.06523486226797104,
36
+ "learning_rate": 9.090909090909091e-07,
37
+ "loss": 1.084489345550537,
38
+ "step": 4
39
+ },
40
+ {
41
+ "epoch": 0.015284677111196026,
42
+ "grad_norm": 0.06582186371088028,
43
+ "learning_rate": 1.2121212121212122e-06,
44
+ "loss": 1.2037022113800049,
45
+ "step": 5
46
+ },
47
+ {
48
+ "epoch": 0.01834161253343523,
49
+ "grad_norm": 0.06097998470067978,
50
+ "learning_rate": 1.5151515151515152e-06,
51
+ "loss": 1.10005784034729,
52
+ "step": 6
53
+ },
54
+ {
55
+ "epoch": 0.021398547955674436,
56
+ "grad_norm": 0.10365528613328934,
57
+ "learning_rate": 1.8181818181818183e-06,
58
+ "loss": 1.0895193815231323,
59
+ "step": 7
60
+ },
61
+ {
62
+ "epoch": 0.02445548337791364,
63
+ "grad_norm": 0.06312141567468643,
64
+ "learning_rate": 2.1212121212121216e-06,
65
+ "loss": 1.0593242645263672,
66
+ "step": 8
67
+ },
68
+ {
69
+ "epoch": 0.027512418800152847,
70
+ "grad_norm": 0.05508403480052948,
71
+ "learning_rate": 2.4242424242424244e-06,
72
+ "loss": 0.9772955179214478,
73
+ "step": 9
74
+ },
75
+ {
76
+ "epoch": 0.030569354222392053,
77
+ "grad_norm": 0.06006711348891258,
78
+ "learning_rate": 2.7272727272727272e-06,
79
+ "loss": 1.084238886833191,
80
+ "step": 10
81
+ },
82
+ {
83
+ "epoch": 0.033626289644631255,
84
+ "grad_norm": 0.0588749423623085,
85
+ "learning_rate": 3.0303030303030305e-06,
86
+ "loss": 1.0786534547805786,
87
+ "step": 11
88
+ },
89
+ {
90
+ "epoch": 0.03668322506687046,
91
+ "grad_norm": 0.046551357954740524,
92
+ "learning_rate": 3.3333333333333333e-06,
93
+ "loss": 1.0370622873306274,
94
+ "step": 12
95
+ },
96
+ {
97
+ "epoch": 0.039740160489109666,
98
+ "grad_norm": 0.061659567058086395,
99
+ "learning_rate": 3.6363636363636366e-06,
100
+ "loss": 1.0646986961364746,
101
+ "step": 13
102
+ },
103
+ {
104
+ "epoch": 0.04279709591134887,
105
+ "grad_norm": 0.06007347255945206,
106
+ "learning_rate": 3.93939393939394e-06,
107
+ "loss": 1.0311307907104492,
108
+ "step": 14
109
+ },
110
+ {
111
+ "epoch": 0.04585403133358808,
112
+ "grad_norm": 0.07314135134220123,
113
+ "learning_rate": 4.242424242424243e-06,
114
+ "loss": 1.1300500631332397,
115
+ "step": 15
116
+ },
117
+ {
118
+ "epoch": 0.04891096675582728,
119
+ "grad_norm": 0.060934022068977356,
120
+ "learning_rate": 4.5454545454545455e-06,
121
+ "loss": 1.0197452306747437,
122
+ "step": 16
123
+ },
124
+ {
125
+ "epoch": 0.05196790217806649,
126
+ "grad_norm": 0.056856051087379456,
127
+ "learning_rate": 4.848484848484849e-06,
128
+ "loss": 1.0438549518585205,
129
+ "step": 17
130
+ },
131
+ {
132
+ "epoch": 0.055024837600305694,
133
+ "grad_norm": 0.05908689647912979,
134
+ "learning_rate": 5.151515151515152e-06,
135
+ "loss": 1.0398856401443481,
136
+ "step": 18
137
+ },
138
+ {
139
+ "epoch": 0.0580817730225449,
140
+ "grad_norm": 0.07411840558052063,
141
+ "learning_rate": 5.4545454545454545e-06,
142
+ "loss": 1.107885479927063,
143
+ "step": 19
144
+ },
145
+ {
146
+ "epoch": 0.061138708444784105,
147
+ "grad_norm": 0.0749165341258049,
148
+ "learning_rate": 5.7575757575757586e-06,
149
+ "loss": 1.1060967445373535,
150
+ "step": 20
151
+ },
152
+ {
153
+ "epoch": 0.06419564386702331,
154
+ "grad_norm": 0.06720177084207535,
155
+ "learning_rate": 6.060606060606061e-06,
156
+ "loss": 1.0471720695495605,
157
+ "step": 21
158
+ },
159
+ {
160
+ "epoch": 0.06725257928926251,
161
+ "grad_norm": 0.05990725755691528,
162
+ "learning_rate": 6.363636363636364e-06,
163
+ "loss": 1.0944981575012207,
164
+ "step": 22
165
+ },
166
+ {
167
+ "epoch": 0.07030951471150172,
168
+ "grad_norm": 0.06672193855047226,
169
+ "learning_rate": 6.666666666666667e-06,
170
+ "loss": 1.1477092504501343,
171
+ "step": 23
172
+ },
173
+ {
174
+ "epoch": 0.07336645013374092,
175
+ "grad_norm": 0.06145205348730087,
176
+ "learning_rate": 6.969696969696971e-06,
177
+ "loss": 1.0591784715652466,
178
+ "step": 24
179
+ },
180
+ {
181
+ "epoch": 0.07642338555598013,
182
+ "grad_norm": 0.0757482647895813,
183
+ "learning_rate": 7.272727272727273e-06,
184
+ "loss": 1.0500165224075317,
185
+ "step": 25
186
+ },
187
+ {
188
+ "epoch": 0.07948032097821933,
189
+ "grad_norm": 0.07848478108644485,
190
+ "learning_rate": 7.5757575757575764e-06,
191
+ "loss": 1.0747522115707397,
192
+ "step": 26
193
+ },
194
+ {
195
+ "epoch": 0.08253725640045854,
196
+ "grad_norm": 0.07740631699562073,
197
+ "learning_rate": 7.87878787878788e-06,
198
+ "loss": 1.132310152053833,
199
+ "step": 27
200
+ },
201
+ {
202
+ "epoch": 0.08559419182269774,
203
+ "grad_norm": 0.07476603239774704,
204
+ "learning_rate": 8.181818181818183e-06,
205
+ "loss": 1.0339502096176147,
206
+ "step": 28
207
+ },
208
+ {
209
+ "epoch": 0.08865112724493696,
210
+ "grad_norm": 0.0779196098446846,
211
+ "learning_rate": 8.484848484848486e-06,
212
+ "loss": 1.1047282218933105,
213
+ "step": 29
214
+ },
215
+ {
216
+ "epoch": 0.09170806266717615,
217
+ "grad_norm": 0.06962384283542633,
218
+ "learning_rate": 8.787878787878788e-06,
219
+ "loss": 1.004916787147522,
220
+ "step": 30
221
+ },
222
+ {
223
+ "epoch": 0.09476499808941537,
224
+ "grad_norm": 0.06369175016880035,
225
+ "learning_rate": 9.090909090909091e-06,
226
+ "loss": 0.9296417832374573,
227
+ "step": 31
228
+ },
229
+ {
230
+ "epoch": 0.09782193351165457,
231
+ "grad_norm": 0.07470260560512543,
232
+ "learning_rate": 9.393939393939396e-06,
233
+ "loss": 1.0721708536148071,
234
+ "step": 32
235
+ },
236
+ {
237
+ "epoch": 0.10087886893389378,
238
+ "grad_norm": 0.07948213815689087,
239
+ "learning_rate": 9.696969696969698e-06,
240
+ "loss": 1.0350117683410645,
241
+ "step": 33
242
+ },
243
+ {
244
+ "epoch": 0.10393580435613298,
245
+ "grad_norm": 0.07066022604703903,
246
+ "learning_rate": 1e-05,
247
+ "loss": 1.026305913925171,
248
+ "step": 34
249
+ },
250
+ {
251
+ "epoch": 0.10699273977837218,
252
+ "grad_norm": 0.07774543762207031,
253
+ "learning_rate": 1.0303030303030304e-05,
254
+ "loss": 1.0509816408157349,
255
+ "step": 35
256
+ },
257
+ {
258
+ "epoch": 0.11004967520061139,
259
+ "grad_norm": 0.07501248270273209,
260
+ "learning_rate": 1.0606060606060606e-05,
261
+ "loss": 1.0011574029922485,
262
+ "step": 36
263
+ },
264
+ {
265
+ "epoch": 0.11310661062285059,
266
+ "grad_norm": 0.6622501611709595,
267
+ "learning_rate": 1.0909090909090909e-05,
268
+ "loss": 0.9754424691200256,
269
+ "step": 37
270
+ },
271
+ {
272
+ "epoch": 0.1161635460450898,
273
+ "grad_norm": 0.07566080242395401,
274
+ "learning_rate": 1.1212121212121212e-05,
275
+ "loss": 1.0342774391174316,
276
+ "step": 38
277
+ },
278
+ {
279
+ "epoch": 0.119220481467329,
280
+ "grad_norm": 0.07573831081390381,
281
+ "learning_rate": 1.1515151515151517e-05,
282
+ "loss": 0.9714518785476685,
283
+ "step": 39
284
+ },
285
+ {
286
+ "epoch": 0.12227741688956821,
287
+ "grad_norm": 0.08083852380514145,
288
+ "learning_rate": 1.181818181818182e-05,
289
+ "loss": 1.1050316095352173,
290
+ "step": 40
291
+ },
292
+ {
293
+ "epoch": 0.12533435231180742,
294
+ "grad_norm": 0.08540588617324829,
295
+ "learning_rate": 1.2121212121212122e-05,
296
+ "loss": 1.0871070623397827,
297
+ "step": 41
298
+ },
299
+ {
300
+ "epoch": 0.12839128773404662,
301
+ "grad_norm": 0.07391592115163803,
302
+ "learning_rate": 1.2424242424242425e-05,
303
+ "loss": 1.0206722021102905,
304
+ "step": 42
305
+ },
306
+ {
307
+ "epoch": 0.13144822315628582,
308
+ "grad_norm": 0.07063689082860947,
309
+ "learning_rate": 1.2727272727272728e-05,
310
+ "loss": 0.9775047898292542,
311
+ "step": 43
312
+ },
313
+ {
314
+ "epoch": 0.13450515857852502,
315
+ "grad_norm": 0.07288888841867447,
316
+ "learning_rate": 1.3030303030303032e-05,
317
+ "loss": 1.1132858991622925,
318
+ "step": 44
319
+ },
320
+ {
321
+ "epoch": 0.13756209400076425,
322
+ "grad_norm": 0.07641777396202087,
323
+ "learning_rate": 1.3333333333333333e-05,
324
+ "loss": 1.0707701444625854,
325
+ "step": 45
326
+ },
327
+ {
328
+ "epoch": 0.14061902942300344,
329
+ "grad_norm": 0.06990326195955276,
330
+ "learning_rate": 1.3636363636363637e-05,
331
+ "loss": 0.9328265190124512,
332
+ "step": 46
333
+ },
334
+ {
335
+ "epoch": 0.14367596484524264,
336
+ "grad_norm": 0.0834241658449173,
337
+ "learning_rate": 1.3939393939393942e-05,
338
+ "loss": 1.0131721496582031,
339
+ "step": 47
340
+ },
341
+ {
342
+ "epoch": 0.14673290026748184,
343
+ "grad_norm": 0.0714937075972557,
344
+ "learning_rate": 1.4242424242424245e-05,
345
+ "loss": 0.940493106842041,
346
+ "step": 48
347
+ },
348
+ {
349
+ "epoch": 0.14978983568972107,
350
+ "grad_norm": 0.07770547270774841,
351
+ "learning_rate": 1.4545454545454546e-05,
352
+ "loss": 1.0435771942138672,
353
+ "step": 49
354
+ },
355
+ {
356
+ "epoch": 0.15284677111196027,
357
+ "grad_norm": 0.07950945198535919,
358
+ "learning_rate": 1.484848484848485e-05,
359
+ "loss": 1.0382137298583984,
360
+ "step": 50
361
+ },
362
+ {
363
+ "epoch": 0.15284677111196027,
364
+ "eval_loss": 1.0129202604293823,
365
+ "eval_runtime": 724.3664,
366
+ "eval_samples_per_second": 0.832,
367
+ "eval_steps_per_second": 0.832,
368
+ "step": 50
369
+ },
370
+ {
371
+ "epoch": 0.15590370653419947,
372
+ "grad_norm": 0.06961936503648758,
373
+ "learning_rate": 1.5151515151515153e-05,
374
+ "loss": 0.9690049886703491,
375
+ "step": 51
376
+ },
377
+ {
378
+ "epoch": 0.15896064195643866,
379
+ "grad_norm": 0.069523885846138,
380
+ "learning_rate": 1.5454545454545454e-05,
381
+ "loss": 0.9830482006072998,
382
+ "step": 52
383
+ },
384
+ {
385
+ "epoch": 0.16201757737867786,
386
+ "grad_norm": 0.0764622762799263,
387
+ "learning_rate": 1.575757575757576e-05,
388
+ "loss": 1.0895472764968872,
389
+ "step": 53
390
+ },
391
+ {
392
+ "epoch": 0.1650745128009171,
393
+ "grad_norm": 0.1413721889257431,
394
+ "learning_rate": 1.606060606060606e-05,
395
+ "loss": 1.0354574918746948,
396
+ "step": 54
397
+ },
398
+ {
399
+ "epoch": 0.1681314482231563,
400
+ "grad_norm": 0.06818042695522308,
401
+ "learning_rate": 1.6363636363636366e-05,
402
+ "loss": 0.8534265160560608,
403
+ "step": 55
404
+ },
405
+ {
406
+ "epoch": 0.1711883836453955,
407
+ "grad_norm": 0.0722246989607811,
408
+ "learning_rate": 1.6666666666666667e-05,
409
+ "loss": 0.9580274820327759,
410
+ "step": 56
411
+ },
412
+ {
413
+ "epoch": 0.17424531906763469,
414
+ "grad_norm": 0.07113443315029144,
415
+ "learning_rate": 1.6969696969696972e-05,
416
+ "loss": 1.0721848011016846,
417
+ "step": 57
418
+ },
419
+ {
420
+ "epoch": 0.1773022544898739,
421
+ "grad_norm": 0.08412107080221176,
422
+ "learning_rate": 1.7272727272727274e-05,
423
+ "loss": 1.1180150508880615,
424
+ "step": 58
425
+ },
426
+ {
427
+ "epoch": 0.1803591899121131,
428
+ "grad_norm": 0.07381036877632141,
429
+ "learning_rate": 1.7575757575757576e-05,
430
+ "loss": 1.0384547710418701,
431
+ "step": 59
432
+ },
433
+ {
434
+ "epoch": 0.1834161253343523,
435
+ "grad_norm": 0.07089001685380936,
436
+ "learning_rate": 1.787878787878788e-05,
437
+ "loss": 1.0446016788482666,
438
+ "step": 60
439
+ },
440
+ {
441
+ "epoch": 0.1864730607565915,
442
+ "grad_norm": 0.11576953530311584,
443
+ "learning_rate": 1.8181818181818182e-05,
444
+ "loss": 1.0015051364898682,
445
+ "step": 61
446
+ },
447
+ {
448
+ "epoch": 0.18952999617883073,
449
+ "grad_norm": 0.08030868321657181,
450
+ "learning_rate": 1.8484848484848487e-05,
451
+ "loss": 0.9642710089683533,
452
+ "step": 62
453
+ },
454
+ {
455
+ "epoch": 0.19258693160106993,
456
+ "grad_norm": 0.08332342654466629,
457
+ "learning_rate": 1.8787878787878792e-05,
458
+ "loss": 1.0722991228103638,
459
+ "step": 63
460
+ },
461
+ {
462
+ "epoch": 0.19564386702330913,
463
+ "grad_norm": 0.08000365644693375,
464
+ "learning_rate": 1.9090909090909094e-05,
465
+ "loss": 1.0104647874832153,
466
+ "step": 64
467
+ },
468
+ {
469
+ "epoch": 0.19870080244554833,
470
+ "grad_norm": 0.08139508217573166,
471
+ "learning_rate": 1.9393939393939395e-05,
472
+ "loss": 0.9445061087608337,
473
+ "step": 65
474
+ },
475
+ {
476
+ "epoch": 0.20175773786778756,
477
+ "grad_norm": 0.08749893307685852,
478
+ "learning_rate": 1.96969696969697e-05,
479
+ "loss": 1.080810308456421,
480
+ "step": 66
481
+ },
482
+ {
483
+ "epoch": 0.20481467329002676,
484
+ "grad_norm": 0.0786912813782692,
485
+ "learning_rate": 2e-05,
486
+ "loss": 0.9705753922462463,
487
+ "step": 67
488
+ },
489
+ {
490
+ "epoch": 0.20787160871226595,
491
+ "grad_norm": 0.08962028473615646,
492
+ "learning_rate": 1.9999858236410775e-05,
493
+ "loss": 0.962783694267273,
494
+ "step": 68
495
+ },
496
+ {
497
+ "epoch": 0.21092854413450515,
498
+ "grad_norm": 0.08402887731790543,
499
+ "learning_rate": 1.9999432949662483e-05,
500
+ "loss": 0.9959614872932434,
501
+ "step": 69
502
+ },
503
+ {
504
+ "epoch": 0.21398547955674435,
505
+ "grad_norm": 0.08036444336175919,
506
+ "learning_rate": 1.9998724151813157e-05,
507
+ "loss": 0.9569960832595825,
508
+ "step": 70
509
+ },
510
+ {
511
+ "epoch": 0.21704241497898358,
512
+ "grad_norm": 0.08247046917676926,
513
+ "learning_rate": 1.9997731862959143e-05,
514
+ "loss": 1.0012171268463135,
515
+ "step": 71
516
+ },
517
+ {
518
+ "epoch": 0.22009935040122278,
519
+ "grad_norm": 0.08966264873743057,
520
+ "learning_rate": 1.999645611123453e-05,
521
+ "loss": 1.0403809547424316,
522
+ "step": 72
523
+ },
524
+ {
525
+ "epoch": 0.22315628582346198,
526
+ "grad_norm": 0.08061660826206207,
527
+ "learning_rate": 1.999489693281034e-05,
528
+ "loss": 1.0089740753173828,
529
+ "step": 73
530
+ },
531
+ {
532
+ "epoch": 0.22621322124570117,
533
+ "grad_norm": 0.09005365520715714,
534
+ "learning_rate": 1.9993054371893526e-05,
535
+ "loss": 0.9333044290542603,
536
+ "step": 74
537
+ },
538
+ {
539
+ "epoch": 0.2292701566679404,
540
+ "grad_norm": 0.08651519566774368,
541
+ "learning_rate": 1.9990928480725694e-05,
542
+ "loss": 0.9284015893936157,
543
+ "step": 75
544
+ },
545
+ {
546
+ "epoch": 0.2323270920901796,
547
+ "grad_norm": 0.08141147345304489,
548
+ "learning_rate": 1.9988519319581637e-05,
549
+ "loss": 0.9782730340957642,
550
+ "step": 76
551
+ },
552
+ {
553
+ "epoch": 0.2353840275124188,
554
+ "grad_norm": 0.08344405144453049,
555
+ "learning_rate": 1.998582695676762e-05,
556
+ "loss": 0.9723064303398132,
557
+ "step": 77
558
+ },
559
+ {
560
+ "epoch": 0.238440962934658,
561
+ "grad_norm": 0.08019903302192688,
562
+ "learning_rate": 1.998285146861945e-05,
563
+ "loss": 0.9648997783660889,
564
+ "step": 78
565
+ },
566
+ {
567
+ "epoch": 0.24149789835689722,
568
+ "grad_norm": 0.08113416284322739,
569
+ "learning_rate": 1.99795929395003e-05,
570
+ "loss": 0.9263214468955994,
571
+ "step": 79
572
+ },
573
+ {
574
+ "epoch": 0.24455483377913642,
575
+ "grad_norm": 0.08127513527870178,
576
+ "learning_rate": 1.997605146179833e-05,
577
+ "loss": 0.8745232224464417,
578
+ "step": 80
579
+ },
580
+ {
581
+ "epoch": 0.24761176920137562,
582
+ "grad_norm": 0.09934187680482864,
583
+ "learning_rate": 1.997222713592405e-05,
584
+ "loss": 0.8722782135009766,
585
+ "step": 81
586
+ },
587
+ {
588
+ "epoch": 0.25066870462361485,
589
+ "grad_norm": 0.09701363742351532,
590
+ "learning_rate": 1.9968120070307503e-05,
591
+ "loss": 1.0084266662597656,
592
+ "step": 82
593
+ },
594
+ {
595
+ "epoch": 0.253725640045854,
596
+ "grad_norm": 0.08335654437541962,
597
+ "learning_rate": 1.9963730381395154e-05,
598
+ "loss": 0.9239332675933838,
599
+ "step": 83
600
+ },
601
+ {
602
+ "epoch": 0.25678257546809324,
603
+ "grad_norm": 0.09161650389432907,
604
+ "learning_rate": 1.9959058193646618e-05,
605
+ "loss": 0.9878032207489014,
606
+ "step": 84
607
+ },
608
+ {
609
+ "epoch": 0.2598395108903324,
610
+ "grad_norm": 0.08067663013935089,
611
+ "learning_rate": 1.9954103639531116e-05,
612
+ "loss": 0.9113098382949829,
613
+ "step": 85
614
+ },
615
+ {
616
+ "epoch": 0.26289644631257164,
617
+ "grad_norm": 0.09619539976119995,
618
+ "learning_rate": 1.9948866859523717e-05,
619
+ "loss": 0.9527600407600403,
620
+ "step": 86
621
+ },
622
+ {
623
+ "epoch": 0.26595338173481087,
624
+ "grad_norm": 0.10015493631362915,
625
+ "learning_rate": 1.9943348002101374e-05,
626
+ "loss": 0.9569152593612671,
627
+ "step": 87
628
+ },
629
+ {
630
+ "epoch": 0.26901031715705004,
631
+ "grad_norm": 0.09012345969676971,
632
+ "learning_rate": 1.993754722373869e-05,
633
+ "loss": 0.8912045359611511,
634
+ "step": 88
635
+ },
636
+ {
637
+ "epoch": 0.27206725257928926,
638
+ "grad_norm": 0.10342805832624435,
639
+ "learning_rate": 1.9931464688903502e-05,
640
+ "loss": 0.856104850769043,
641
+ "step": 89
642
+ },
643
+ {
644
+ "epoch": 0.2751241880015285,
645
+ "grad_norm": 0.10218493640422821,
646
+ "learning_rate": 1.9925100570052194e-05,
647
+ "loss": 0.9631397128105164,
648
+ "step": 90
649
+ },
650
+ {
651
+ "epoch": 0.27818112342376766,
652
+ "grad_norm": 0.10909046977758408,
653
+ "learning_rate": 1.9918455047624847e-05,
654
+ "loss": 0.8532565236091614,
655
+ "step": 91
656
+ },
657
+ {
658
+ "epoch": 0.2812380588460069,
659
+ "grad_norm": 0.10714197903871536,
660
+ "learning_rate": 1.9911528310040073e-05,
661
+ "loss": 0.9691859483718872,
662
+ "step": 92
663
+ },
664
+ {
665
+ "epoch": 0.28429499426824606,
666
+ "grad_norm": 0.1108694076538086,
667
+ "learning_rate": 1.990432055368971e-05,
668
+ "loss": 0.9374334812164307,
669
+ "step": 93
670
+ },
671
+ {
672
+ "epoch": 0.2873519296904853,
673
+ "grad_norm": 0.10037308186292648,
674
+ "learning_rate": 1.989683198293324e-05,
675
+ "loss": 0.9166896343231201,
676
+ "step": 94
677
+ },
678
+ {
679
+ "epoch": 0.2904088651127245,
680
+ "grad_norm": 0.10246684402227402,
681
+ "learning_rate": 1.9889062810092002e-05,
682
+ "loss": 1.0059239864349365,
683
+ "step": 95
684
+ },
685
+ {
686
+ "epoch": 0.2934658005349637,
687
+ "grad_norm": 0.09954962879419327,
688
+ "learning_rate": 1.9881013255443152e-05,
689
+ "loss": 1.00413179397583,
690
+ "step": 96
691
+ },
692
+ {
693
+ "epoch": 0.2965227359572029,
694
+ "grad_norm": 0.11006761342287064,
695
+ "learning_rate": 1.9872683547213446e-05,
696
+ "loss": 0.9414035677909851,
697
+ "step": 97
698
+ },
699
+ {
700
+ "epoch": 0.29957967137944214,
701
+ "grad_norm": 0.1014382541179657,
702
+ "learning_rate": 1.9864073921572756e-05,
703
+ "loss": 0.9155468940734863,
704
+ "step": 98
705
+ },
706
+ {
707
+ "epoch": 0.3026366068016813,
708
+ "grad_norm": 0.09883157908916473,
709
+ "learning_rate": 1.9855184622627362e-05,
710
+ "loss": 0.9429305195808411,
711
+ "step": 99
712
+ },
713
+ {
714
+ "epoch": 0.30569354222392053,
715
+ "grad_norm": 0.11199072748422623,
716
+ "learning_rate": 1.9846015902413053e-05,
717
+ "loss": 0.9143528342247009,
718
+ "step": 100
719
+ },
720
+ {
721
+ "epoch": 0.30569354222392053,
722
+ "eval_loss": 0.884428083896637,
723
+ "eval_runtime": 723.8143,
724
+ "eval_samples_per_second": 0.833,
725
+ "eval_steps_per_second": 0.833,
726
+ "step": 100
727
+ }
728
+ ],
729
+ "logging_steps": 1,
730
+ "max_steps": 656,
731
+ "num_input_tokens_seen": 0,
732
+ "num_train_epochs": 2,
733
+ "save_steps": 100,
734
+ "stateful_callbacks": {
735
+ "TrainerControl": {
736
+ "args": {
737
+ "should_epoch_stop": false,
738
+ "should_evaluate": false,
739
+ "should_log": false,
740
+ "should_save": true,
741
+ "should_training_stop": false
742
+ },
743
+ "attributes": {}
744
+ }
745
+ },
746
+ "total_flos": 5.521459497664512e+17,
747
+ "train_batch_size": 1,
748
+ "trial_name": null,
749
+ "trial_params": null
750
+ }
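trainer_state.json logs one entry per optimizer step (logging_steps = 1) plus an eval entry every 50 steps; at this checkpoint the best eval_loss is 0.8844 at step 100, down from 1.0129 at step 50. A small sketch for pulling those curves out of the file:

```python
import json

with open("cpt_qwen_14B/checkpoints/checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_loss = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_loss = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("best checkpoint:", state["best_model_checkpoint"])
print("best eval_loss:", state["best_metric"])
print("eval points:", eval_loss)
```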
cpt_qwen_14B/checkpoints/checkpoint-100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6a8e308e47eb936f678712445b19ddc52638f354c37c813ecaa432f69120a2e
+ size 5201
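Because each checkpoint-* folder keeps optimizer.pt, scheduler.pt, rng_state.pth and trainer_state.json next to the adapter weights, an interrupted run can be resumed mid-schedule with transformers' Trainer. A hedged sketch: the model, datasets and Trainer wiring are assumed to match the original run (they are not part of this commit), and only the values visible in trainer_state.json are filled in.

```python
from transformers import Trainer, TrainingArguments

# `model`, `train_ds` and `eval_ds` are placeholders for the same objects used in the
# original continued-pretraining run; their construction is not shown in this commit.
args = TrainingArguments(
    output_dir="runs/cpt_run_14b/checkpoints",  # inferred from best_model_checkpoint
    per_device_train_batch_size=1,              # train_batch_size in trainer_state.json
    num_train_epochs=2,
    logging_steps=1,
    save_steps=100,
)
trainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=eval_ds)

# Restores optimizer, LR scheduler and RNG state and continues from global step 100.
trainer.train(resume_from_checkpoint="cpt_qwen_14B/checkpoints/checkpoint-100")
```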
cpt_qwen_14B/checkpoints/checkpoint-200/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: /workspace/Models/Qwen2.5-Coder-14B
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:/workspace/Models/Qwen2.5-Coder-14B
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
cpt_qwen_14B/checkpoints/checkpoint-200/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alora_invocation_tokens": null,
+ "alpha_pattern": {},
+ "arrow_config": null,
+ "auto_mapping": null,
+ "base_model_name_or_path": "/workspace/Models/Qwen2.5-Coder-14B",
+ "bias": "none",
+ "corda_config": null,
+ "ensure_weight_tying": false,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 64,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "peft_version": "0.18.0",
+ "qalora_group_size": 16,
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj",
+ "o_proj",
+ "k_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
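The adapter config above describes a rank-32 LoRA (alpha 64, dropout 0.05) applied only to the attention projections of Qwen2.5-Coder-14B. As a rough guide, a `LoraConfig` along these lines would reproduce those fields; this is an illustrative reconstruction, not code taken from the training run:

```python
# Hedged sketch of a peft LoraConfig matching the adapter_config.json fields above
# (assumes peft 0.18.0, as recorded in "peft_version").
from peft import LoraConfig

lora_config = LoraConfig(
    r=32,                                              # "r": 32
    lora_alpha=64,                                     # "lora_alpha": 64
    lora_dropout=0.05,                                 # "lora_dropout": 0.05
    bias="none",                                       # "bias": "none"
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    task_type="CAUSAL_LM",
)
```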
cpt_qwen_14B/checkpoints/checkpoint-200/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e32cd39a05adef845494ef625c330566280da483f1df43a7d896cde3d72e625
+ size 201378736
cpt_qwen_14B/checkpoints/checkpoint-200/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
cpt_qwen_14B/checkpoints/checkpoint-200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cec9971f0f83d3992bfdb8378f1f654b8058c6e92f4735b925ed926ebefea84
+ size 102698471
cpt_qwen_14B/checkpoints/checkpoint-200/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e70b27b141a53396d5df1322c439b8190c0da577dedafcca185e03254d54da1
+ size 14645
cpt_qwen_14B/checkpoints/checkpoint-200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2ea6b9b029d63c1e2f3731428c97dfbc99ba9388eac107e6ecbed49452b1af9
+ size 1465
cpt_qwen_14B/checkpoints/checkpoint-200/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fd169731d2cbde95e10bf356d66d5997fd885dd8dbb6fb4684da3f23b2585d8
+ size 11421892
cpt_qwen_14B/checkpoints/checkpoint-200/tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "add_prefix_space": false,
+ "backend": "tokenizers",
+ "bos_token": null,
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|endoftext|>",
+ "errors": "replace",
+ "extra_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "is_local": true,
+ "model_max_length": 32768,
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
cpt_qwen_14B/checkpoints/checkpoint-200/trainer_state.json ADDED
@@ -0,0 +1,1466 @@
1
+ {
2
+ "best_global_step": 200,
3
+ "best_metric": 0.7551760673522949,
4
+ "best_model_checkpoint": "runs/cpt_run_14b/checkpoints/checkpoint-200",
5
+ "epoch": 0.6113870844478411,
6
+ "eval_steps": 50,
7
+ "global_step": 200,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.003056935422239205,
14
+ "grad_norm": 0.06516239047050476,
15
+ "learning_rate": 0.0,
16
+ "loss": 1.138384461402893,
17
+ "step": 1
18
+ },
19
+ {
20
+ "epoch": 0.00611387084447841,
21
+ "grad_norm": 0.05343673378229141,
22
+ "learning_rate": 3.0303030303030305e-07,
23
+ "loss": 0.983342707157135,
24
+ "step": 2
25
+ },
26
+ {
27
+ "epoch": 0.009170806266717615,
28
+ "grad_norm": 0.05608418956398964,
29
+ "learning_rate": 6.060606060606061e-07,
30
+ "loss": 1.0762118101119995,
31
+ "step": 3
32
+ },
33
+ {
34
+ "epoch": 0.01222774168895682,
35
+ "grad_norm": 0.06523486226797104,
36
+ "learning_rate": 9.090909090909091e-07,
37
+ "loss": 1.084489345550537,
38
+ "step": 4
39
+ },
40
+ {
41
+ "epoch": 0.015284677111196026,
42
+ "grad_norm": 0.06582186371088028,
43
+ "learning_rate": 1.2121212121212122e-06,
44
+ "loss": 1.2037022113800049,
45
+ "step": 5
46
+ },
47
+ {
48
+ "epoch": 0.01834161253343523,
49
+ "grad_norm": 0.06097998470067978,
50
+ "learning_rate": 1.5151515151515152e-06,
51
+ "loss": 1.10005784034729,
52
+ "step": 6
53
+ },
54
+ {
55
+ "epoch": 0.021398547955674436,
56
+ "grad_norm": 0.10365528613328934,
57
+ "learning_rate": 1.8181818181818183e-06,
58
+ "loss": 1.0895193815231323,
59
+ "step": 7
60
+ },
61
+ {
62
+ "epoch": 0.02445548337791364,
63
+ "grad_norm": 0.06312141567468643,
64
+ "learning_rate": 2.1212121212121216e-06,
65
+ "loss": 1.0593242645263672,
66
+ "step": 8
67
+ },
68
+ {
69
+ "epoch": 0.027512418800152847,
70
+ "grad_norm": 0.05508403480052948,
71
+ "learning_rate": 2.4242424242424244e-06,
72
+ "loss": 0.9772955179214478,
73
+ "step": 9
74
+ },
75
+ {
76
+ "epoch": 0.030569354222392053,
77
+ "grad_norm": 0.06006711348891258,
78
+ "learning_rate": 2.7272727272727272e-06,
79
+ "loss": 1.084238886833191,
80
+ "step": 10
81
+ },
82
+ {
83
+ "epoch": 0.033626289644631255,
84
+ "grad_norm": 0.0588749423623085,
85
+ "learning_rate": 3.0303030303030305e-06,
86
+ "loss": 1.0786534547805786,
87
+ "step": 11
88
+ },
89
+ {
90
+ "epoch": 0.03668322506687046,
91
+ "grad_norm": 0.046551357954740524,
92
+ "learning_rate": 3.3333333333333333e-06,
93
+ "loss": 1.0370622873306274,
94
+ "step": 12
95
+ },
96
+ {
97
+ "epoch": 0.039740160489109666,
98
+ "grad_norm": 0.061659567058086395,
99
+ "learning_rate": 3.6363636363636366e-06,
100
+ "loss": 1.0646986961364746,
101
+ "step": 13
102
+ },
103
+ {
104
+ "epoch": 0.04279709591134887,
105
+ "grad_norm": 0.06007347255945206,
106
+ "learning_rate": 3.93939393939394e-06,
107
+ "loss": 1.0311307907104492,
108
+ "step": 14
109
+ },
110
+ {
111
+ "epoch": 0.04585403133358808,
112
+ "grad_norm": 0.07314135134220123,
113
+ "learning_rate": 4.242424242424243e-06,
114
+ "loss": 1.1300500631332397,
115
+ "step": 15
116
+ },
117
+ {
118
+ "epoch": 0.04891096675582728,
119
+ "grad_norm": 0.060934022068977356,
120
+ "learning_rate": 4.5454545454545455e-06,
121
+ "loss": 1.0197452306747437,
122
+ "step": 16
123
+ },
124
+ {
125
+ "epoch": 0.05196790217806649,
126
+ "grad_norm": 0.056856051087379456,
127
+ "learning_rate": 4.848484848484849e-06,
128
+ "loss": 1.0438549518585205,
129
+ "step": 17
130
+ },
131
+ {
132
+ "epoch": 0.055024837600305694,
133
+ "grad_norm": 0.05908689647912979,
134
+ "learning_rate": 5.151515151515152e-06,
135
+ "loss": 1.0398856401443481,
136
+ "step": 18
137
+ },
138
+ {
139
+ "epoch": 0.0580817730225449,
140
+ "grad_norm": 0.07411840558052063,
141
+ "learning_rate": 5.4545454545454545e-06,
142
+ "loss": 1.107885479927063,
143
+ "step": 19
144
+ },
145
+ {
146
+ "epoch": 0.061138708444784105,
147
+ "grad_norm": 0.0749165341258049,
148
+ "learning_rate": 5.7575757575757586e-06,
149
+ "loss": 1.1060967445373535,
150
+ "step": 20
151
+ },
152
+ {
153
+ "epoch": 0.06419564386702331,
154
+ "grad_norm": 0.06720177084207535,
155
+ "learning_rate": 6.060606060606061e-06,
156
+ "loss": 1.0471720695495605,
157
+ "step": 21
158
+ },
159
+ {
160
+ "epoch": 0.06725257928926251,
161
+ "grad_norm": 0.05990725755691528,
162
+ "learning_rate": 6.363636363636364e-06,
163
+ "loss": 1.0944981575012207,
164
+ "step": 22
165
+ },
166
+ {
167
+ "epoch": 0.07030951471150172,
168
+ "grad_norm": 0.06672193855047226,
169
+ "learning_rate": 6.666666666666667e-06,
170
+ "loss": 1.1477092504501343,
171
+ "step": 23
172
+ },
173
+ {
174
+ "epoch": 0.07336645013374092,
175
+ "grad_norm": 0.06145205348730087,
176
+ "learning_rate": 6.969696969696971e-06,
177
+ "loss": 1.0591784715652466,
178
+ "step": 24
179
+ },
180
+ {
181
+ "epoch": 0.07642338555598013,
182
+ "grad_norm": 0.0757482647895813,
183
+ "learning_rate": 7.272727272727273e-06,
184
+ "loss": 1.0500165224075317,
185
+ "step": 25
186
+ },
187
+ {
188
+ "epoch": 0.07948032097821933,
189
+ "grad_norm": 0.07848478108644485,
190
+ "learning_rate": 7.5757575757575764e-06,
191
+ "loss": 1.0747522115707397,
192
+ "step": 26
193
+ },
194
+ {
195
+ "epoch": 0.08253725640045854,
196
+ "grad_norm": 0.07740631699562073,
197
+ "learning_rate": 7.87878787878788e-06,
198
+ "loss": 1.132310152053833,
199
+ "step": 27
200
+ },
201
+ {
202
+ "epoch": 0.08559419182269774,
203
+ "grad_norm": 0.07476603239774704,
204
+ "learning_rate": 8.181818181818183e-06,
205
+ "loss": 1.0339502096176147,
206
+ "step": 28
207
+ },
208
+ {
209
+ "epoch": 0.08865112724493696,
210
+ "grad_norm": 0.0779196098446846,
211
+ "learning_rate": 8.484848484848486e-06,
212
+ "loss": 1.1047282218933105,
213
+ "step": 29
214
+ },
215
+ {
216
+ "epoch": 0.09170806266717615,
217
+ "grad_norm": 0.06962384283542633,
218
+ "learning_rate": 8.787878787878788e-06,
219
+ "loss": 1.004916787147522,
220
+ "step": 30
221
+ },
222
+ {
223
+ "epoch": 0.09476499808941537,
224
+ "grad_norm": 0.06369175016880035,
225
+ "learning_rate": 9.090909090909091e-06,
226
+ "loss": 0.9296417832374573,
227
+ "step": 31
228
+ },
229
+ {
230
+ "epoch": 0.09782193351165457,
231
+ "grad_norm": 0.07470260560512543,
232
+ "learning_rate": 9.393939393939396e-06,
233
+ "loss": 1.0721708536148071,
234
+ "step": 32
235
+ },
236
+ {
237
+ "epoch": 0.10087886893389378,
238
+ "grad_norm": 0.07948213815689087,
239
+ "learning_rate": 9.696969696969698e-06,
240
+ "loss": 1.0350117683410645,
241
+ "step": 33
242
+ },
243
+ {
244
+ "epoch": 0.10393580435613298,
245
+ "grad_norm": 0.07066022604703903,
246
+ "learning_rate": 1e-05,
247
+ "loss": 1.026305913925171,
248
+ "step": 34
249
+ },
250
+ {
251
+ "epoch": 0.10699273977837218,
252
+ "grad_norm": 0.07774543762207031,
253
+ "learning_rate": 1.0303030303030304e-05,
254
+ "loss": 1.0509816408157349,
255
+ "step": 35
256
+ },
257
+ {
258
+ "epoch": 0.11004967520061139,
259
+ "grad_norm": 0.07501248270273209,
260
+ "learning_rate": 1.0606060606060606e-05,
261
+ "loss": 1.0011574029922485,
262
+ "step": 36
263
+ },
264
+ {
265
+ "epoch": 0.11310661062285059,
266
+ "grad_norm": 0.6622501611709595,
267
+ "learning_rate": 1.0909090909090909e-05,
268
+ "loss": 0.9754424691200256,
269
+ "step": 37
270
+ },
271
+ {
272
+ "epoch": 0.1161635460450898,
273
+ "grad_norm": 0.07566080242395401,
274
+ "learning_rate": 1.1212121212121212e-05,
275
+ "loss": 1.0342774391174316,
276
+ "step": 38
277
+ },
278
+ {
279
+ "epoch": 0.119220481467329,
280
+ "grad_norm": 0.07573831081390381,
281
+ "learning_rate": 1.1515151515151517e-05,
282
+ "loss": 0.9714518785476685,
283
+ "step": 39
284
+ },
285
+ {
286
+ "epoch": 0.12227741688956821,
287
+ "grad_norm": 0.08083852380514145,
288
+ "learning_rate": 1.181818181818182e-05,
289
+ "loss": 1.1050316095352173,
290
+ "step": 40
291
+ },
292
+ {
293
+ "epoch": 0.12533435231180742,
294
+ "grad_norm": 0.08540588617324829,
295
+ "learning_rate": 1.2121212121212122e-05,
296
+ "loss": 1.0871070623397827,
297
+ "step": 41
298
+ },
299
+ {
300
+ "epoch": 0.12839128773404662,
301
+ "grad_norm": 0.07391592115163803,
302
+ "learning_rate": 1.2424242424242425e-05,
303
+ "loss": 1.0206722021102905,
304
+ "step": 42
305
+ },
306
+ {
307
+ "epoch": 0.13144822315628582,
308
+ "grad_norm": 0.07063689082860947,
309
+ "learning_rate": 1.2727272727272728e-05,
310
+ "loss": 0.9775047898292542,
311
+ "step": 43
312
+ },
313
+ {
314
+ "epoch": 0.13450515857852502,
315
+ "grad_norm": 0.07288888841867447,
316
+ "learning_rate": 1.3030303030303032e-05,
317
+ "loss": 1.1132858991622925,
318
+ "step": 44
319
+ },
320
+ {
321
+ "epoch": 0.13756209400076425,
322
+ "grad_norm": 0.07641777396202087,
323
+ "learning_rate": 1.3333333333333333e-05,
324
+ "loss": 1.0707701444625854,
325
+ "step": 45
326
+ },
327
+ {
328
+ "epoch": 0.14061902942300344,
329
+ "grad_norm": 0.06990326195955276,
330
+ "learning_rate": 1.3636363636363637e-05,
331
+ "loss": 0.9328265190124512,
332
+ "step": 46
333
+ },
334
+ {
335
+ "epoch": 0.14367596484524264,
336
+ "grad_norm": 0.0834241658449173,
337
+ "learning_rate": 1.3939393939393942e-05,
338
+ "loss": 1.0131721496582031,
339
+ "step": 47
340
+ },
341
+ {
342
+ "epoch": 0.14673290026748184,
343
+ "grad_norm": 0.0714937075972557,
344
+ "learning_rate": 1.4242424242424245e-05,
345
+ "loss": 0.940493106842041,
346
+ "step": 48
347
+ },
348
+ {
349
+ "epoch": 0.14978983568972107,
350
+ "grad_norm": 0.07770547270774841,
351
+ "learning_rate": 1.4545454545454546e-05,
352
+ "loss": 1.0435771942138672,
353
+ "step": 49
354
+ },
355
+ {
356
+ "epoch": 0.15284677111196027,
357
+ "grad_norm": 0.07950945198535919,
358
+ "learning_rate": 1.484848484848485e-05,
359
+ "loss": 1.0382137298583984,
360
+ "step": 50
361
+ },
362
+ {
363
+ "epoch": 0.15284677111196027,
364
+ "eval_loss": 1.0129202604293823,
365
+ "eval_runtime": 724.3664,
366
+ "eval_samples_per_second": 0.832,
367
+ "eval_steps_per_second": 0.832,
368
+ "step": 50
369
+ },
370
+ {
371
+ "epoch": 0.15590370653419947,
372
+ "grad_norm": 0.06961936503648758,
373
+ "learning_rate": 1.5151515151515153e-05,
374
+ "loss": 0.9690049886703491,
375
+ "step": 51
376
+ },
377
+ {
378
+ "epoch": 0.15896064195643866,
379
+ "grad_norm": 0.069523885846138,
380
+ "learning_rate": 1.5454545454545454e-05,
381
+ "loss": 0.9830482006072998,
382
+ "step": 52
383
+ },
384
+ {
385
+ "epoch": 0.16201757737867786,
386
+ "grad_norm": 0.0764622762799263,
387
+ "learning_rate": 1.575757575757576e-05,
388
+ "loss": 1.0895472764968872,
389
+ "step": 53
390
+ },
391
+ {
392
+ "epoch": 0.1650745128009171,
393
+ "grad_norm": 0.1413721889257431,
394
+ "learning_rate": 1.606060606060606e-05,
395
+ "loss": 1.0354574918746948,
396
+ "step": 54
397
+ },
398
+ {
399
+ "epoch": 0.1681314482231563,
400
+ "grad_norm": 0.06818042695522308,
401
+ "learning_rate": 1.6363636363636366e-05,
402
+ "loss": 0.8534265160560608,
403
+ "step": 55
404
+ },
405
+ {
406
+ "epoch": 0.1711883836453955,
407
+ "grad_norm": 0.0722246989607811,
408
+ "learning_rate": 1.6666666666666667e-05,
409
+ "loss": 0.9580274820327759,
410
+ "step": 56
411
+ },
412
+ {
413
+ "epoch": 0.17424531906763469,
414
+ "grad_norm": 0.07113443315029144,
415
+ "learning_rate": 1.6969696969696972e-05,
416
+ "loss": 1.0721848011016846,
417
+ "step": 57
418
+ },
419
+ {
420
+ "epoch": 0.1773022544898739,
421
+ "grad_norm": 0.08412107080221176,
422
+ "learning_rate": 1.7272727272727274e-05,
423
+ "loss": 1.1180150508880615,
424
+ "step": 58
425
+ },
426
+ {
427
+ "epoch": 0.1803591899121131,
428
+ "grad_norm": 0.07381036877632141,
429
+ "learning_rate": 1.7575757575757576e-05,
430
+ "loss": 1.0384547710418701,
431
+ "step": 59
432
+ },
433
+ {
434
+ "epoch": 0.1834161253343523,
435
+ "grad_norm": 0.07089001685380936,
436
+ "learning_rate": 1.787878787878788e-05,
437
+ "loss": 1.0446016788482666,
438
+ "step": 60
439
+ },
440
+ {
441
+ "epoch": 0.1864730607565915,
442
+ "grad_norm": 0.11576953530311584,
443
+ "learning_rate": 1.8181818181818182e-05,
444
+ "loss": 1.0015051364898682,
445
+ "step": 61
446
+ },
447
+ {
448
+ "epoch": 0.18952999617883073,
449
+ "grad_norm": 0.08030868321657181,
450
+ "learning_rate": 1.8484848484848487e-05,
451
+ "loss": 0.9642710089683533,
452
+ "step": 62
453
+ },
454
+ {
455
+ "epoch": 0.19258693160106993,
456
+ "grad_norm": 0.08332342654466629,
457
+ "learning_rate": 1.8787878787878792e-05,
458
+ "loss": 1.0722991228103638,
459
+ "step": 63
460
+ },
461
+ {
462
+ "epoch": 0.19564386702330913,
463
+ "grad_norm": 0.08000365644693375,
464
+ "learning_rate": 1.9090909090909094e-05,
465
+ "loss": 1.0104647874832153,
466
+ "step": 64
467
+ },
468
+ {
469
+ "epoch": 0.19870080244554833,
470
+ "grad_norm": 0.08139508217573166,
471
+ "learning_rate": 1.9393939393939395e-05,
472
+ "loss": 0.9445061087608337,
473
+ "step": 65
474
+ },
475
+ {
476
+ "epoch": 0.20175773786778756,
477
+ "grad_norm": 0.08749893307685852,
478
+ "learning_rate": 1.96969696969697e-05,
479
+ "loss": 1.080810308456421,
480
+ "step": 66
481
+ },
482
+ {
483
+ "epoch": 0.20481467329002676,
484
+ "grad_norm": 0.0786912813782692,
485
+ "learning_rate": 2e-05,
486
+ "loss": 0.9705753922462463,
487
+ "step": 67
488
+ },
489
+ {
490
+ "epoch": 0.20787160871226595,
491
+ "grad_norm": 0.08962028473615646,
492
+ "learning_rate": 1.9999858236410775e-05,
493
+ "loss": 0.962783694267273,
494
+ "step": 68
495
+ },
496
+ {
497
+ "epoch": 0.21092854413450515,
498
+ "grad_norm": 0.08402887731790543,
499
+ "learning_rate": 1.9999432949662483e-05,
500
+ "loss": 0.9959614872932434,
501
+ "step": 69
502
+ },
503
+ {
504
+ "epoch": 0.21398547955674435,
505
+ "grad_norm": 0.08036444336175919,
506
+ "learning_rate": 1.9998724151813157e-05,
507
+ "loss": 0.9569960832595825,
508
+ "step": 70
509
+ },
510
+ {
511
+ "epoch": 0.21704241497898358,
512
+ "grad_norm": 0.08247046917676926,
513
+ "learning_rate": 1.9997731862959143e-05,
514
+ "loss": 1.0012171268463135,
515
+ "step": 71
516
+ },
517
+ {
518
+ "epoch": 0.22009935040122278,
519
+ "grad_norm": 0.08966264873743057,
520
+ "learning_rate": 1.999645611123453e-05,
521
+ "loss": 1.0403809547424316,
522
+ "step": 72
523
+ },
524
+ {
525
+ "epoch": 0.22315628582346198,
526
+ "grad_norm": 0.08061660826206207,
527
+ "learning_rate": 1.999489693281034e-05,
528
+ "loss": 1.0089740753173828,
529
+ "step": 73
530
+ },
531
+ {
532
+ "epoch": 0.22621322124570117,
533
+ "grad_norm": 0.09005365520715714,
534
+ "learning_rate": 1.9993054371893526e-05,
535
+ "loss": 0.9333044290542603,
536
+ "step": 74
537
+ },
538
+ {
539
+ "epoch": 0.2292701566679404,
540
+ "grad_norm": 0.08651519566774368,
541
+ "learning_rate": 1.9990928480725694e-05,
542
+ "loss": 0.9284015893936157,
543
+ "step": 75
544
+ },
545
+ {
546
+ "epoch": 0.2323270920901796,
547
+ "grad_norm": 0.08141147345304489,
548
+ "learning_rate": 1.9988519319581637e-05,
549
+ "loss": 0.9782730340957642,
550
+ "step": 76
551
+ },
552
+ {
553
+ "epoch": 0.2353840275124188,
554
+ "grad_norm": 0.08344405144453049,
555
+ "learning_rate": 1.998582695676762e-05,
556
+ "loss": 0.9723064303398132,
557
+ "step": 77
558
+ },
559
+ {
560
+ "epoch": 0.238440962934658,
561
+ "grad_norm": 0.08019903302192688,
562
+ "learning_rate": 1.998285146861945e-05,
563
+ "loss": 0.9648997783660889,
564
+ "step": 78
565
+ },
566
+ {
567
+ "epoch": 0.24149789835689722,
568
+ "grad_norm": 0.08113416284322739,
569
+ "learning_rate": 1.99795929395003e-05,
570
+ "loss": 0.9263214468955994,
571
+ "step": 79
572
+ },
573
+ {
574
+ "epoch": 0.24455483377913642,
575
+ "grad_norm": 0.08127513527870178,
576
+ "learning_rate": 1.997605146179833e-05,
577
+ "loss": 0.8745232224464417,
578
+ "step": 80
579
+ },
580
+ {
581
+ "epoch": 0.24761176920137562,
582
+ "grad_norm": 0.09934187680482864,
583
+ "learning_rate": 1.997222713592405e-05,
584
+ "loss": 0.8722782135009766,
585
+ "step": 81
586
+ },
587
+ {
588
+ "epoch": 0.25066870462361485,
589
+ "grad_norm": 0.09701363742351532,
590
+ "learning_rate": 1.9968120070307503e-05,
591
+ "loss": 1.0084266662597656,
592
+ "step": 82
593
+ },
594
+ {
595
+ "epoch": 0.253725640045854,
596
+ "grad_norm": 0.08335654437541962,
597
+ "learning_rate": 1.9963730381395154e-05,
598
+ "loss": 0.9239332675933838,
599
+ "step": 83
600
+ },
601
+ {
602
+ "epoch": 0.25678257546809324,
603
+ "grad_norm": 0.09161650389432907,
604
+ "learning_rate": 1.9959058193646618e-05,
605
+ "loss": 0.9878032207489014,
606
+ "step": 84
607
+ },
608
+ {
609
+ "epoch": 0.2598395108903324,
610
+ "grad_norm": 0.08067663013935089,
611
+ "learning_rate": 1.9954103639531116e-05,
612
+ "loss": 0.9113098382949829,
613
+ "step": 85
614
+ },
615
+ {
616
+ "epoch": 0.26289644631257164,
617
+ "grad_norm": 0.09619539976119995,
618
+ "learning_rate": 1.9948866859523717e-05,
619
+ "loss": 0.9527600407600403,
620
+ "step": 86
621
+ },
622
+ {
623
+ "epoch": 0.26595338173481087,
624
+ "grad_norm": 0.10015493631362915,
625
+ "learning_rate": 1.9943348002101374e-05,
626
+ "loss": 0.9569152593612671,
627
+ "step": 87
628
+ },
629
+ {
630
+ "epoch": 0.26901031715705004,
631
+ "grad_norm": 0.09012345969676971,
632
+ "learning_rate": 1.993754722373869e-05,
633
+ "loss": 0.8912045359611511,
634
+ "step": 88
635
+ },
636
+ {
637
+ "epoch": 0.27206725257928926,
638
+ "grad_norm": 0.10342805832624435,
639
+ "learning_rate": 1.9931464688903502e-05,
640
+ "loss": 0.856104850769043,
641
+ "step": 89
642
+ },
643
+ {
644
+ "epoch": 0.2751241880015285,
645
+ "grad_norm": 0.10218493640422821,
646
+ "learning_rate": 1.9925100570052194e-05,
647
+ "loss": 0.9631397128105164,
648
+ "step": 90
649
+ },
650
+ {
651
+ "epoch": 0.27818112342376766,
652
+ "grad_norm": 0.10909046977758408,
653
+ "learning_rate": 1.9918455047624847e-05,
654
+ "loss": 0.8532565236091614,
655
+ "step": 91
656
+ },
657
+ {
658
+ "epoch": 0.2812380588460069,
659
+ "grad_norm": 0.10714197903871536,
660
+ "learning_rate": 1.9911528310040073e-05,
661
+ "loss": 0.9691859483718872,
662
+ "step": 92
663
+ },
664
+ {
665
+ "epoch": 0.28429499426824606,
666
+ "grad_norm": 0.1108694076538086,
667
+ "learning_rate": 1.990432055368971e-05,
668
+ "loss": 0.9374334812164307,
669
+ "step": 93
670
+ },
671
+ {
672
+ "epoch": 0.2873519296904853,
673
+ "grad_norm": 0.10037308186292648,
674
+ "learning_rate": 1.989683198293324e-05,
675
+ "loss": 0.9166896343231201,
676
+ "step": 94
677
+ },
678
+ {
679
+ "epoch": 0.2904088651127245,
680
+ "grad_norm": 0.10246684402227402,
681
+ "learning_rate": 1.9889062810092002e-05,
682
+ "loss": 1.0059239864349365,
683
+ "step": 95
684
+ },
685
+ {
686
+ "epoch": 0.2934658005349637,
687
+ "grad_norm": 0.09954962879419327,
688
+ "learning_rate": 1.9881013255443152e-05,
689
+ "loss": 1.00413179397583,
690
+ "step": 96
691
+ },
692
+ {
693
+ "epoch": 0.2965227359572029,
694
+ "grad_norm": 0.11006761342287064,
695
+ "learning_rate": 1.9872683547213446e-05,
696
+ "loss": 0.9414035677909851,
697
+ "step": 97
698
+ },
699
+ {
700
+ "epoch": 0.29957967137944214,
701
+ "grad_norm": 0.1014382541179657,
702
+ "learning_rate": 1.9864073921572756e-05,
703
+ "loss": 0.9155468940734863,
704
+ "step": 98
705
+ },
706
+ {
707
+ "epoch": 0.3026366068016813,
708
+ "grad_norm": 0.09883157908916473,
709
+ "learning_rate": 1.9855184622627362e-05,
710
+ "loss": 0.9429305195808411,
711
+ "step": 99
712
+ },
713
+ {
714
+ "epoch": 0.30569354222392053,
715
+ "grad_norm": 0.11199072748422623,
716
+ "learning_rate": 1.9846015902413053e-05,
717
+ "loss": 0.9143528342247009,
718
+ "step": 100
719
+ },
720
+ {
721
+ "epoch": 0.30569354222392053,
722
+ "eval_loss": 0.884428083896637,
723
+ "eval_runtime": 723.8143,
724
+ "eval_samples_per_second": 0.833,
725
+ "eval_steps_per_second": 0.833,
726
+ "step": 100
727
+ },
728
+ {
729
+ "epoch": 0.3087504776461597,
730
+ "grad_norm": 0.10796016454696655,
731
+ "learning_rate": 1.9836568020887963e-05,
732
+ "loss": 0.9726455211639404,
733
+ "step": 101
734
+ },
735
+ {
736
+ "epoch": 0.31180741306839893,
737
+ "grad_norm": 0.10056383162736893,
738
+ "learning_rate": 1.982684124592521e-05,
739
+ "loss": 0.8932135701179504,
740
+ "step": 102
741
+ },
742
+ {
743
+ "epoch": 0.31486434849063816,
744
+ "grad_norm": 0.10836594551801682,
745
+ "learning_rate": 1.9816835853305306e-05,
746
+ "loss": 0.919749915599823,
747
+ "step": 103
748
+ },
749
+ {
750
+ "epoch": 0.31792128391287733,
751
+ "grad_norm": 0.12032149732112885,
752
+ "learning_rate": 1.9806552126708322e-05,
753
+ "loss": 0.871781587600708,
754
+ "step": 104
755
+ },
756
+ {
757
+ "epoch": 0.32097821933511655,
758
+ "grad_norm": 0.10854160040616989,
759
+ "learning_rate": 1.9795990357705853e-05,
760
+ "loss": 0.8587784171104431,
761
+ "step": 105
762
+ },
763
+ {
764
+ "epoch": 0.3240351547573557,
765
+ "grad_norm": 0.10819399356842041,
766
+ "learning_rate": 1.978515084575276e-05,
767
+ "loss": 0.8524806499481201,
768
+ "step": 106
769
+ },
770
+ {
771
+ "epoch": 0.32709209017959495,
772
+ "grad_norm": 0.10226067155599594,
773
+ "learning_rate": 1.9774033898178668e-05,
774
+ "loss": 0.7892144918441772,
775
+ "step": 107
776
+ },
777
+ {
778
+ "epoch": 0.3301490256018342,
779
+ "grad_norm": 0.1071159616112709,
780
+ "learning_rate": 1.976263983017925e-05,
781
+ "loss": 0.8833234906196594,
782
+ "step": 108
783
+ },
784
+ {
785
+ "epoch": 0.33320596102407335,
786
+ "grad_norm": 0.11434526741504669,
787
+ "learning_rate": 1.9750968964807305e-05,
788
+ "loss": 0.861842155456543,
789
+ "step": 109
790
+ },
791
+ {
792
+ "epoch": 0.3362628964463126,
793
+ "grad_norm": 0.1159641221165657,
794
+ "learning_rate": 1.9739021632963584e-05,
795
+ "loss": 0.8987889289855957,
796
+ "step": 110
797
+ },
798
+ {
799
+ "epoch": 0.3393198318685518,
800
+ "grad_norm": 0.12371373921632767,
801
+ "learning_rate": 1.9726798173387417e-05,
802
+ "loss": 0.9710193872451782,
803
+ "step": 111
804
+ },
805
+ {
806
+ "epoch": 0.342376767290791,
807
+ "grad_norm": 0.11441531032323837,
808
+ "learning_rate": 1.97142989326471e-05,
809
+ "loss": 0.8199151158332825,
810
+ "step": 112
811
+ },
812
+ {
813
+ "epoch": 0.3454337027130302,
814
+ "grad_norm": 0.11842846125364304,
815
+ "learning_rate": 1.9701524265130088e-05,
816
+ "loss": 0.8845276236534119,
817
+ "step": 113
818
+ },
819
+ {
820
+ "epoch": 0.34849063813526937,
821
+ "grad_norm": 0.10813732445240021,
822
+ "learning_rate": 1.9688474533032916e-05,
823
+ "loss": 0.7964264750480652,
824
+ "step": 114
825
+ },
826
+ {
827
+ "epoch": 0.3515475735575086,
828
+ "grad_norm": 0.11050347238779068,
829
+ "learning_rate": 1.9675150106350957e-05,
830
+ "loss": 0.9630422592163086,
831
+ "step": 115
832
+ },
833
+ {
834
+ "epoch": 0.3546045089797478,
835
+ "grad_norm": 0.10537250339984894,
836
+ "learning_rate": 1.9661551362867926e-05,
837
+ "loss": 0.7706905007362366,
838
+ "step": 116
839
+ },
840
+ {
841
+ "epoch": 0.357661444401987,
842
+ "grad_norm": 0.11390368640422821,
843
+ "learning_rate": 1.9647678688145163e-05,
844
+ "loss": 0.8541204929351807,
845
+ "step": 117
846
+ },
847
+ {
848
+ "epoch": 0.3607183798242262,
849
+ "grad_norm": 0.10318922251462936,
850
+ "learning_rate": 1.963353247551069e-05,
851
+ "loss": 0.7400562763214111,
852
+ "step": 118
853
+ },
854
+ {
855
+ "epoch": 0.3637753152464654,
856
+ "grad_norm": 0.1347586214542389,
857
+ "learning_rate": 1.9619113126048086e-05,
858
+ "loss": 0.9232871532440186,
859
+ "step": 119
860
+ },
861
+ {
862
+ "epoch": 0.3668322506687046,
863
+ "grad_norm": 0.11458177119493484,
864
+ "learning_rate": 1.96044210485851e-05,
865
+ "loss": 0.833285927772522,
866
+ "step": 120
867
+ },
868
+ {
869
+ "epoch": 0.36988918609094384,
870
+ "grad_norm": 0.12361041456460953,
871
+ "learning_rate": 1.958945665968206e-05,
872
+ "loss": 0.7887391448020935,
873
+ "step": 121
874
+ },
875
+ {
876
+ "epoch": 0.372946121513183,
877
+ "grad_norm": 0.11985408514738083,
878
+ "learning_rate": 1.9574220383620054e-05,
879
+ "loss": 0.8206446170806885,
880
+ "step": 122
881
+ },
882
+ {
883
+ "epoch": 0.37600305693542224,
884
+ "grad_norm": 0.1355939507484436,
885
+ "learning_rate": 1.9558712652388932e-05,
886
+ "loss": 0.7648542523384094,
887
+ "step": 123
888
+ },
889
+ {
890
+ "epoch": 0.37905999235766147,
891
+ "grad_norm": 0.1229313388466835,
892
+ "learning_rate": 1.954293390567501e-05,
893
+ "loss": 0.8573335409164429,
894
+ "step": 124
895
+ },
896
+ {
897
+ "epoch": 0.38211692777990064,
898
+ "grad_norm": 0.11425124108791351,
899
+ "learning_rate": 1.9526884590848646e-05,
900
+ "loss": 0.7412531971931458,
901
+ "step": 125
902
+ },
903
+ {
904
+ "epoch": 0.38517386320213987,
905
+ "grad_norm": 0.12430041283369064,
906
+ "learning_rate": 1.9510565162951538e-05,
907
+ "loss": 0.8098543882369995,
908
+ "step": 126
909
+ },
910
+ {
911
+ "epoch": 0.38823079862437904,
912
+ "grad_norm": 0.12492368370294571,
913
+ "learning_rate": 1.9493976084683814e-05,
914
+ "loss": 0.8814713954925537,
915
+ "step": 127
916
+ },
917
+ {
918
+ "epoch": 0.39128773404661826,
919
+ "grad_norm": 0.14428824186325073,
920
+ "learning_rate": 1.9477117826390934e-05,
921
+ "loss": 0.8231979608535767,
922
+ "step": 128
923
+ },
924
+ {
925
+ "epoch": 0.3943446694688575,
926
+ "grad_norm": 0.12010085582733154,
927
+ "learning_rate": 1.9459990866050337e-05,
928
+ "loss": 0.7015627026557922,
929
+ "step": 129
930
+ },
931
+ {
932
+ "epoch": 0.39740160489109666,
933
+ "grad_norm": 0.11819776892662048,
934
+ "learning_rate": 1.9442595689257898e-05,
935
+ "loss": 0.8086729645729065,
936
+ "step": 130
937
+ },
938
+ {
939
+ "epoch": 0.4004585403133359,
940
+ "grad_norm": 0.12211033701896667,
941
+ "learning_rate": 1.9424932789214158e-05,
942
+ "loss": 0.8234002590179443,
943
+ "step": 131
944
+ },
945
+ {
946
+ "epoch": 0.4035154757355751,
947
+ "grad_norm": 0.14926476776599884,
948
+ "learning_rate": 1.9407002666710334e-05,
949
+ "loss": 0.874608039855957,
950
+ "step": 132
951
+ },
952
+ {
953
+ "epoch": 0.4065724111578143,
954
+ "grad_norm": 0.13012923300266266,
955
+ "learning_rate": 1.9388805830114132e-05,
956
+ "loss": 0.8491607904434204,
957
+ "step": 133
958
+ },
959
+ {
960
+ "epoch": 0.4096293465800535,
961
+ "grad_norm": 0.12012261897325516,
962
+ "learning_rate": 1.937034279535533e-05,
963
+ "loss": 0.7269159555435181,
964
+ "step": 134
965
+ },
966
+ {
967
+ "epoch": 0.4126862820022927,
968
+ "grad_norm": 0.15302567183971405,
969
+ "learning_rate": 1.9351614085911134e-05,
970
+ "loss": 0.8560839891433716,
971
+ "step": 135
972
+ },
973
+ {
974
+ "epoch": 0.4157432174245319,
975
+ "grad_norm": 0.12234190106391907,
976
+ "learning_rate": 1.933262023279137e-05,
977
+ "loss": 0.8211904764175415,
978
+ "step": 136
979
+ },
980
+ {
981
+ "epoch": 0.41880015284677113,
982
+ "grad_norm": 0.14427296817302704,
983
+ "learning_rate": 1.9313361774523387e-05,
984
+ "loss": 0.8500057458877563,
985
+ "step": 137
986
+ },
987
+ {
988
+ "epoch": 0.4218570882690103,
989
+ "grad_norm": 0.1314094066619873,
990
+ "learning_rate": 1.929383925713682e-05,
991
+ "loss": 0.7589091658592224,
992
+ "step": 138
993
+ },
994
+ {
995
+ "epoch": 0.42491402369124953,
996
+ "grad_norm": 0.1576734483242035,
997
+ "learning_rate": 1.92740532341481e-05,
998
+ "loss": 0.7581073641777039,
999
+ "step": 139
1000
+ },
1001
+ {
1002
+ "epoch": 0.4279709591134887,
1003
+ "grad_norm": 0.15788713097572327,
1004
+ "learning_rate": 1.925400426654475e-05,
1005
+ "loss": 0.809050440788269,
1006
+ "step": 140
1007
+ },
1008
+ {
1009
+ "epoch": 0.43102789453572793,
1010
+ "grad_norm": 0.13364559412002563,
1011
+ "learning_rate": 1.9233692922769497e-05,
1012
+ "loss": 0.7990086078643799,
1013
+ "step": 141
1014
+ },
1015
+ {
1016
+ "epoch": 0.43408482995796716,
1017
+ "grad_norm": 0.14786465466022491,
1018
+ "learning_rate": 1.921311977870413e-05,
1019
+ "loss": 0.8675815463066101,
1020
+ "step": 142
1021
+ },
1022
+ {
1023
+ "epoch": 0.4371417653802063,
1024
+ "grad_norm": 0.14621882140636444,
1025
+ "learning_rate": 1.9192285417653208e-05,
1026
+ "loss": 0.8713765740394592,
1027
+ "step": 143
1028
+ },
1029
+ {
1030
+ "epoch": 0.44019870080244555,
1031
+ "grad_norm": 0.12874048948287964,
1032
+ "learning_rate": 1.917119043032749e-05,
1033
+ "loss": 0.7361871004104614,
1034
+ "step": 144
1035
+ },
1036
+ {
1037
+ "epoch": 0.4432556362246848,
1038
+ "grad_norm": 0.12183775007724762,
1039
+ "learning_rate": 1.9149835414827193e-05,
1040
+ "loss": 0.7311941385269165,
1041
+ "step": 145
1042
+ },
1043
+ {
1044
+ "epoch": 0.44631257164692395,
1045
+ "grad_norm": 0.1397160291671753,
1046
+ "learning_rate": 1.912822097662505e-05,
1047
+ "loss": 0.8189159035682678,
1048
+ "step": 146
1049
+ },
1050
+ {
1051
+ "epoch": 0.4493695070691632,
1052
+ "grad_norm": 0.1458273082971573,
1053
+ "learning_rate": 1.9106347728549134e-05,
1054
+ "loss": 0.8288135528564453,
1055
+ "step": 147
1056
+ },
1057
+ {
1058
+ "epoch": 0.45242644249140235,
1059
+ "grad_norm": 0.16898781061172485,
1060
+ "learning_rate": 1.908421629076547e-05,
1061
+ "loss": 0.7878037095069885,
1062
+ "step": 148
1063
+ },
1064
+ {
1065
+ "epoch": 0.4554833779136416,
1066
+ "grad_norm": 0.1638474315404892,
1067
+ "learning_rate": 1.9061827290760466e-05,
1068
+ "loss": 0.8059952259063721,
1069
+ "step": 149
1070
+ },
1071
+ {
1072
+ "epoch": 0.4585403133358808,
1073
+ "grad_norm": 0.14130882918834686,
1074
+ "learning_rate": 1.9039181363323128e-05,
1075
+ "loss": 0.7346830368041992,
1076
+ "step": 150
1077
+ },
1078
+ {
1079
+ "epoch": 0.4585403133358808,
1080
+ "eval_loss": 0.7979016900062561,
1081
+ "eval_runtime": 828.6295,
1082
+ "eval_samples_per_second": 0.728,
1083
+ "eval_steps_per_second": 0.728,
1084
+ "step": 150
1085
+ },
1086
+ {
1087
+ "epoch": 0.46159724875811997,
1088
+ "grad_norm": 0.14427433907985687,
1089
+ "learning_rate": 1.9016279150527044e-05,
1090
+ "loss": 0.7583403587341309,
1091
+ "step": 151
1092
+ },
1093
+ {
1094
+ "epoch": 0.4646541841803592,
1095
+ "grad_norm": 0.1515798568725586,
1096
+ "learning_rate": 1.8993121301712194e-05,
1097
+ "loss": 0.7908380031585693,
1098
+ "step": 152
1099
+ },
1100
+ {
1101
+ "epoch": 0.46771111960259837,
1102
+ "grad_norm": 0.14444488286972046,
1103
+ "learning_rate": 1.896970847346653e-05,
1104
+ "loss": 0.7916130423545837,
1105
+ "step": 153
1106
+ },
1107
+ {
1108
+ "epoch": 0.4707680550248376,
1109
+ "grad_norm": 0.1460912823677063,
1110
+ "learning_rate": 1.8946041329607364e-05,
1111
+ "loss": 0.7750643491744995,
1112
+ "step": 154
1113
+ },
1114
+ {
1115
+ "epoch": 0.4738249904470768,
1116
+ "grad_norm": 0.13896244764328003,
1117
+ "learning_rate": 1.892212054116255e-05,
1118
+ "loss": 0.8059666156768799,
1119
+ "step": 155
1120
+ },
1121
+ {
1122
+ "epoch": 0.476881925869316,
1123
+ "grad_norm": 0.16133630275726318,
1124
+ "learning_rate": 1.889794678635145e-05,
1125
+ "loss": 0.8327827453613281,
1126
+ "step": 156
1127
+ },
1128
+ {
1129
+ "epoch": 0.4799388612915552,
1130
+ "grad_norm": 0.1474636346101761,
1131
+ "learning_rate": 1.8873520750565716e-05,
1132
+ "loss": 0.8498989343643188,
1133
+ "step": 157
1134
+ },
1135
+ {
1136
+ "epoch": 0.48299579671379445,
1137
+ "grad_norm": 0.17222349345684052,
1138
+ "learning_rate": 1.884884312634985e-05,
1139
+ "loss": 0.7750177979469299,
1140
+ "step": 158
1141
+ },
1142
+ {
1143
+ "epoch": 0.4860527321360336,
1144
+ "grad_norm": 0.15558090806007385,
1145
+ "learning_rate": 1.8823914613381568e-05,
1146
+ "loss": 0.7326169013977051,
1147
+ "step": 159
1148
+ },
1149
+ {
1150
+ "epoch": 0.48910966755827284,
1151
+ "grad_norm": 0.13808321952819824,
1152
+ "learning_rate": 1.8798735918451963e-05,
1153
+ "loss": 0.8308709859848022,
1154
+ "step": 160
1155
+ },
1156
+ {
1157
+ "epoch": 0.492166602980512,
1158
+ "grad_norm": 0.1761898398399353,
1159
+ "learning_rate": 1.8773307755445468e-05,
1160
+ "loss": 0.7805465459823608,
1161
+ "step": 161
1162
+ },
1163
+ {
1164
+ "epoch": 0.49522353840275124,
1165
+ "grad_norm": 0.160477414727211,
1166
+ "learning_rate": 1.874763084531961e-05,
1167
+ "loss": 0.8538846969604492,
1168
+ "step": 162
1169
+ },
1170
+ {
1171
+ "epoch": 0.49828047382499047,
1172
+ "grad_norm": 0.15238745510578156,
1173
+ "learning_rate": 1.872170591608459e-05,
1174
+ "loss": 0.8801217675209045,
1175
+ "step": 163
1176
+ },
1177
+ {
1178
+ "epoch": 0.5013374092472297,
1179
+ "grad_norm": 0.1567080318927765,
1180
+ "learning_rate": 1.86955337027826e-05,
1181
+ "loss": 0.7205259799957275,
1182
+ "step": 164
1183
+ },
1184
+ {
1185
+ "epoch": 0.5043943446694689,
1186
+ "grad_norm": 0.13637851178646088,
1187
+ "learning_rate": 1.866911494746702e-05,
1188
+ "loss": 0.7636491656303406,
1189
+ "step": 165
1190
+ },
1191
+ {
1192
+ "epoch": 0.507451280091708,
1193
+ "grad_norm": 0.15563489496707916,
1194
+ "learning_rate": 1.8642450399181373e-05,
1195
+ "loss": 0.7982497811317444,
1196
+ "step": 166
1197
+ },
1198
+ {
1199
+ "epoch": 0.5105082155139473,
1200
+ "grad_norm": 0.15503396093845367,
1201
+ "learning_rate": 1.8615540813938063e-05,
1202
+ "loss": 0.8737778067588806,
1203
+ "step": 167
1204
+ },
1205
+ {
1206
+ "epoch": 0.5135651509361865,
1207
+ "grad_norm": 0.16095557808876038,
1208
+ "learning_rate": 1.8588386954696972e-05,
1209
+ "loss": 0.796604335308075,
1210
+ "step": 168
1211
+ },
1212
+ {
1213
+ "epoch": 0.5166220863584257,
1214
+ "grad_norm": 0.1713593453168869,
1215
+ "learning_rate": 1.856098959134381e-05,
1216
+ "loss": 0.8247392177581787,
1217
+ "step": 169
1218
+ },
1219
+ {
1220
+ "epoch": 0.5196790217806648,
1221
+ "grad_norm": 0.18239113688468933,
1222
+ "learning_rate": 1.8533349500668295e-05,
1223
+ "loss": 0.7838484644889832,
1224
+ "step": 170
1225
+ },
1226
+ {
1227
+ "epoch": 0.5227359572029041,
1228
+ "grad_norm": 0.15745767951011658,
1229
+ "learning_rate": 1.850546746634211e-05,
1230
+ "loss": 0.7856907248497009,
1231
+ "step": 171
1232
+ },
1233
+ {
1234
+ "epoch": 0.5257928926251433,
1235
+ "grad_norm": 0.16820666193962097,
1236
+ "learning_rate": 1.8477344278896708e-05,
1237
+ "loss": 0.7829679846763611,
1238
+ "step": 172
1239
+ },
1240
+ {
1241
+ "epoch": 0.5288498280473825,
1242
+ "grad_norm": 0.16975544393062592,
1243
+ "learning_rate": 1.84489807357009e-05,
1244
+ "loss": 0.7374375462532043,
1245
+ "step": 173
1246
+ },
1247
+ {
1248
+ "epoch": 0.5319067634696217,
1249
+ "grad_norm": 0.167228102684021,
1250
+ "learning_rate": 1.8420377640938204e-05,
1251
+ "loss": 0.712837815284729,
1252
+ "step": 174
1253
+ },
1254
+ {
1255
+ "epoch": 0.5349636988918609,
1256
+ "grad_norm": 0.15955154597759247,
1257
+ "learning_rate": 1.839153580558411e-05,
1258
+ "loss": 0.7645693421363831,
1259
+ "step": 175
1260
+ },
1261
+ {
1262
+ "epoch": 0.5380206343141001,
1263
+ "grad_norm": 0.18378689885139465,
1264
+ "learning_rate": 1.8362456047383032e-05,
1265
+ "loss": 0.7974956631660461,
1266
+ "step": 176
1267
+ },
1268
+ {
1269
+ "epoch": 0.5410775697363394,
1270
+ "grad_norm": 0.15777672827243805,
1271
+ "learning_rate": 1.833313919082515e-05,
1272
+ "loss": 0.8957571983337402,
1273
+ "step": 177
1274
+ },
1275
+ {
1276
+ "epoch": 0.5441345051585785,
1277
+ "grad_norm": 0.15292386710643768,
1278
+ "learning_rate": 1.8303586067123028e-05,
1279
+ "loss": 0.7635619044303894,
1280
+ "step": 178
1281
+ },
1282
+ {
1283
+ "epoch": 0.5471914405808177,
1284
+ "grad_norm": 0.178152397274971,
1285
+ "learning_rate": 1.8273797514188043e-05,
1286
+ "loss": 0.7849246263504028,
1287
+ "step": 179
1288
+ },
1289
+ {
1290
+ "epoch": 0.550248376003057,
1291
+ "grad_norm": 0.15916013717651367,
1292
+ "learning_rate": 1.824377437660663e-05,
1293
+ "loss": 0.6975343227386475,
1294
+ "step": 180
1295
+ },
1296
+ {
1297
+ "epoch": 0.5533053114252962,
1298
+ "grad_norm": 0.18172231316566467,
1299
+ "learning_rate": 1.821351750561634e-05,
1300
+ "loss": 0.7675164341926575,
1301
+ "step": 181
1302
+ },
1303
+ {
1304
+ "epoch": 0.5563622468475353,
1305
+ "grad_norm": 0.16241903603076935,
1306
+ "learning_rate": 1.818302775908169e-05,
1307
+ "loss": 0.7950343489646912,
1308
+ "step": 182
1309
+ },
1310
+ {
1311
+ "epoch": 0.5594191822697746,
1312
+ "grad_norm": 0.18727579712867737,
1313
+ "learning_rate": 1.8152306001469875e-05,
1314
+ "loss": 0.787315309047699,
1315
+ "step": 183
1316
+ },
1317
+ {
1318
+ "epoch": 0.5624761176920138,
1319
+ "grad_norm": 0.1627933531999588,
1320
+ "learning_rate": 1.8121353103826213e-05,
1321
+ "loss": 0.7141211628913879,
1322
+ "step": 184
1323
+ },
1324
+ {
1325
+ "epoch": 0.565533053114253,
1326
+ "grad_norm": 0.4369247555732727,
1327
+ "learning_rate": 1.8090169943749477e-05,
1328
+ "loss": 0.8476608395576477,
1329
+ "step": 185
1330
+ },
1331
+ {
1332
+ "epoch": 0.5685899885364921,
1333
+ "grad_norm": 0.16494786739349365,
1334
+ "learning_rate": 1.8058757405367003e-05,
1335
+ "loss": 0.720562756061554,
1336
+ "step": 186
1337
+ },
1338
+ {
1339
+ "epoch": 0.5716469239587314,
1340
+ "grad_norm": 0.175015389919281,
1341
+ "learning_rate": 1.8027116379309637e-05,
1342
+ "loss": 0.7589252591133118,
1343
+ "step": 187
1344
+ },
1345
+ {
1346
+ "epoch": 0.5747038593809706,
1347
+ "grad_norm": 0.1769978553056717,
1348
+ "learning_rate": 1.799524776268646e-05,
1349
+ "loss": 0.7644155621528625,
1350
+ "step": 188
1351
+ },
1352
+ {
1353
+ "epoch": 0.5777607948032097,
1354
+ "grad_norm": 0.18481792509555817,
1355
+ "learning_rate": 1.796315245905936e-05,
1356
+ "loss": 0.7885835766792297,
1357
+ "step": 189
1358
+ },
1359
+ {
1360
+ "epoch": 0.580817730225449,
1361
+ "grad_norm": 0.1668689250946045,
1362
+ "learning_rate": 1.7930831378417437e-05,
1363
+ "loss": 0.7377231121063232,
1364
+ "step": 190
1365
+ },
1366
+ {
1367
+ "epoch": 0.5838746656476882,
1368
+ "grad_norm": 0.178734689950943,
1369
+ "learning_rate": 1.7898285437151163e-05,
1370
+ "loss": 0.7388894557952881,
1371
+ "step": 191
1372
+ },
1373
+ {
1374
+ "epoch": 0.5869316010699274,
1375
+ "grad_norm": 0.1740068644285202,
1376
+ "learning_rate": 1.786551555802643e-05,
1377
+ "loss": 0.8209859728813171,
1378
+ "step": 192
1379
+ },
1380
+ {
1381
+ "epoch": 0.5899885364921666,
1382
+ "grad_norm": 0.19211041927337646,
1383
+ "learning_rate": 1.783252267015837e-05,
1384
+ "loss": 0.7305737733840942,
1385
+ "step": 193
1386
+ },
1387
+ {
1388
+ "epoch": 0.5930454719144058,
1389
+ "grad_norm": 0.16644936800003052,
1390
+ "learning_rate": 1.779930770898503e-05,
1391
+ "loss": 0.7760804891586304,
1392
+ "step": 194
1393
+ },
1394
+ {
1395
+ "epoch": 0.596102407336645,
1396
+ "grad_norm": 0.1773686707019806,
1397
+ "learning_rate": 1.776587161624083e-05,
1398
+ "loss": 0.7879236936569214,
1399
+ "step": 195
1400
+ },
1401
+ {
1402
+ "epoch": 0.5991593427588843,
1403
+ "grad_norm": 0.17508819699287415,
1404
+ "learning_rate": 1.7732215339929874e-05,
1405
+ "loss": 0.7307407259941101,
1406
+ "step": 196
1407
+ },
1408
+ {
1409
+ "epoch": 0.6022162781811234,
1410
+ "grad_norm": 0.17211101949214935,
1411
+ "learning_rate": 1.7698339834299064e-05,
1412
+ "loss": 0.7293214797973633,
1413
+ "step": 197
1414
+ },
1415
+ {
1416
+ "epoch": 0.6052732136033626,
1417
+ "grad_norm": 0.18085215985774994,
1418
+ "learning_rate": 1.7664246059811058e-05,
1419
+ "loss": 0.763083279132843,
1420
+ "step": 198
1421
+ },
1422
+ {
1423
+ "epoch": 0.6083301490256018,
1424
+ "grad_norm": 0.20243075489997864,
1425
+ "learning_rate": 1.7629934983117025e-05,
1426
+ "loss": 0.7372676134109497,
1427
+ "step": 199
1428
+ },
1429
+ {
1430
+ "epoch": 0.6113870844478411,
1431
+ "grad_norm": 0.18152795732021332,
1432
+ "learning_rate": 1.759540757702924e-05,
1433
+ "loss": 0.7121898531913757,
1434
+ "step": 200
1435
+ },
1436
+ {
1437
+ "epoch": 0.6113870844478411,
1438
+ "eval_loss": 0.7551760673522949,
1439
+ "eval_runtime": 900.209,
1440
+ "eval_samples_per_second": 0.67,
1441
+ "eval_steps_per_second": 0.67,
1442
+ "step": 200
1443
+ }
1444
+ ],
1445
+ "logging_steps": 1,
1446
+ "max_steps": 656,
1447
+ "num_input_tokens_seen": 0,
1448
+ "num_train_epochs": 2,
1449
+ "save_steps": 100,
1450
+ "stateful_callbacks": {
1451
+ "TrainerControl": {
1452
+ "args": {
1453
+ "should_epoch_stop": false,
1454
+ "should_evaluate": false,
1455
+ "should_log": false,
1456
+ "should_save": true,
1457
+ "should_training_stop": false
1458
+ },
1459
+ "attributes": {}
1460
+ }
1461
+ },
1462
+ "total_flos": 1.1042918995329024e+18,
1463
+ "train_batch_size": 1,
1464
+ "trial_name": null,
1465
+ "trial_params": null
1466
+ }
cpt_qwen_14B/checkpoints/checkpoint-200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6a8e308e47eb936f678712445b19ddc52638f354c37c813ecaa432f69120a2e
+ size 5201
cpt_qwen_14B/checkpoints/checkpoint-300/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: /workspace/Models/Qwen2.5-Coder-14B
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:/workspace/Models/Qwen2.5-Coder-14B
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
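The adapter was saved with PEFT 0.18.0. A quick sanity check of the locally installed versions before loading it (a convenience snippet, not part of the generated card):

```python
# Print the library versions relevant to loading this adapter.
import peft
import torch
import transformers

print("peft:", peft.__version__)
print("transformers:", transformers.__version__)
print("torch:", torch.__version__)
```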
cpt_qwen_14B/checkpoints/checkpoint-300/adapter_config.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "/workspace/Models/Qwen2.5-Coder-14B",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 64,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
+ "r": 32,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "v_proj",
33
+ "q_proj",
34
+ "o_proj",
35
+ "k_proj"
36
+ ],
37
+ "target_parameters": null,
38
+ "task_type": "CAUSAL_LM",
39
+ "trainable_token_indices": null,
40
+ "use_dora": false,
41
+ "use_qalora": false,
42
+ "use_rslora": false
43
+ }
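For reference, the JSON above corresponds roughly to the following PEFT `LoraConfig` at training time. This is a reconstruction from the saved config, not the authors' training script.

```python
# Reconstruction of the LoRA settings in adapter_config.json; not the original training code.
from peft import LoraConfig

lora_config = LoraConfig(
    r=32,
    lora_alpha=64,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    task_type="CAUSAL_LM",
)
```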
cpt_qwen_14B/checkpoints/checkpoint-300/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c370210851f30b5de992e131f4276973cf2b5feb6969fb7638ee7128b6b9674
3
+ size 201378736
cpt_qwen_14B/checkpoints/checkpoint-300/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
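The template above appears to be the stock Qwen2.5 ChatML-style template with tool-call support. A quick way to inspect the rendered prompt is `tokenizer.apply_chat_template`; the checkpoint path and messages below are illustrative assumptions.

```python
# Render the chat template shipped with this checkpoint (illustrative inputs).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("cpt_qwen_14B/checkpoints/checkpoint-300")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)
```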
cpt_qwen_14B/checkpoints/checkpoint-300/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ad69f3254fd02c5361123fcfc2dea4b905fca69152def982a3356e7b8afd4ed
3
+ size 102698855
cpt_qwen_14B/checkpoints/checkpoint-300/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11c8108834e0d26f3ed6483fb470f43109ba4656c99180d6c32043763dd0a2df
3
+ size 14645
cpt_qwen_14B/checkpoints/checkpoint-300/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dd5d6b57b6230ca0af9ae6da5d18fafe1b14b1ea92b2fe466790a371d1f85fe
3
+ size 1465
cpt_qwen_14B/checkpoints/checkpoint-300/tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3fd169731d2cbde95e10bf356d66d5997fd885dd8dbb6fb4684da3f23b2585d8
3
+ size 11421892
cpt_qwen_14B/checkpoints/checkpoint-300/tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "backend": "tokenizers",
4
+ "bos_token": null,
5
+ "clean_up_tokenization_spaces": false,
6
+ "eos_token": "<|endoftext|>",
7
+ "errors": "replace",
8
+ "extra_special_tokens": [
9
+ "<|im_start|>",
10
+ "<|im_end|>",
11
+ "<|object_ref_start|>",
12
+ "<|object_ref_end|>",
13
+ "<|box_start|>",
14
+ "<|box_end|>",
15
+ "<|quad_start|>",
16
+ "<|quad_end|>",
17
+ "<|vision_start|>",
18
+ "<|vision_end|>",
19
+ "<|vision_pad|>",
20
+ "<|image_pad|>",
21
+ "<|video_pad|>"
22
+ ],
23
+ "is_local": true,
24
+ "model_max_length": 32768,
25
+ "pad_token": "<|endoftext|>",
26
+ "split_special_tokens": false,
27
+ "tokenizer_class": "Qwen2Tokenizer",
28
+ "unk_token": null
29
+ }
cpt_qwen_14B/checkpoints/checkpoint-300/trainer_state.json ADDED
@@ -0,0 +1,2182 @@
1
+ {
2
+ "best_global_step": 300,
3
+ "best_metric": 0.7063615918159485,
4
+ "best_model_checkpoint": "runs/cpt_run_14b/checkpoints/checkpoint-300",
5
+ "epoch": 0.9170806266717616,
6
+ "eval_steps": 50,
7
+ "global_step": 300,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.003056935422239205,
14
+ "grad_norm": 0.06516239047050476,
15
+ "learning_rate": 0.0,
16
+ "loss": 1.138384461402893,
17
+ "step": 1
18
+ },
19
+ {
20
+ "epoch": 0.00611387084447841,
21
+ "grad_norm": 0.05343673378229141,
22
+ "learning_rate": 3.0303030303030305e-07,
23
+ "loss": 0.983342707157135,
24
+ "step": 2
25
+ },
26
+ {
27
+ "epoch": 0.009170806266717615,
28
+ "grad_norm": 0.05608418956398964,
29
+ "learning_rate": 6.060606060606061e-07,
30
+ "loss": 1.0762118101119995,
31
+ "step": 3
32
+ },
33
+ {
34
+ "epoch": 0.01222774168895682,
35
+ "grad_norm": 0.06523486226797104,
36
+ "learning_rate": 9.090909090909091e-07,
37
+ "loss": 1.084489345550537,
38
+ "step": 4
39
+ },
40
+ {
41
+ "epoch": 0.015284677111196026,
42
+ "grad_norm": 0.06582186371088028,
43
+ "learning_rate": 1.2121212121212122e-06,
44
+ "loss": 1.2037022113800049,
45
+ "step": 5
46
+ },
47
+ {
48
+ "epoch": 0.01834161253343523,
49
+ "grad_norm": 0.06097998470067978,
50
+ "learning_rate": 1.5151515151515152e-06,
51
+ "loss": 1.10005784034729,
52
+ "step": 6
53
+ },
54
+ {
55
+ "epoch": 0.021398547955674436,
56
+ "grad_norm": 0.10365528613328934,
57
+ "learning_rate": 1.8181818181818183e-06,
58
+ "loss": 1.0895193815231323,
59
+ "step": 7
60
+ },
61
+ {
62
+ "epoch": 0.02445548337791364,
63
+ "grad_norm": 0.06312141567468643,
64
+ "learning_rate": 2.1212121212121216e-06,
65
+ "loss": 1.0593242645263672,
66
+ "step": 8
67
+ },
68
+ {
69
+ "epoch": 0.027512418800152847,
70
+ "grad_norm": 0.05508403480052948,
71
+ "learning_rate": 2.4242424242424244e-06,
72
+ "loss": 0.9772955179214478,
73
+ "step": 9
74
+ },
75
+ {
76
+ "epoch": 0.030569354222392053,
77
+ "grad_norm": 0.06006711348891258,
78
+ "learning_rate": 2.7272727272727272e-06,
79
+ "loss": 1.084238886833191,
80
+ "step": 10
81
+ },
82
+ {
83
+ "epoch": 0.033626289644631255,
84
+ "grad_norm": 0.0588749423623085,
85
+ "learning_rate": 3.0303030303030305e-06,
86
+ "loss": 1.0786534547805786,
87
+ "step": 11
88
+ },
89
+ {
90
+ "epoch": 0.03668322506687046,
91
+ "grad_norm": 0.046551357954740524,
92
+ "learning_rate": 3.3333333333333333e-06,
93
+ "loss": 1.0370622873306274,
94
+ "step": 12
95
+ },
96
+ {
97
+ "epoch": 0.039740160489109666,
98
+ "grad_norm": 0.061659567058086395,
99
+ "learning_rate": 3.6363636363636366e-06,
100
+ "loss": 1.0646986961364746,
101
+ "step": 13
102
+ },
103
+ {
104
+ "epoch": 0.04279709591134887,
105
+ "grad_norm": 0.06007347255945206,
106
+ "learning_rate": 3.93939393939394e-06,
107
+ "loss": 1.0311307907104492,
108
+ "step": 14
109
+ },
110
+ {
111
+ "epoch": 0.04585403133358808,
112
+ "grad_norm": 0.07314135134220123,
113
+ "learning_rate": 4.242424242424243e-06,
114
+ "loss": 1.1300500631332397,
115
+ "step": 15
116
+ },
117
+ {
118
+ "epoch": 0.04891096675582728,
119
+ "grad_norm": 0.060934022068977356,
120
+ "learning_rate": 4.5454545454545455e-06,
121
+ "loss": 1.0197452306747437,
122
+ "step": 16
123
+ },
124
+ {
125
+ "epoch": 0.05196790217806649,
126
+ "grad_norm": 0.056856051087379456,
127
+ "learning_rate": 4.848484848484849e-06,
128
+ "loss": 1.0438549518585205,
129
+ "step": 17
130
+ },
131
+ {
132
+ "epoch": 0.055024837600305694,
133
+ "grad_norm": 0.05908689647912979,
134
+ "learning_rate": 5.151515151515152e-06,
135
+ "loss": 1.0398856401443481,
136
+ "step": 18
137
+ },
138
+ {
139
+ "epoch": 0.0580817730225449,
140
+ "grad_norm": 0.07411840558052063,
141
+ "learning_rate": 5.4545454545454545e-06,
142
+ "loss": 1.107885479927063,
143
+ "step": 19
144
+ },
145
+ {
146
+ "epoch": 0.061138708444784105,
147
+ "grad_norm": 0.0749165341258049,
148
+ "learning_rate": 5.7575757575757586e-06,
149
+ "loss": 1.1060967445373535,
150
+ "step": 20
151
+ },
152
+ {
153
+ "epoch": 0.06419564386702331,
154
+ "grad_norm": 0.06720177084207535,
155
+ "learning_rate": 6.060606060606061e-06,
156
+ "loss": 1.0471720695495605,
157
+ "step": 21
158
+ },
159
+ {
160
+ "epoch": 0.06725257928926251,
161
+ "grad_norm": 0.05990725755691528,
162
+ "learning_rate": 6.363636363636364e-06,
163
+ "loss": 1.0944981575012207,
164
+ "step": 22
165
+ },
166
+ {
167
+ "epoch": 0.07030951471150172,
168
+ "grad_norm": 0.06672193855047226,
169
+ "learning_rate": 6.666666666666667e-06,
170
+ "loss": 1.1477092504501343,
171
+ "step": 23
172
+ },
173
+ {
174
+ "epoch": 0.07336645013374092,
175
+ "grad_norm": 0.06145205348730087,
176
+ "learning_rate": 6.969696969696971e-06,
177
+ "loss": 1.0591784715652466,
178
+ "step": 24
179
+ },
180
+ {
181
+ "epoch": 0.07642338555598013,
182
+ "grad_norm": 0.0757482647895813,
183
+ "learning_rate": 7.272727272727273e-06,
184
+ "loss": 1.0500165224075317,
185
+ "step": 25
186
+ },
187
+ {
188
+ "epoch": 0.07948032097821933,
189
+ "grad_norm": 0.07848478108644485,
190
+ "learning_rate": 7.5757575757575764e-06,
191
+ "loss": 1.0747522115707397,
192
+ "step": 26
193
+ },
194
+ {
195
+ "epoch": 0.08253725640045854,
196
+ "grad_norm": 0.07740631699562073,
197
+ "learning_rate": 7.87878787878788e-06,
198
+ "loss": 1.132310152053833,
199
+ "step": 27
200
+ },
201
+ {
202
+ "epoch": 0.08559419182269774,
203
+ "grad_norm": 0.07476603239774704,
204
+ "learning_rate": 8.181818181818183e-06,
205
+ "loss": 1.0339502096176147,
206
+ "step": 28
207
+ },
208
+ {
209
+ "epoch": 0.08865112724493696,
210
+ "grad_norm": 0.0779196098446846,
211
+ "learning_rate": 8.484848484848486e-06,
212
+ "loss": 1.1047282218933105,
213
+ "step": 29
214
+ },
215
+ {
216
+ "epoch": 0.09170806266717615,
217
+ "grad_norm": 0.06962384283542633,
218
+ "learning_rate": 8.787878787878788e-06,
219
+ "loss": 1.004916787147522,
220
+ "step": 30
221
+ },
222
+ {
223
+ "epoch": 0.09476499808941537,
224
+ "grad_norm": 0.06369175016880035,
225
+ "learning_rate": 9.090909090909091e-06,
226
+ "loss": 0.9296417832374573,
227
+ "step": 31
228
+ },
229
+ {
230
+ "epoch": 0.09782193351165457,
231
+ "grad_norm": 0.07470260560512543,
232
+ "learning_rate": 9.393939393939396e-06,
233
+ "loss": 1.0721708536148071,
234
+ "step": 32
235
+ },
236
+ {
237
+ "epoch": 0.10087886893389378,
238
+ "grad_norm": 0.07948213815689087,
239
+ "learning_rate": 9.696969696969698e-06,
240
+ "loss": 1.0350117683410645,
241
+ "step": 33
242
+ },
243
+ {
244
+ "epoch": 0.10393580435613298,
245
+ "grad_norm": 0.07066022604703903,
246
+ "learning_rate": 1e-05,
247
+ "loss": 1.026305913925171,
248
+ "step": 34
249
+ },
250
+ {
251
+ "epoch": 0.10699273977837218,
252
+ "grad_norm": 0.07774543762207031,
253
+ "learning_rate": 1.0303030303030304e-05,
254
+ "loss": 1.0509816408157349,
255
+ "step": 35
256
+ },
257
+ {
258
+ "epoch": 0.11004967520061139,
259
+ "grad_norm": 0.07501248270273209,
260
+ "learning_rate": 1.0606060606060606e-05,
261
+ "loss": 1.0011574029922485,
262
+ "step": 36
263
+ },
264
+ {
265
+ "epoch": 0.11310661062285059,
266
+ "grad_norm": 0.6622501611709595,
267
+ "learning_rate": 1.0909090909090909e-05,
268
+ "loss": 0.9754424691200256,
269
+ "step": 37
270
+ },
271
+ {
272
+ "epoch": 0.1161635460450898,
273
+ "grad_norm": 0.07566080242395401,
274
+ "learning_rate": 1.1212121212121212e-05,
275
+ "loss": 1.0342774391174316,
276
+ "step": 38
277
+ },
278
+ {
279
+ "epoch": 0.119220481467329,
280
+ "grad_norm": 0.07573831081390381,
281
+ "learning_rate": 1.1515151515151517e-05,
282
+ "loss": 0.9714518785476685,
283
+ "step": 39
284
+ },
285
+ {
286
+ "epoch": 0.12227741688956821,
287
+ "grad_norm": 0.08083852380514145,
288
+ "learning_rate": 1.181818181818182e-05,
289
+ "loss": 1.1050316095352173,
290
+ "step": 40
291
+ },
292
+ {
293
+ "epoch": 0.12533435231180742,
294
+ "grad_norm": 0.08540588617324829,
295
+ "learning_rate": 1.2121212121212122e-05,
296
+ "loss": 1.0871070623397827,
297
+ "step": 41
298
+ },
299
+ {
300
+ "epoch": 0.12839128773404662,
301
+ "grad_norm": 0.07391592115163803,
302
+ "learning_rate": 1.2424242424242425e-05,
303
+ "loss": 1.0206722021102905,
304
+ "step": 42
305
+ },
306
+ {
307
+ "epoch": 0.13144822315628582,
308
+ "grad_norm": 0.07063689082860947,
309
+ "learning_rate": 1.2727272727272728e-05,
310
+ "loss": 0.9775047898292542,
311
+ "step": 43
312
+ },
313
+ {
314
+ "epoch": 0.13450515857852502,
315
+ "grad_norm": 0.07288888841867447,
316
+ "learning_rate": 1.3030303030303032e-05,
317
+ "loss": 1.1132858991622925,
318
+ "step": 44
319
+ },
320
+ {
321
+ "epoch": 0.13756209400076425,
322
+ "grad_norm": 0.07641777396202087,
323
+ "learning_rate": 1.3333333333333333e-05,
324
+ "loss": 1.0707701444625854,
325
+ "step": 45
326
+ },
327
+ {
328
+ "epoch": 0.14061902942300344,
329
+ "grad_norm": 0.06990326195955276,
330
+ "learning_rate": 1.3636363636363637e-05,
331
+ "loss": 0.9328265190124512,
332
+ "step": 46
333
+ },
334
+ {
335
+ "epoch": 0.14367596484524264,
336
+ "grad_norm": 0.0834241658449173,
337
+ "learning_rate": 1.3939393939393942e-05,
338
+ "loss": 1.0131721496582031,
339
+ "step": 47
340
+ },
341
+ {
342
+ "epoch": 0.14673290026748184,
343
+ "grad_norm": 0.0714937075972557,
344
+ "learning_rate": 1.4242424242424245e-05,
345
+ "loss": 0.940493106842041,
346
+ "step": 48
347
+ },
348
+ {
349
+ "epoch": 0.14978983568972107,
350
+ "grad_norm": 0.07770547270774841,
351
+ "learning_rate": 1.4545454545454546e-05,
352
+ "loss": 1.0435771942138672,
353
+ "step": 49
354
+ },
355
+ {
356
+ "epoch": 0.15284677111196027,
357
+ "grad_norm": 0.07950945198535919,
358
+ "learning_rate": 1.484848484848485e-05,
359
+ "loss": 1.0382137298583984,
360
+ "step": 50
361
+ },
362
+ {
363
+ "epoch": 0.15284677111196027,
364
+ "eval_loss": 1.0129202604293823,
365
+ "eval_runtime": 724.3664,
366
+ "eval_samples_per_second": 0.832,
367
+ "eval_steps_per_second": 0.832,
368
+ "step": 50
369
+ },
370
+ {
371
+ "epoch": 0.15590370653419947,
372
+ "grad_norm": 0.06961936503648758,
373
+ "learning_rate": 1.5151515151515153e-05,
374
+ "loss": 0.9690049886703491,
375
+ "step": 51
376
+ },
377
+ {
378
+ "epoch": 0.15896064195643866,
379
+ "grad_norm": 0.069523885846138,
380
+ "learning_rate": 1.5454545454545454e-05,
381
+ "loss": 0.9830482006072998,
382
+ "step": 52
383
+ },
384
+ {
385
+ "epoch": 0.16201757737867786,
386
+ "grad_norm": 0.0764622762799263,
387
+ "learning_rate": 1.575757575757576e-05,
388
+ "loss": 1.0895472764968872,
389
+ "step": 53
390
+ },
391
+ {
392
+ "epoch": 0.1650745128009171,
393
+ "grad_norm": 0.1413721889257431,
394
+ "learning_rate": 1.606060606060606e-05,
395
+ "loss": 1.0354574918746948,
396
+ "step": 54
397
+ },
398
+ {
399
+ "epoch": 0.1681314482231563,
400
+ "grad_norm": 0.06818042695522308,
401
+ "learning_rate": 1.6363636363636366e-05,
402
+ "loss": 0.8534265160560608,
403
+ "step": 55
404
+ },
405
+ {
406
+ "epoch": 0.1711883836453955,
407
+ "grad_norm": 0.0722246989607811,
408
+ "learning_rate": 1.6666666666666667e-05,
409
+ "loss": 0.9580274820327759,
410
+ "step": 56
411
+ },
412
+ {
413
+ "epoch": 0.17424531906763469,
414
+ "grad_norm": 0.07113443315029144,
415
+ "learning_rate": 1.6969696969696972e-05,
416
+ "loss": 1.0721848011016846,
417
+ "step": 57
418
+ },
419
+ {
420
+ "epoch": 0.1773022544898739,
421
+ "grad_norm": 0.08412107080221176,
422
+ "learning_rate": 1.7272727272727274e-05,
423
+ "loss": 1.1180150508880615,
424
+ "step": 58
425
+ },
426
+ {
427
+ "epoch": 0.1803591899121131,
428
+ "grad_norm": 0.07381036877632141,
429
+ "learning_rate": 1.7575757575757576e-05,
430
+ "loss": 1.0384547710418701,
431
+ "step": 59
432
+ },
433
+ {
434
+ "epoch": 0.1834161253343523,
435
+ "grad_norm": 0.07089001685380936,
436
+ "learning_rate": 1.787878787878788e-05,
437
+ "loss": 1.0446016788482666,
438
+ "step": 60
439
+ },
440
+ {
441
+ "epoch": 0.1864730607565915,
442
+ "grad_norm": 0.11576953530311584,
443
+ "learning_rate": 1.8181818181818182e-05,
444
+ "loss": 1.0015051364898682,
445
+ "step": 61
446
+ },
447
+ {
448
+ "epoch": 0.18952999617883073,
449
+ "grad_norm": 0.08030868321657181,
450
+ "learning_rate": 1.8484848484848487e-05,
451
+ "loss": 0.9642710089683533,
452
+ "step": 62
453
+ },
454
+ {
455
+ "epoch": 0.19258693160106993,
456
+ "grad_norm": 0.08332342654466629,
457
+ "learning_rate": 1.8787878787878792e-05,
458
+ "loss": 1.0722991228103638,
459
+ "step": 63
460
+ },
461
+ {
462
+ "epoch": 0.19564386702330913,
463
+ "grad_norm": 0.08000365644693375,
464
+ "learning_rate": 1.9090909090909094e-05,
465
+ "loss": 1.0104647874832153,
466
+ "step": 64
467
+ },
468
+ {
469
+ "epoch": 0.19870080244554833,
470
+ "grad_norm": 0.08139508217573166,
471
+ "learning_rate": 1.9393939393939395e-05,
472
+ "loss": 0.9445061087608337,
473
+ "step": 65
474
+ },
475
+ {
476
+ "epoch": 0.20175773786778756,
477
+ "grad_norm": 0.08749893307685852,
478
+ "learning_rate": 1.96969696969697e-05,
479
+ "loss": 1.080810308456421,
480
+ "step": 66
481
+ },
482
+ {
483
+ "epoch": 0.20481467329002676,
484
+ "grad_norm": 0.0786912813782692,
485
+ "learning_rate": 2e-05,
486
+ "loss": 0.9705753922462463,
487
+ "step": 67
488
+ },
489
+ {
490
+ "epoch": 0.20787160871226595,
491
+ "grad_norm": 0.08962028473615646,
492
+ "learning_rate": 1.9999858236410775e-05,
493
+ "loss": 0.962783694267273,
494
+ "step": 68
495
+ },
496
+ {
497
+ "epoch": 0.21092854413450515,
498
+ "grad_norm": 0.08402887731790543,
499
+ "learning_rate": 1.9999432949662483e-05,
500
+ "loss": 0.9959614872932434,
501
+ "step": 69
502
+ },
503
+ {
504
+ "epoch": 0.21398547955674435,
505
+ "grad_norm": 0.08036444336175919,
506
+ "learning_rate": 1.9998724151813157e-05,
507
+ "loss": 0.9569960832595825,
508
+ "step": 70
509
+ },
510
+ {
511
+ "epoch": 0.21704241497898358,
512
+ "grad_norm": 0.08247046917676926,
513
+ "learning_rate": 1.9997731862959143e-05,
514
+ "loss": 1.0012171268463135,
515
+ "step": 71
516
+ },
517
+ {
518
+ "epoch": 0.22009935040122278,
519
+ "grad_norm": 0.08966264873743057,
520
+ "learning_rate": 1.999645611123453e-05,
521
+ "loss": 1.0403809547424316,
522
+ "step": 72
523
+ },
524
+ {
525
+ "epoch": 0.22315628582346198,
526
+ "grad_norm": 0.08061660826206207,
527
+ "learning_rate": 1.999489693281034e-05,
528
+ "loss": 1.0089740753173828,
529
+ "step": 73
530
+ },
531
+ {
532
+ "epoch": 0.22621322124570117,
533
+ "grad_norm": 0.09005365520715714,
534
+ "learning_rate": 1.9993054371893526e-05,
535
+ "loss": 0.9333044290542603,
536
+ "step": 74
537
+ },
538
+ {
539
+ "epoch": 0.2292701566679404,
540
+ "grad_norm": 0.08651519566774368,
541
+ "learning_rate": 1.9990928480725694e-05,
542
+ "loss": 0.9284015893936157,
543
+ "step": 75
544
+ },
545
+ {
546
+ "epoch": 0.2323270920901796,
547
+ "grad_norm": 0.08141147345304489,
548
+ "learning_rate": 1.9988519319581637e-05,
549
+ "loss": 0.9782730340957642,
550
+ "step": 76
551
+ },
552
+ {
553
+ "epoch": 0.2353840275124188,
554
+ "grad_norm": 0.08344405144453049,
555
+ "learning_rate": 1.998582695676762e-05,
556
+ "loss": 0.9723064303398132,
557
+ "step": 77
558
+ },
559
+ {
560
+ "epoch": 0.238440962934658,
561
+ "grad_norm": 0.08019903302192688,
562
+ "learning_rate": 1.998285146861945e-05,
563
+ "loss": 0.9648997783660889,
564
+ "step": 78
565
+ },
566
+ {
567
+ "epoch": 0.24149789835689722,
568
+ "grad_norm": 0.08113416284322739,
569
+ "learning_rate": 1.99795929395003e-05,
570
+ "loss": 0.9263214468955994,
571
+ "step": 79
572
+ },
573
+ {
574
+ "epoch": 0.24455483377913642,
575
+ "grad_norm": 0.08127513527870178,
576
+ "learning_rate": 1.997605146179833e-05,
577
+ "loss": 0.8745232224464417,
578
+ "step": 80
579
+ },
580
+ {
581
+ "epoch": 0.24761176920137562,
582
+ "grad_norm": 0.09934187680482864,
583
+ "learning_rate": 1.997222713592405e-05,
584
+ "loss": 0.8722782135009766,
585
+ "step": 81
586
+ },
587
+ {
588
+ "epoch": 0.25066870462361485,
589
+ "grad_norm": 0.09701363742351532,
590
+ "learning_rate": 1.9968120070307503e-05,
591
+ "loss": 1.0084266662597656,
592
+ "step": 82
593
+ },
594
+ {
595
+ "epoch": 0.253725640045854,
596
+ "grad_norm": 0.08335654437541962,
597
+ "learning_rate": 1.9963730381395154e-05,
598
+ "loss": 0.9239332675933838,
599
+ "step": 83
600
+ },
601
+ {
602
+ "epoch": 0.25678257546809324,
603
+ "grad_norm": 0.09161650389432907,
604
+ "learning_rate": 1.9959058193646618e-05,
605
+ "loss": 0.9878032207489014,
606
+ "step": 84
607
+ },
608
+ {
609
+ "epoch": 0.2598395108903324,
610
+ "grad_norm": 0.08067663013935089,
611
+ "learning_rate": 1.9954103639531116e-05,
612
+ "loss": 0.9113098382949829,
613
+ "step": 85
614
+ },
615
+ {
616
+ "epoch": 0.26289644631257164,
617
+ "grad_norm": 0.09619539976119995,
618
+ "learning_rate": 1.9948866859523717e-05,
619
+ "loss": 0.9527600407600403,
620
+ "step": 86
621
+ },
622
+ {
623
+ "epoch": 0.26595338173481087,
624
+ "grad_norm": 0.10015493631362915,
625
+ "learning_rate": 1.9943348002101374e-05,
626
+ "loss": 0.9569152593612671,
627
+ "step": 87
628
+ },
629
+ {
630
+ "epoch": 0.26901031715705004,
631
+ "grad_norm": 0.09012345969676971,
632
+ "learning_rate": 1.993754722373869e-05,
633
+ "loss": 0.8912045359611511,
634
+ "step": 88
635
+ },
636
+ {
637
+ "epoch": 0.27206725257928926,
638
+ "grad_norm": 0.10342805832624435,
639
+ "learning_rate": 1.9931464688903502e-05,
640
+ "loss": 0.856104850769043,
641
+ "step": 89
642
+ },
643
+ {
644
+ "epoch": 0.2751241880015285,
645
+ "grad_norm": 0.10218493640422821,
646
+ "learning_rate": 1.9925100570052194e-05,
647
+ "loss": 0.9631397128105164,
648
+ "step": 90
649
+ },
650
+ {
651
+ "epoch": 0.27818112342376766,
652
+ "grad_norm": 0.10909046977758408,
653
+ "learning_rate": 1.9918455047624847e-05,
654
+ "loss": 0.8532565236091614,
655
+ "step": 91
656
+ },
657
+ {
658
+ "epoch": 0.2812380588460069,
659
+ "grad_norm": 0.10714197903871536,
660
+ "learning_rate": 1.9911528310040073e-05,
661
+ "loss": 0.9691859483718872,
662
+ "step": 92
663
+ },
664
+ {
665
+ "epoch": 0.28429499426824606,
666
+ "grad_norm": 0.1108694076538086,
667
+ "learning_rate": 1.990432055368971e-05,
668
+ "loss": 0.9374334812164307,
669
+ "step": 93
670
+ },
671
+ {
672
+ "epoch": 0.2873519296904853,
673
+ "grad_norm": 0.10037308186292648,
674
+ "learning_rate": 1.989683198293324e-05,
675
+ "loss": 0.9166896343231201,
676
+ "step": 94
677
+ },
678
+ {
679
+ "epoch": 0.2904088651127245,
680
+ "grad_norm": 0.10246684402227402,
681
+ "learning_rate": 1.9889062810092002e-05,
682
+ "loss": 1.0059239864349365,
683
+ "step": 95
684
+ },
685
+ {
686
+ "epoch": 0.2934658005349637,
687
+ "grad_norm": 0.09954962879419327,
688
+ "learning_rate": 1.9881013255443152e-05,
689
+ "loss": 1.00413179397583,
690
+ "step": 96
691
+ },
692
+ {
693
+ "epoch": 0.2965227359572029,
694
+ "grad_norm": 0.11006761342287064,
695
+ "learning_rate": 1.9872683547213446e-05,
696
+ "loss": 0.9414035677909851,
697
+ "step": 97
698
+ },
699
+ {
700
+ "epoch": 0.29957967137944214,
701
+ "grad_norm": 0.1014382541179657,
702
+ "learning_rate": 1.9864073921572756e-05,
703
+ "loss": 0.9155468940734863,
704
+ "step": 98
705
+ },
706
+ {
707
+ "epoch": 0.3026366068016813,
708
+ "grad_norm": 0.09883157908916473,
709
+ "learning_rate": 1.9855184622627362e-05,
710
+ "loss": 0.9429305195808411,
711
+ "step": 99
712
+ },
713
+ {
714
+ "epoch": 0.30569354222392053,
715
+ "grad_norm": 0.11199072748422623,
716
+ "learning_rate": 1.9846015902413053e-05,
717
+ "loss": 0.9143528342247009,
718
+ "step": 100
719
+ },
720
+ {
721
+ "epoch": 0.30569354222392053,
722
+ "eval_loss": 0.884428083896637,
723
+ "eval_runtime": 723.8143,
724
+ "eval_samples_per_second": 0.833,
725
+ "eval_steps_per_second": 0.833,
726
+ "step": 100
727
+ },
728
+ {
729
+ "epoch": 0.3087504776461597,
730
+ "grad_norm": 0.10796016454696655,
731
+ "learning_rate": 1.9836568020887963e-05,
732
+ "loss": 0.9726455211639404,
733
+ "step": 101
734
+ },
735
+ {
736
+ "epoch": 0.31180741306839893,
737
+ "grad_norm": 0.10056383162736893,
738
+ "learning_rate": 1.982684124592521e-05,
739
+ "loss": 0.8932135701179504,
740
+ "step": 102
741
+ },
742
+ {
743
+ "epoch": 0.31486434849063816,
744
+ "grad_norm": 0.10836594551801682,
745
+ "learning_rate": 1.9816835853305306e-05,
746
+ "loss": 0.919749915599823,
747
+ "step": 103
748
+ },
749
+ {
750
+ "epoch": 0.31792128391287733,
751
+ "grad_norm": 0.12032149732112885,
752
+ "learning_rate": 1.9806552126708322e-05,
753
+ "loss": 0.871781587600708,
754
+ "step": 104
755
+ },
756
+ {
757
+ "epoch": 0.32097821933511655,
758
+ "grad_norm": 0.10854160040616989,
759
+ "learning_rate": 1.9795990357705853e-05,
760
+ "loss": 0.8587784171104431,
761
+ "step": 105
762
+ },
763
+ {
764
+ "epoch": 0.3240351547573557,
765
+ "grad_norm": 0.10819399356842041,
766
+ "learning_rate": 1.978515084575276e-05,
767
+ "loss": 0.8524806499481201,
768
+ "step": 106
769
+ },
770
+ {
771
+ "epoch": 0.32709209017959495,
772
+ "grad_norm": 0.10226067155599594,
773
+ "learning_rate": 1.9774033898178668e-05,
774
+ "loss": 0.7892144918441772,
775
+ "step": 107
776
+ },
777
+ {
778
+ "epoch": 0.3301490256018342,
779
+ "grad_norm": 0.1071159616112709,
780
+ "learning_rate": 1.976263983017925e-05,
781
+ "loss": 0.8833234906196594,
782
+ "step": 108
783
+ },
784
+ {
785
+ "epoch": 0.33320596102407335,
786
+ "grad_norm": 0.11434526741504669,
787
+ "learning_rate": 1.9750968964807305e-05,
788
+ "loss": 0.861842155456543,
789
+ "step": 109
790
+ },
791
+ {
792
+ "epoch": 0.3362628964463126,
793
+ "grad_norm": 0.1159641221165657,
794
+ "learning_rate": 1.9739021632963584e-05,
795
+ "loss": 0.8987889289855957,
796
+ "step": 110
797
+ },
798
+ {
799
+ "epoch": 0.3393198318685518,
800
+ "grad_norm": 0.12371373921632767,
801
+ "learning_rate": 1.9726798173387417e-05,
802
+ "loss": 0.9710193872451782,
803
+ "step": 111
804
+ },
805
+ {
806
+ "epoch": 0.342376767290791,
807
+ "grad_norm": 0.11441531032323837,
808
+ "learning_rate": 1.97142989326471e-05,
809
+ "loss": 0.8199151158332825,
810
+ "step": 112
811
+ },
812
+ {
813
+ "epoch": 0.3454337027130302,
814
+ "grad_norm": 0.11842846125364304,
815
+ "learning_rate": 1.9701524265130088e-05,
816
+ "loss": 0.8845276236534119,
817
+ "step": 113
818
+ },
819
+ {
820
+ "epoch": 0.34849063813526937,
821
+ "grad_norm": 0.10813732445240021,
822
+ "learning_rate": 1.9688474533032916e-05,
823
+ "loss": 0.7964264750480652,
824
+ "step": 114
825
+ },
826
+ {
827
+ "epoch": 0.3515475735575086,
828
+ "grad_norm": 0.11050347238779068,
829
+ "learning_rate": 1.9675150106350957e-05,
830
+ "loss": 0.9630422592163086,
831
+ "step": 115
832
+ },
833
+ {
834
+ "epoch": 0.3546045089797478,
835
+ "grad_norm": 0.10537250339984894,
836
+ "learning_rate": 1.9661551362867926e-05,
837
+ "loss": 0.7706905007362366,
838
+ "step": 116
839
+ },
840
+ {
841
+ "epoch": 0.357661444401987,
842
+ "grad_norm": 0.11390368640422821,
843
+ "learning_rate": 1.9647678688145163e-05,
844
+ "loss": 0.8541204929351807,
845
+ "step": 117
846
+ },
847
+ {
848
+ "epoch": 0.3607183798242262,
849
+ "grad_norm": 0.10318922251462936,
850
+ "learning_rate": 1.963353247551069e-05,
851
+ "loss": 0.7400562763214111,
852
+ "step": 118
853
+ },
854
+ {
855
+ "epoch": 0.3637753152464654,
856
+ "grad_norm": 0.1347586214542389,
857
+ "learning_rate": 1.9619113126048086e-05,
858
+ "loss": 0.9232871532440186,
859
+ "step": 119
860
+ },
861
+ {
862
+ "epoch": 0.3668322506687046,
863
+ "grad_norm": 0.11458177119493484,
864
+ "learning_rate": 1.96044210485851e-05,
865
+ "loss": 0.833285927772522,
866
+ "step": 120
867
+ },
868
+ {
869
+ "epoch": 0.36988918609094384,
870
+ "grad_norm": 0.12361041456460953,
871
+ "learning_rate": 1.958945665968206e-05,
872
+ "loss": 0.7887391448020935,
873
+ "step": 121
874
+ },
875
+ {
876
+ "epoch": 0.372946121513183,
877
+ "grad_norm": 0.11985408514738083,
878
+ "learning_rate": 1.9574220383620054e-05,
879
+ "loss": 0.8206446170806885,
880
+ "step": 122
881
+ },
882
+ {
883
+ "epoch": 0.37600305693542224,
884
+ "grad_norm": 0.1355939507484436,
885
+ "learning_rate": 1.9558712652388932e-05,
886
+ "loss": 0.7648542523384094,
887
+ "step": 123
888
+ },
889
+ {
890
+ "epoch": 0.37905999235766147,
891
+ "grad_norm": 0.1229313388466835,
892
+ "learning_rate": 1.954293390567501e-05,
893
+ "loss": 0.8573335409164429,
894
+ "step": 124
895
+ },
896
+ {
897
+ "epoch": 0.38211692777990064,
898
+ "grad_norm": 0.11425124108791351,
899
+ "learning_rate": 1.9526884590848646e-05,
900
+ "loss": 0.7412531971931458,
901
+ "step": 125
902
+ },
903
+ {
904
+ "epoch": 0.38517386320213987,
905
+ "grad_norm": 0.12430041283369064,
906
+ "learning_rate": 1.9510565162951538e-05,
907
+ "loss": 0.8098543882369995,
908
+ "step": 126
909
+ },
910
+ {
911
+ "epoch": 0.38823079862437904,
912
+ "grad_norm": 0.12492368370294571,
913
+ "learning_rate": 1.9493976084683814e-05,
914
+ "loss": 0.8814713954925537,
915
+ "step": 127
916
+ },
917
+ {
918
+ "epoch": 0.39128773404661826,
919
+ "grad_norm": 0.14428824186325073,
920
+ "learning_rate": 1.9477117826390934e-05,
921
+ "loss": 0.8231979608535767,
922
+ "step": 128
923
+ },
924
+ {
925
+ "epoch": 0.3943446694688575,
926
+ "grad_norm": 0.12010085582733154,
927
+ "learning_rate": 1.9459990866050337e-05,
928
+ "loss": 0.7015627026557922,
929
+ "step": 129
930
+ },
931
+ {
932
+ "epoch": 0.39740160489109666,
933
+ "grad_norm": 0.11819776892662048,
934
+ "learning_rate": 1.9442595689257898e-05,
935
+ "loss": 0.8086729645729065,
936
+ "step": 130
937
+ },
938
+ {
939
+ "epoch": 0.4004585403133359,
940
+ "grad_norm": 0.12211033701896667,
941
+ "learning_rate": 1.9424932789214158e-05,
942
+ "loss": 0.8234002590179443,
943
+ "step": 131
944
+ },
945
+ {
946
+ "epoch": 0.4035154757355751,
947
+ "grad_norm": 0.14926476776599884,
948
+ "learning_rate": 1.9407002666710334e-05,
949
+ "loss": 0.874608039855957,
950
+ "step": 132
951
+ },
952
+ {
953
+ "epoch": 0.4065724111578143,
954
+ "grad_norm": 0.13012923300266266,
955
+ "learning_rate": 1.9388805830114132e-05,
956
+ "loss": 0.8491607904434204,
957
+ "step": 133
958
+ },
959
+ {
960
+ "epoch": 0.4096293465800535,
961
+ "grad_norm": 0.12012261897325516,
962
+ "learning_rate": 1.937034279535533e-05,
963
+ "loss": 0.7269159555435181,
964
+ "step": 134
965
+ },
966
+ {
967
+ "epoch": 0.4126862820022927,
968
+ "grad_norm": 0.15302567183971405,
969
+ "learning_rate": 1.9351614085911134e-05,
970
+ "loss": 0.8560839891433716,
971
+ "step": 135
972
+ },
973
+ {
974
+ "epoch": 0.4157432174245319,
975
+ "grad_norm": 0.12234190106391907,
976
+ "learning_rate": 1.933262023279137e-05,
977
+ "loss": 0.8211904764175415,
978
+ "step": 136
979
+ },
980
+ {
981
+ "epoch": 0.41880015284677113,
982
+ "grad_norm": 0.14427296817302704,
983
+ "learning_rate": 1.9313361774523387e-05,
984
+ "loss": 0.8500057458877563,
985
+ "step": 137
986
+ },
987
+ {
988
+ "epoch": 0.4218570882690103,
989
+ "grad_norm": 0.1314094066619873,
990
+ "learning_rate": 1.929383925713682e-05,
991
+ "loss": 0.7589091658592224,
992
+ "step": 138
993
+ },
994
+ {
995
+ "epoch": 0.42491402369124953,
996
+ "grad_norm": 0.1576734483242035,
997
+ "learning_rate": 1.92740532341481e-05,
998
+ "loss": 0.7581073641777039,
999
+ "step": 139
1000
+ },
1001
+ {
1002
+ "epoch": 0.4279709591134887,
1003
+ "grad_norm": 0.15788713097572327,
1004
+ "learning_rate": 1.925400426654475e-05,
1005
+ "loss": 0.809050440788269,
1006
+ "step": 140
1007
+ },
1008
+ {
1009
+ "epoch": 0.43102789453572793,
1010
+ "grad_norm": 0.13364559412002563,
1011
+ "learning_rate": 1.9233692922769497e-05,
1012
+ "loss": 0.7990086078643799,
1013
+ "step": 141
1014
+ },
1015
+ {
1016
+ "epoch": 0.43408482995796716,
1017
+ "grad_norm": 0.14786465466022491,
1018
+ "learning_rate": 1.921311977870413e-05,
1019
+ "loss": 0.8675815463066101,
1020
+ "step": 142
1021
+ },
1022
+ {
1023
+ "epoch": 0.4371417653802063,
1024
+ "grad_norm": 0.14621882140636444,
1025
+ "learning_rate": 1.9192285417653208e-05,
1026
+ "loss": 0.8713765740394592,
1027
+ "step": 143
1028
+ },
1029
+ {
1030
+ "epoch": 0.44019870080244555,
1031
+ "grad_norm": 0.12874048948287964,
1032
+ "learning_rate": 1.917119043032749e-05,
1033
+ "loss": 0.7361871004104614,
1034
+ "step": 144
1035
+ },
1036
+ {
1037
+ "epoch": 0.4432556362246848,
1038
+ "grad_norm": 0.12183775007724762,
1039
+ "learning_rate": 1.9149835414827193e-05,
1040
+ "loss": 0.7311941385269165,
1041
+ "step": 145
1042
+ },
1043
+ {
1044
+ "epoch": 0.44631257164692395,
1045
+ "grad_norm": 0.1397160291671753,
1046
+ "learning_rate": 1.912822097662505e-05,
1047
+ "loss": 0.8189159035682678,
1048
+ "step": 146
1049
+ },
1050
+ {
1051
+ "epoch": 0.4493695070691632,
1052
+ "grad_norm": 0.1458273082971573,
1053
+ "learning_rate": 1.9106347728549134e-05,
1054
+ "loss": 0.8288135528564453,
1055
+ "step": 147
1056
+ },
1057
+ {
1058
+ "epoch": 0.45242644249140235,
1059
+ "grad_norm": 0.16898781061172485,
1060
+ "learning_rate": 1.908421629076547e-05,
1061
+ "loss": 0.7878037095069885,
1062
+ "step": 148
1063
+ },
1064
+ {
1065
+ "epoch": 0.4554833779136416,
1066
+ "grad_norm": 0.1638474315404892,
1067
+ "learning_rate": 1.9061827290760466e-05,
1068
+ "loss": 0.8059952259063721,
1069
+ "step": 149
1070
+ },
1071
+ {
1072
+ "epoch": 0.4585403133358808,
1073
+ "grad_norm": 0.14130882918834686,
1074
+ "learning_rate": 1.9039181363323128e-05,
1075
+ "loss": 0.7346830368041992,
1076
+ "step": 150
1077
+ },
1078
+ {
1079
+ "epoch": 0.4585403133358808,
1080
+ "eval_loss": 0.7979016900062561,
1081
+ "eval_runtime": 828.6295,
1082
+ "eval_samples_per_second": 0.728,
1083
+ "eval_steps_per_second": 0.728,
1084
+ "step": 150
1085
+ },
1086
+ {
1087
+ "epoch": 0.46159724875811997,
1088
+ "grad_norm": 0.14427433907985687,
1089
+ "learning_rate": 1.9016279150527044e-05,
1090
+ "loss": 0.7583403587341309,
1091
+ "step": 151
1092
+ },
1093
+ {
1094
+ "epoch": 0.4646541841803592,
1095
+ "grad_norm": 0.1515798568725586,
1096
+ "learning_rate": 1.8993121301712194e-05,
1097
+ "loss": 0.7908380031585693,
1098
+ "step": 152
1099
+ },
1100
+ {
1101
+ "epoch": 0.46771111960259837,
1102
+ "grad_norm": 0.14444488286972046,
1103
+ "learning_rate": 1.896970847346653e-05,
1104
+ "loss": 0.7916130423545837,
1105
+ "step": 153
1106
+ },
1107
+ {
1108
+ "epoch": 0.4707680550248376,
1109
+ "grad_norm": 0.1460912823677063,
1110
+ "learning_rate": 1.8946041329607364e-05,
1111
+ "loss": 0.7750643491744995,
1112
+ "step": 154
1113
+ },
1114
+ {
1115
+ "epoch": 0.4738249904470768,
1116
+ "grad_norm": 0.13896244764328003,
1117
+ "learning_rate": 1.892212054116255e-05,
1118
+ "loss": 0.8059666156768799,
1119
+ "step": 155
1120
+ },
1121
+ {
1122
+ "epoch": 0.476881925869316,
1123
+ "grad_norm": 0.16133630275726318,
1124
+ "learning_rate": 1.889794678635145e-05,
1125
+ "loss": 0.8327827453613281,
1126
+ "step": 156
1127
+ },
1128
+ {
1129
+ "epoch": 0.4799388612915552,
1130
+ "grad_norm": 0.1474636346101761,
1131
+ "learning_rate": 1.8873520750565716e-05,
1132
+ "loss": 0.8498989343643188,
1133
+ "step": 157
1134
+ },
1135
+ {
1136
+ "epoch": 0.48299579671379445,
1137
+ "grad_norm": 0.17222349345684052,
1138
+ "learning_rate": 1.884884312634985e-05,
1139
+ "loss": 0.7750177979469299,
1140
+ "step": 158
1141
+ },
1142
+ {
1143
+ "epoch": 0.4860527321360336,
1144
+ "grad_norm": 0.15558090806007385,
1145
+ "learning_rate": 1.8823914613381568e-05,
1146
+ "loss": 0.7326169013977051,
1147
+ "step": 159
1148
+ },
1149
+ {
1150
+ "epoch": 0.48910966755827284,
1151
+ "grad_norm": 0.13808321952819824,
1152
+ "learning_rate": 1.8798735918451963e-05,
1153
+ "loss": 0.8308709859848022,
1154
+ "step": 160
1155
+ },
1156
+ {
1157
+ "epoch": 0.492166602980512,
1158
+ "grad_norm": 0.1761898398399353,
1159
+ "learning_rate": 1.8773307755445468e-05,
1160
+ "loss": 0.7805465459823608,
1161
+ "step": 161
1162
+ },
1163
+ {
1164
+ "epoch": 0.49522353840275124,
1165
+ "grad_norm": 0.160477414727211,
1166
+ "learning_rate": 1.874763084531961e-05,
1167
+ "loss": 0.8538846969604492,
1168
+ "step": 162
1169
+ },
1170
+ {
1171
+ "epoch": 0.49828047382499047,
1172
+ "grad_norm": 0.15238745510578156,
1173
+ "learning_rate": 1.872170591608459e-05,
1174
+ "loss": 0.8801217675209045,
1175
+ "step": 163
1176
+ },
1177
+ {
1178
+ "epoch": 0.5013374092472297,
1179
+ "grad_norm": 0.1567080318927765,
1180
+ "learning_rate": 1.86955337027826e-05,
1181
+ "loss": 0.7205259799957275,
1182
+ "step": 164
1183
+ },
1184
+ {
1185
+ "epoch": 0.5043943446694689,
1186
+ "grad_norm": 0.13637851178646088,
1187
+ "learning_rate": 1.866911494746702e-05,
1188
+ "loss": 0.7636491656303406,
1189
+ "step": 165
1190
+ },
1191
+ {
1192
+ "epoch": 0.507451280091708,
1193
+ "grad_norm": 0.15563489496707916,
1194
+ "learning_rate": 1.8642450399181373e-05,
1195
+ "loss": 0.7982497811317444,
1196
+ "step": 166
1197
+ },
1198
+ {
1199
+ "epoch": 0.5105082155139473,
1200
+ "grad_norm": 0.15503396093845367,
1201
+ "learning_rate": 1.8615540813938063e-05,
1202
+ "loss": 0.8737778067588806,
1203
+ "step": 167
1204
+ },
1205
+ {
1206
+ "epoch": 0.5135651509361865,
1207
+ "grad_norm": 0.16095557808876038,
1208
+ "learning_rate": 1.8588386954696972e-05,
1209
+ "loss": 0.796604335308075,
1210
+ "step": 168
1211
+ },
1212
+ {
1213
+ "epoch": 0.5166220863584257,
1214
+ "grad_norm": 0.1713593453168869,
1215
+ "learning_rate": 1.856098959134381e-05,
1216
+ "loss": 0.8247392177581787,
1217
+ "step": 169
1218
+ },
1219
+ {
1220
+ "epoch": 0.5196790217806648,
1221
+ "grad_norm": 0.18239113688468933,
1222
+ "learning_rate": 1.8533349500668295e-05,
1223
+ "loss": 0.7838484644889832,
1224
+ "step": 170
1225
+ },
1226
+ {
1227
+ "epoch": 0.5227359572029041,
1228
+ "grad_norm": 0.15745767951011658,
1229
+ "learning_rate": 1.850546746634211e-05,
1230
+ "loss": 0.7856907248497009,
1231
+ "step": 171
1232
+ },
1233
+ {
1234
+ "epoch": 0.5257928926251433,
1235
+ "grad_norm": 0.16820666193962097,
1236
+ "learning_rate": 1.8477344278896708e-05,
1237
+ "loss": 0.7829679846763611,
1238
+ "step": 172
1239
+ },
1240
+ {
1241
+ "epoch": 0.5288498280473825,
1242
+ "grad_norm": 0.16975544393062592,
1243
+ "learning_rate": 1.84489807357009e-05,
1244
+ "loss": 0.7374375462532043,
1245
+ "step": 173
1246
+ },
1247
+ {
1248
+ "epoch": 0.5319067634696217,
1249
+ "grad_norm": 0.167228102684021,
1250
+ "learning_rate": 1.8420377640938204e-05,
1251
+ "loss": 0.712837815284729,
1252
+ "step": 174
1253
+ },
1254
+ {
1255
+ "epoch": 0.5349636988918609,
1256
+ "grad_norm": 0.15955154597759247,
1257
+ "learning_rate": 1.839153580558411e-05,
1258
+ "loss": 0.7645693421363831,
1259
+ "step": 175
1260
+ },
1261
+ {
1262
+ "epoch": 0.5380206343141001,
1263
+ "grad_norm": 0.18378689885139465,
1264
+ "learning_rate": 1.8362456047383032e-05,
1265
+ "loss": 0.7974956631660461,
1266
+ "step": 176
1267
+ },
1268
+ {
1269
+ "epoch": 0.5410775697363394,
1270
+ "grad_norm": 0.15777672827243805,
1271
+ "learning_rate": 1.833313919082515e-05,
1272
+ "loss": 0.8957571983337402,
1273
+ "step": 177
1274
+ },
1275
+ {
1276
+ "epoch": 0.5441345051585785,
1277
+ "grad_norm": 0.15292386710643768,
1278
+ "learning_rate": 1.8303586067123028e-05,
1279
+ "loss": 0.7635619044303894,
1280
+ "step": 178
1281
+ },
1282
+ {
1283
+ "epoch": 0.5471914405808177,
1284
+ "grad_norm": 0.178152397274971,
1285
+ "learning_rate": 1.8273797514188043e-05,
1286
+ "loss": 0.7849246263504028,
1287
+ "step": 179
1288
+ },
1289
+ {
1290
+ "epoch": 0.550248376003057,
1291
+ "grad_norm": 0.15916013717651367,
1292
+ "learning_rate": 1.824377437660663e-05,
1293
+ "loss": 0.6975343227386475,
1294
+ "step": 180
1295
+ },
1296
+ {
1297
+ "epoch": 0.5533053114252962,
1298
+ "grad_norm": 0.18172231316566467,
1299
+ "learning_rate": 1.821351750561634e-05,
1300
+ "loss": 0.7675164341926575,
1301
+ "step": 181
1302
+ },
1303
+ {
1304
+ "epoch": 0.5563622468475353,
1305
+ "grad_norm": 0.16241903603076935,
1306
+ "learning_rate": 1.818302775908169e-05,
1307
+ "loss": 0.7950343489646912,
1308
+ "step": 182
1309
+ },
1310
+ {
1311
+ "epoch": 0.5594191822697746,
1312
+ "grad_norm": 0.18727579712867737,
1313
+ "learning_rate": 1.8152306001469875e-05,
1314
+ "loss": 0.787315309047699,
1315
+ "step": 183
1316
+ },
1317
+ {
1318
+ "epoch": 0.5624761176920138,
1319
+ "grad_norm": 0.1627933531999588,
1320
+ "learning_rate": 1.8121353103826213e-05,
1321
+ "loss": 0.7141211628913879,
1322
+ "step": 184
1323
+ },
1324
+ {
1325
+ "epoch": 0.565533053114253,
1326
+ "grad_norm": 0.4369247555732727,
1327
+ "learning_rate": 1.8090169943749477e-05,
1328
+ "loss": 0.8476608395576477,
1329
+ "step": 185
1330
+ },
1331
+ {
1332
+ "epoch": 0.5685899885364921,
1333
+ "grad_norm": 0.16494786739349365,
1334
+ "learning_rate": 1.8058757405367003e-05,
1335
+ "loss": 0.720562756061554,
1336
+ "step": 186
1337
+ },
1338
+ {
1339
+ "epoch": 0.5716469239587314,
1340
+ "grad_norm": 0.175015389919281,
1341
+ "learning_rate": 1.8027116379309637e-05,
1342
+ "loss": 0.7589252591133118,
1343
+ "step": 187
1344
+ },
1345
+ {
1346
+ "epoch": 0.5747038593809706,
1347
+ "grad_norm": 0.1769978553056717,
1348
+ "learning_rate": 1.799524776268646e-05,
1349
+ "loss": 0.7644155621528625,
1350
+ "step": 188
1351
+ },
1352
+ {
1353
+ "epoch": 0.5777607948032097,
1354
+ "grad_norm": 0.18481792509555817,
1355
+ "learning_rate": 1.796315245905936e-05,
1356
+ "loss": 0.7885835766792297,
1357
+ "step": 189
1358
+ },
1359
+ {
1360
+ "epoch": 0.580817730225449,
1361
+ "grad_norm": 0.1668689250946045,
1362
+ "learning_rate": 1.7930831378417437e-05,
1363
+ "loss": 0.7377231121063232,
1364
+ "step": 190
1365
+ },
1366
+ {
1367
+ "epoch": 0.5838746656476882,
1368
+ "grad_norm": 0.178734689950943,
1369
+ "learning_rate": 1.7898285437151163e-05,
1370
+ "loss": 0.7388894557952881,
1371
+ "step": 191
1372
+ },
1373
+ {
1374
+ "epoch": 0.5869316010699274,
1375
+ "grad_norm": 0.1740068644285202,
1376
+ "learning_rate": 1.786551555802643e-05,
1377
+ "loss": 0.8209859728813171,
1378
+ "step": 192
1379
+ },
1380
+ {
1381
+ "epoch": 0.5899885364921666,
1382
+ "grad_norm": 0.19211041927337646,
1383
+ "learning_rate": 1.783252267015837e-05,
1384
+ "loss": 0.7305737733840942,
1385
+ "step": 193
1386
+ },
1387
+ {
1388
+ "epoch": 0.5930454719144058,
1389
+ "grad_norm": 0.16644936800003052,
1390
+ "learning_rate": 1.779930770898503e-05,
1391
+ "loss": 0.7760804891586304,
1392
+ "step": 194
1393
+ },
1394
+ {
1395
+ "epoch": 0.596102407336645,
1396
+ "grad_norm": 0.1773686707019806,
1397
+ "learning_rate": 1.776587161624083e-05,
1398
+ "loss": 0.7879236936569214,
1399
+ "step": 195
1400
+ },
1401
+ {
1402
+ "epoch": 0.5991593427588843,
1403
+ "grad_norm": 0.17508819699287415,
1404
+ "learning_rate": 1.7732215339929874e-05,
1405
+ "loss": 0.7307407259941101,
1406
+ "step": 196
1407
+ },
1408
+ {
1409
+ "epoch": 0.6022162781811234,
1410
+ "grad_norm": 0.17211101949214935,
1411
+ "learning_rate": 1.7698339834299064e-05,
1412
+ "loss": 0.7293214797973633,
1413
+ "step": 197
1414
+ },
1415
+ {
1416
+ "epoch": 0.6052732136033626,
1417
+ "grad_norm": 0.18085215985774994,
1418
+ "learning_rate": 1.7664246059811058e-05,
1419
+ "loss": 0.763083279132843,
1420
+ "step": 198
1421
+ },
1422
+ {
1423
+ "epoch": 0.6083301490256018,
1424
+ "grad_norm": 0.20243075489997864,
1425
+ "learning_rate": 1.7629934983117025e-05,
1426
+ "loss": 0.7372676134109497,
1427
+ "step": 199
1428
+ },
1429
+ {
1430
+ "epoch": 0.6113870844478411,
1431
+ "grad_norm": 0.18152795732021332,
1432
+ "learning_rate": 1.759540757702924e-05,
1433
+ "loss": 0.7121898531913757,
1434
+ "step": 200
1435
+ },
1436
+ {
1437
+ "epoch": 0.6113870844478411,
1438
+ "eval_loss": 0.7551760673522949,
1439
+ "eval_runtime": 900.209,
1440
+ "eval_samples_per_second": 0.67,
1441
+ "eval_steps_per_second": 0.67,
1442
+ "step": 200
1443
+ },
1444
+ {
1445
+ "epoch": 0.6144440198700802,
1446
+ "grad_norm": 0.18808062374591827,
1447
+ "learning_rate": 1.7560664820493502e-05,
1448
+ "loss": 0.734307050704956,
1449
+ "step": 201
1450
+ },
1451
+ {
1452
+ "epoch": 0.6175009552923194,
1453
+ "grad_norm": 0.18151243031024933,
1454
+ "learning_rate": 1.7525707698561383e-05,
1455
+ "loss": 0.7998429536819458,
1456
+ "step": 202
1457
+ },
1458
+ {
1459
+ "epoch": 0.6205578907145587,
1460
+ "grad_norm": 0.19583043456077576,
1461
+ "learning_rate": 1.7490537202362313e-05,
1462
+ "loss": 0.7546265721321106,
1463
+ "step": 203
1464
+ },
1465
+ {
1466
+ "epoch": 0.6236148261367979,
1467
+ "grad_norm": 0.2508557140827179,
1468
+ "learning_rate": 1.7455154329075427e-05,
1469
+ "loss": 0.7810050249099731,
1470
+ "step": 204
1471
+ },
1472
+ {
1473
+ "epoch": 0.626671761559037,
1474
+ "grad_norm": 0.1685105562210083,
1475
+ "learning_rate": 1.741956008190136e-05,
1476
+ "loss": 0.7558917999267578,
1477
+ "step": 205
1478
+ },
1479
+ {
1480
+ "epoch": 0.6297286969812763,
1481
+ "grad_norm": 0.18195222318172455,
1482
+ "learning_rate": 1.7383755470033756e-05,
1483
+ "loss": 0.7216942310333252,
1484
+ "step": 206
1485
+ },
1486
+ {
1487
+ "epoch": 0.6327856324035155,
1488
+ "grad_norm": 0.1878063678741455,
1489
+ "learning_rate": 1.7347741508630673e-05,
1490
+ "loss": 0.7417092323303223,
1491
+ "step": 207
1492
+ },
1493
+ {
1494
+ "epoch": 0.6358425678257547,
1495
+ "grad_norm": 0.25273698568344116,
1496
+ "learning_rate": 1.73115192187858e-05,
1497
+ "loss": 0.807498037815094,
1498
+ "step": 208
1499
+ },
1500
+ {
1501
+ "epoch": 0.6388995032479939,
1502
+ "grad_norm": 0.2451465129852295,
1503
+ "learning_rate": 1.7275089627499493e-05,
1504
+ "loss": 0.7557163238525391,
1505
+ "step": 209
1506
+ },
1507
+ {
1508
+ "epoch": 0.6419564386702331,
1509
+ "grad_norm": 0.19272617995738983,
1510
+ "learning_rate": 1.7238453767649683e-05,
1511
+ "loss": 0.8285109996795654,
1512
+ "step": 210
1513
+ },
1514
+ {
1515
+ "epoch": 0.6450133740924723,
1516
+ "grad_norm": 0.1869518756866455,
1517
+ "learning_rate": 1.720161267796256e-05,
1518
+ "loss": 0.7824444770812988,
1519
+ "step": 211
1520
+ },
1521
+ {
1522
+ "epoch": 0.6480703095147115,
1523
+ "grad_norm": 0.2029627561569214,
1524
+ "learning_rate": 1.7164567402983153e-05,
1525
+ "loss": 0.7018642425537109,
1526
+ "step": 212
1527
+ },
1528
+ {
1529
+ "epoch": 0.6511272449369507,
1530
+ "grad_norm": 0.23215501010417938,
1531
+ "learning_rate": 1.7127318993045686e-05,
1532
+ "loss": 0.7263948917388916,
1533
+ "step": 213
1534
+ },
1535
+ {
1536
+ "epoch": 0.6541841803591899,
1537
+ "grad_norm": 0.19869184494018555,
1538
+ "learning_rate": 1.7089868504243816e-05,
1539
+ "loss": 0.8285576105117798,
1540
+ "step": 214
1541
+ },
1542
+ {
1543
+ "epoch": 0.6572411157814291,
1544
+ "grad_norm": 0.22871531546115875,
1545
+ "learning_rate": 1.705221699840069e-05,
1546
+ "loss": 0.7871490716934204,
1547
+ "step": 215
1548
+ },
1549
+ {
1550
+ "epoch": 0.6602980512036684,
1551
+ "grad_norm": 0.17945580184459686,
1552
+ "learning_rate": 1.701436554303882e-05,
1553
+ "loss": 0.740180492401123,
1554
+ "step": 216
1555
+ },
1556
+ {
1557
+ "epoch": 0.6633549866259075,
1558
+ "grad_norm": 0.20516762137413025,
1559
+ "learning_rate": 1.6976315211349848e-05,
1560
+ "loss": 0.7542892098426819,
1561
+ "step": 217
1562
+ },
1563
+ {
1564
+ "epoch": 0.6664119220481467,
1565
+ "grad_norm": 0.22108283638954163,
1566
+ "learning_rate": 1.6938067082164093e-05,
1567
+ "loss": 0.8117404580116272,
1568
+ "step": 218
1569
+ },
1570
+ {
1571
+ "epoch": 0.669468857470386,
1572
+ "grad_norm": 0.22329698503017426,
1573
+ "learning_rate": 1.6899622239919965e-05,
1574
+ "loss": 0.8002716898918152,
1575
+ "step": 219
1576
+ },
1577
+ {
1578
+ "epoch": 0.6725257928926252,
1579
+ "grad_norm": 0.23545362055301666,
1580
+ "learning_rate": 1.6860981774633228e-05,
1581
+ "loss": 0.7750573754310608,
1582
+ "step": 220
1583
+ },
1584
+ {
1585
+ "epoch": 0.6755827283148643,
1586
+ "grad_norm": 0.21816480159759521,
1587
+ "learning_rate": 1.6822146781866097e-05,
1588
+ "loss": 0.8051223754882812,
1589
+ "step": 221
1590
+ },
1591
+ {
1592
+ "epoch": 0.6786396637371036,
1593
+ "grad_norm": 0.18638508021831512,
1594
+ "learning_rate": 1.6783118362696162e-05,
1595
+ "loss": 0.7286484241485596,
1596
+ "step": 222
1597
+ },
1598
+ {
1599
+ "epoch": 0.6816965991593428,
1600
+ "grad_norm": 0.16794732213020325,
1601
+ "learning_rate": 1.6743897623685178e-05,
1602
+ "loss": 0.7001460194587708,
1603
+ "step": 223
1604
+ },
1605
+ {
1606
+ "epoch": 0.684753534581582,
1607
+ "grad_norm": 0.21157318353652954,
1608
+ "learning_rate": 1.6704485676847695e-05,
1609
+ "loss": 0.7479901313781738,
1610
+ "step": 224
1611
+ },
1612
+ {
1613
+ "epoch": 0.6878104700038211,
1614
+ "grad_norm": 0.35601308941841125,
1615
+ "learning_rate": 1.666488363961952e-05,
1616
+ "loss": 0.7660019397735596,
1617
+ "step": 225
1618
+ },
1619
+ {
1620
+ "epoch": 0.6908674054260604,
1621
+ "grad_norm": 0.17416611313819885,
1622
+ "learning_rate": 1.662509263482604e-05,
1623
+ "loss": 0.7157142162322998,
1624
+ "step": 226
1625
+ },
1626
+ {
1627
+ "epoch": 0.6939243408482996,
1628
+ "grad_norm": 0.19655123353004456,
1629
+ "learning_rate": 1.658511379065039e-05,
1630
+ "loss": 0.7894638776779175,
1631
+ "step": 227
1632
+ },
1633
+ {
1634
+ "epoch": 0.6969812762705387,
1635
+ "grad_norm": 0.2034345269203186,
1636
+ "learning_rate": 1.6544948240601453e-05,
1637
+ "loss": 0.6853711009025574,
1638
+ "step": 228
1639
+ },
1640
+ {
1641
+ "epoch": 0.700038211692778,
1642
+ "grad_norm": 0.199235200881958,
1643
+ "learning_rate": 1.6504597123481737e-05,
1644
+ "loss": 0.7487372756004333,
1645
+ "step": 229
1646
+ },
1647
+ {
1648
+ "epoch": 0.7030951471150172,
1649
+ "grad_norm": 0.20407404005527496,
1650
+ "learning_rate": 1.6464061583355088e-05,
1651
+ "loss": 0.7335573434829712,
1652
+ "step": 230
1653
+ },
1654
+ {
1655
+ "epoch": 0.7061520825372564,
1656
+ "grad_norm": 0.22096174955368042,
1657
+ "learning_rate": 1.6423342769514227e-05,
1658
+ "loss": 0.7659798264503479,
1659
+ "step": 231
1660
+ },
1661
+ {
1662
+ "epoch": 0.7092090179594956,
1663
+ "grad_norm": 0.1916825920343399,
1664
+ "learning_rate": 1.6382441836448203e-05,
1665
+ "loss": 0.7162011861801147,
1666
+ "step": 232
1667
+ },
1668
+ {
1669
+ "epoch": 0.7122659533817348,
1670
+ "grad_norm": 0.20505093038082123,
1671
+ "learning_rate": 1.6341359943809626e-05,
1672
+ "loss": 0.6957600116729736,
1673
+ "step": 233
1674
+ },
1675
+ {
1676
+ "epoch": 0.715322888803974,
1677
+ "grad_norm": 0.19968082010746002,
1678
+ "learning_rate": 1.6300098256381807e-05,
1679
+ "loss": 0.6724053025245667,
1680
+ "step": 234
1681
+ },
1682
+ {
1683
+ "epoch": 0.7183798242262133,
1684
+ "grad_norm": 0.19768832623958588,
1685
+ "learning_rate": 1.625865794404573e-05,
1686
+ "loss": 0.774741530418396,
1687
+ "step": 235
1688
+ },
1689
+ {
1690
+ "epoch": 0.7214367596484524,
1691
+ "grad_norm": 0.19257694482803345,
1692
+ "learning_rate": 1.621704018174688e-05,
1693
+ "loss": 0.6658651828765869,
1694
+ "step": 236
1695
+ },
1696
+ {
1697
+ "epoch": 0.7244936950706916,
1698
+ "grad_norm": 0.21594858169555664,
1699
+ "learning_rate": 1.617524614946192e-05,
1700
+ "loss": 0.810744047164917,
1701
+ "step": 237
1702
+ },
1703
+ {
1704
+ "epoch": 0.7275506304929308,
1705
+ "grad_norm": 0.2107633650302887,
1706
+ "learning_rate": 1.6133277032165264e-05,
1707
+ "loss": 0.7623897194862366,
1708
+ "step": 238
1709
+ },
1710
+ {
1711
+ "epoch": 0.7306075659151701,
1712
+ "grad_norm": 0.20114055275917053,
1713
+ "learning_rate": 1.6091134019795447e-05,
1714
+ "loss": 0.7082816362380981,
1715
+ "step": 239
1716
+ },
1717
+ {
1718
+ "epoch": 0.7336645013374092,
1719
+ "grad_norm": 0.2542732059955597,
1720
+ "learning_rate": 1.604881830722141e-05,
1721
+ "loss": 0.7051193714141846,
1722
+ "step": 240
1723
+ },
1724
+ {
1725
+ "epoch": 0.7367214367596484,
1726
+ "grad_norm": 0.19180485606193542,
1727
+ "learning_rate": 1.600633109420861e-05,
1728
+ "loss": 0.7895385026931763,
1729
+ "step": 241
1730
+ },
1731
+ {
1732
+ "epoch": 0.7397783721818877,
1733
+ "grad_norm": 0.368756502866745,
1734
+ "learning_rate": 1.5963673585385016e-05,
1735
+ "loss": 0.7146293520927429,
1736
+ "step": 242
1737
+ },
1738
+ {
1739
+ "epoch": 0.7428353076041269,
1740
+ "grad_norm": 0.18490125238895416,
1741
+ "learning_rate": 1.5920846990206934e-05,
1742
+ "loss": 0.650428056716919,
1743
+ "step": 243
1744
+ },
1745
+ {
1746
+ "epoch": 0.745892243026366,
1747
+ "grad_norm": 0.23592503368854523,
1748
+ "learning_rate": 1.5877852522924733e-05,
1749
+ "loss": 0.6367110013961792,
1750
+ "step": 244
1751
+ },
1752
+ {
1753
+ "epoch": 0.7489491784486053,
1754
+ "grad_norm": 0.20223264396190643,
1755
+ "learning_rate": 1.5834691402548415e-05,
1756
+ "loss": 0.6563615798950195,
1757
+ "step": 245
1758
+ },
1759
+ {
1760
+ "epoch": 0.7520061138708445,
1761
+ "grad_norm": 0.27459946274757385,
1762
+ "learning_rate": 1.5791364852813047e-05,
1763
+ "loss": 0.7361881136894226,
1764
+ "step": 246
1765
+ },
1766
+ {
1767
+ "epoch": 0.7550630492930837,
1768
+ "grad_norm": 0.21085411310195923,
1769
+ "learning_rate": 1.5747874102144073e-05,
1770
+ "loss": 0.7373813390731812,
1771
+ "step": 247
1772
+ },
1773
+ {
1774
+ "epoch": 0.7581199847153229,
1775
+ "grad_norm": 0.23332320153713226,
1776
+ "learning_rate": 1.5704220383622464e-05,
1777
+ "loss": 0.6971457004547119,
1778
+ "step": 248
1779
+ },
1780
+ {
1781
+ "epoch": 0.7611769201375621,
1782
+ "grad_norm": 0.23525936901569366,
1783
+ "learning_rate": 1.5660404934949798e-05,
1784
+ "loss": 0.6756627559661865,
1785
+ "step": 249
1786
+ },
1787
+ {
1788
+ "epoch": 0.7642338555598013,
1789
+ "grad_norm": 0.2150791585445404,
1790
+ "learning_rate": 1.5616428998413122e-05,
1791
+ "loss": 0.7029792666435242,
1792
+ "step": 250
1793
+ },
1794
+ {
1795
+ "epoch": 0.7642338555598013,
1796
+ "eval_loss": 0.7269901633262634,
1797
+ "eval_runtime": 877.665,
1798
+ "eval_samples_per_second": 0.687,
1799
+ "eval_steps_per_second": 0.687,
1800
+ "step": 250
1801
+ },
1802
+ {
1803
+ "epoch": 0.7672907909820404,
1804
+ "grad_norm": 0.19510552287101746,
1805
+ "learning_rate": 1.5572293820849754e-05,
1806
+ "loss": 0.715162992477417,
1807
+ "step": 251
1808
+ },
1809
+ {
1810
+ "epoch": 0.7703477264042797,
1811
+ "grad_norm": 0.25246763229370117,
1812
+ "learning_rate": 1.5528000653611935e-05,
1813
+ "loss": 0.634660542011261,
1814
+ "step": 252
1815
+ },
1816
+ {
1817
+ "epoch": 0.7734046618265189,
1818
+ "grad_norm": 0.2980027496814728,
1819
+ "learning_rate": 1.5483550752531337e-05,
1820
+ "loss": 0.7154463529586792,
1821
+ "step": 253
1822
+ },
1823
+ {
1824
+ "epoch": 0.7764615972487581,
1825
+ "grad_norm": 0.2730556130409241,
1826
+ "learning_rate": 1.5438945377883463e-05,
1827
+ "loss": 0.8110946416854858,
1828
+ "step": 254
1829
+ },
1830
+ {
1831
+ "epoch": 0.7795185326709974,
1832
+ "grad_norm": 0.17258886992931366,
1833
+ "learning_rate": 1.5394185794351914e-05,
1834
+ "loss": 0.72202467918396,
1835
+ "step": 255
1836
+ },
1837
+ {
1838
+ "epoch": 0.7825754680932365,
1839
+ "grad_norm": 0.19966280460357666,
1840
+ "learning_rate": 1.5349273270992537e-05,
1841
+ "loss": 0.7368704080581665,
1842
+ "step": 256
1843
+ },
1844
+ {
1845
+ "epoch": 0.7856324035154757,
1846
+ "grad_norm": 0.23305682837963104,
1847
+ "learning_rate": 1.5304209081197425e-05,
1848
+ "loss": 0.7429723143577576,
1849
+ "step": 257
1850
+ },
1851
+ {
1852
+ "epoch": 0.788689338937715,
1853
+ "grad_norm": 0.21786810457706451,
1854
+ "learning_rate": 1.5258994502658846e-05,
1855
+ "loss": 0.6498424410820007,
1856
+ "step": 258
1857
+ },
1858
+ {
1859
+ "epoch": 0.7917462743599541,
1860
+ "grad_norm": 0.2370925396680832,
1861
+ "learning_rate": 1.5213630817332985e-05,
1862
+ "loss": 0.7379459142684937,
1863
+ "step": 259
1864
+ },
1865
+ {
1866
+ "epoch": 0.7948032097821933,
1867
+ "grad_norm": 0.25566384196281433,
1868
+ "learning_rate": 1.5168119311403611e-05,
1869
+ "loss": 0.6742876172065735,
1870
+ "step": 260
1871
+ },
1872
+ {
1873
+ "epoch": 0.7978601452044326,
1874
+ "grad_norm": 0.2171633243560791,
1875
+ "learning_rate": 1.512246127524561e-05,
1876
+ "loss": 0.72329181432724,
1877
+ "step": 261
1878
+ },
1879
+ {
1880
+ "epoch": 0.8009170806266718,
1881
+ "grad_norm": 0.23292019963264465,
1882
+ "learning_rate": 1.50766580033884e-05,
1883
+ "loss": 0.765812873840332,
1884
+ "step": 262
1885
+ },
1886
+ {
1887
+ "epoch": 0.8039740160489109,
1888
+ "grad_norm": 0.19427980482578278,
1889
+ "learning_rate": 1.5030710794479226e-05,
1890
+ "loss": 0.7872639298439026,
1891
+ "step": 263
1892
+ },
1893
+ {
1894
+ "epoch": 0.8070309514711502,
1895
+ "grad_norm": 0.2460346817970276,
1896
+ "learning_rate": 1.4984620951246333e-05,
1897
+ "loss": 0.6940722465515137,
1898
+ "step": 264
1899
+ },
1900
+ {
1901
+ "epoch": 0.8100878868933894,
1902
+ "grad_norm": 0.2493411898612976,
1903
+ "learning_rate": 1.4938389780462044e-05,
1904
+ "loss": 0.7680137157440186,
1905
+ "step": 265
1906
+ },
1907
+ {
1908
+ "epoch": 0.8131448223156286,
1909
+ "grad_norm": 0.23873573541641235,
1910
+ "learning_rate": 1.4892018592905702e-05,
1911
+ "loss": 0.6780916452407837,
1912
+ "step": 266
1913
+ },
1914
+ {
1915
+ "epoch": 0.8162017577378677,
1916
+ "grad_norm": 0.2580571174621582,
1917
+ "learning_rate": 1.4845508703326504e-05,
1918
+ "loss": 0.7183764576911926,
1919
+ "step": 267
1920
+ },
1921
+ {
1922
+ "epoch": 0.819258693160107,
1923
+ "grad_norm": 0.2125079482793808,
1924
+ "learning_rate": 1.4798861430406221e-05,
1925
+ "loss": 0.8207096457481384,
1926
+ "step": 268
1927
+ },
1928
+ {
1929
+ "epoch": 0.8223156285823462,
1930
+ "grad_norm": 0.21065691113471985,
1931
+ "learning_rate": 1.4752078096721827e-05,
1932
+ "loss": 0.7414214611053467,
1933
+ "step": 269
1934
+ },
1935
+ {
1936
+ "epoch": 0.8253725640045854,
1937
+ "grad_norm": 0.25807511806488037,
1938
+ "learning_rate": 1.4705160028707976e-05,
1939
+ "loss": 0.7086384296417236,
1940
+ "step": 270
1941
+ },
1942
+ {
1943
+ "epoch": 0.8284294994268246,
1944
+ "grad_norm": 0.2444671094417572,
1945
+ "learning_rate": 1.4658108556619417e-05,
1946
+ "loss": 0.7065964937210083,
1947
+ "step": 271
1948
+ },
1949
+ {
1950
+ "epoch": 0.8314864348490638,
1951
+ "grad_norm": 0.200303316116333,
1952
+ "learning_rate": 1.461092501449326e-05,
1953
+ "loss": 0.7533905506134033,
1954
+ "step": 272
1955
+ },
1956
+ {
1957
+ "epoch": 0.834543370271303,
1958
+ "grad_norm": 0.2807226777076721,
1959
+ "learning_rate": 1.4563610740111163e-05,
1960
+ "loss": 0.756553053855896,
1961
+ "step": 273
1962
+ },
1963
+ {
1964
+ "epoch": 0.8376003056935423,
1965
+ "grad_norm": 0.2516884207725525,
1966
+ "learning_rate": 1.4516167074961394e-05,
1967
+ "loss": 0.8125098347663879,
1968
+ "step": 274
1969
+ },
1970
+ {
1971
+ "epoch": 0.8406572411157814,
1972
+ "grad_norm": 0.22799813747406006,
1973
+ "learning_rate": 1.4468595364200808e-05,
1974
+ "loss": 0.7360811829566956,
1975
+ "step": 275
1976
+ },
1977
+ {
1978
+ "epoch": 0.8437141765380206,
1979
+ "grad_norm": 0.27390384674072266,
1980
+ "learning_rate": 1.4420896956616698e-05,
1981
+ "loss": 0.7135312557220459,
1982
+ "step": 276
1983
+ },
1984
+ {
1985
+ "epoch": 0.8467711119602599,
1986
+ "grad_norm": 0.2811775505542755,
1987
+ "learning_rate": 1.4373073204588556e-05,
1988
+ "loss": 0.7489083409309387,
1989
+ "step": 277
1990
+ },
1991
+ {
1992
+ "epoch": 0.8498280473824991,
1993
+ "grad_norm": 0.2652314603328705,
1994
+ "learning_rate": 1.4325125464049725e-05,
1995
+ "loss": 0.752477765083313,
1996
+ "step": 278
1997
+ },
1998
+ {
1999
+ "epoch": 0.8528849828047382,
2000
+ "grad_norm": 0.2218960076570511,
2001
+ "learning_rate": 1.427705509444897e-05,
2002
+ "loss": 0.6534979939460754,
2003
+ "step": 279
2004
+ },
2005
+ {
2006
+ "epoch": 0.8559419182269774,
2007
+ "grad_norm": 0.23746474087238312,
2008
+ "learning_rate": 1.4228863458711915e-05,
2009
+ "loss": 0.7061883211135864,
2010
+ "step": 280
2011
+ },
2012
+ {
2013
+ "epoch": 0.8589988536492167,
2014
+ "grad_norm": 0.21507228910923004,
2015
+ "learning_rate": 1.4180551923202406e-05,
2016
+ "loss": 0.7044329643249512,
2017
+ "step": 281
2018
+ },
2019
+ {
2020
+ "epoch": 0.8620557890714559,
2021
+ "grad_norm": 0.2412186861038208,
2022
+ "learning_rate": 1.4132121857683782e-05,
2023
+ "loss": 0.706013023853302,
2024
+ "step": 282
2025
+ },
2026
+ {
2027
+ "epoch": 0.865112724493695,
2028
+ "grad_norm": 0.2832106947898865,
2029
+ "learning_rate": 1.4083574635280029e-05,
2030
+ "loss": 0.6572445631027222,
2031
+ "step": 283
2032
+ },
2033
+ {
2034
+ "epoch": 0.8681696599159343,
2035
+ "grad_norm": 0.21925900876522064,
2036
+ "learning_rate": 1.403491163243684e-05,
2037
+ "loss": 0.675041139125824,
2038
+ "step": 284
2039
+ },
2040
+ {
2041
+ "epoch": 0.8712265953381735,
2042
+ "grad_norm": 0.22488665580749512,
2043
+ "learning_rate": 1.3986134228882607e-05,
2044
+ "loss": 0.7474229335784912,
2045
+ "step": 285
2046
+ },
2047
+ {
2048
+ "epoch": 0.8742835307604127,
2049
+ "grad_norm": 0.2221737653017044,
2050
+ "learning_rate": 1.3937243807589291e-05,
2051
+ "loss": 0.7394901514053345,
2052
+ "step": 286
2053
+ },
2054
+ {
2055
+ "epoch": 0.8773404661826519,
2056
+ "grad_norm": 0.29034581780433655,
2057
+ "learning_rate": 1.388824175473321e-05,
2058
+ "loss": 0.7346636056900024,
2059
+ "step": 287
2060
+ },
2061
+ {
2062
+ "epoch": 0.8803974016048911,
2063
+ "grad_norm": 0.2580259144306183,
2064
+ "learning_rate": 1.383912945965574e-05,
2065
+ "loss": 0.8125481009483337,
2066
+ "step": 288
2067
+ },
2068
+ {
2069
+ "epoch": 0.8834543370271303,
2070
+ "grad_norm": 0.2533118724822998,
2071
+ "learning_rate": 1.3789908314823932e-05,
2072
+ "loss": 0.6768131256103516,
2073
+ "step": 289
2074
+ },
2075
+ {
2076
+ "epoch": 0.8865112724493696,
2077
+ "grad_norm": 0.2074616551399231,
2078
+ "learning_rate": 1.3740579715791017e-05,
2079
+ "loss": 0.7096269726753235,
2080
+ "step": 290
2081
+ },
2082
+ {
2083
+ "epoch": 0.8895682078716087,
2084
+ "grad_norm": 0.29789987206459045,
2085
+ "learning_rate": 1.3691145061156843e-05,
2086
+ "loss": 0.6973364353179932,
2087
+ "step": 291
2088
+ },
2089
+ {
2090
+ "epoch": 0.8926251432938479,
2091
+ "grad_norm": 0.2937224805355072,
2092
+ "learning_rate": 1.3641605752528225e-05,
2093
+ "loss": 0.7693608999252319,
2094
+ "step": 292
2095
+ },
2096
+ {
2097
+ "epoch": 0.8956820787160871,
2098
+ "grad_norm": 0.27355870604515076,
2099
+ "learning_rate": 1.3591963194479198e-05,
2100
+ "loss": 0.6870795488357544,
2101
+ "step": 293
2102
+ },
2103
+ {
2104
+ "epoch": 0.8987390141383264,
2105
+ "grad_norm": 0.22792251408100128,
2106
+ "learning_rate": 1.3542218794511212e-05,
2107
+ "loss": 0.7095532417297363,
2108
+ "step": 294
2109
+ },
2110
+ {
2111
+ "epoch": 0.9017959495605655,
2112
+ "grad_norm": 0.2855125665664673,
2113
+ "learning_rate": 1.3492373963013199e-05,
2114
+ "loss": 0.7536489963531494,
2115
+ "step": 295
2116
+ },
2117
+ {
2118
+ "epoch": 0.9048528849828047,
2119
+ "grad_norm": 0.24969056248664856,
2120
+ "learning_rate": 1.3442430113221602e-05,
2121
+ "loss": 0.7433043718338013,
2122
+ "step": 296
2123
+ },
2124
+ {
2125
+ "epoch": 0.907909820405044,
2126
+ "grad_norm": 0.24534980952739716,
2127
+ "learning_rate": 1.3392388661180303e-05,
2128
+ "loss": 0.7204138040542603,
2129
+ "step": 297
2130
+ },
2131
+ {
2132
+ "epoch": 0.9109667558272831,
2133
+ "grad_norm": 0.2540739178657532,
2134
+ "learning_rate": 1.3342251025700474e-05,
2135
+ "loss": 0.7114053964614868,
2136
+ "step": 298
2137
+ },
2138
+ {
2139
+ "epoch": 0.9140236912495223,
2140
+ "grad_norm": 0.2494630217552185,
2141
+ "learning_rate": 1.3292018628320346e-05,
2142
+ "loss": 0.7337151169776917,
2143
+ "step": 299
2144
+ },
2145
+ {
2146
+ "epoch": 0.9170806266717616,
2147
+ "grad_norm": 0.3079741597175598,
2148
+ "learning_rate": 1.3241692893264909e-05,
2149
+ "loss": 0.7486672401428223,
2150
+ "step": 300
2151
+ },
2152
+ {
2153
+ "epoch": 0.9170806266717616,
2154
+ "eval_loss": 0.7063615918159485,
2155
+ "eval_runtime": 882.246,
2156
+ "eval_samples_per_second": 0.683,
2157
+ "eval_steps_per_second": 0.683,
2158
+ "step": 300
2159
+ }
2160
+ ],
2161
+ "logging_steps": 1,
2162
+ "max_steps": 656,
2163
+ "num_input_tokens_seen": 0,
2164
+ "num_train_epochs": 2,
2165
+ "save_steps": 100,
2166
+ "stateful_callbacks": {
2167
+ "TrainerControl": {
2168
+ "args": {
2169
+ "should_epoch_stop": false,
2170
+ "should_evaluate": false,
2171
+ "should_log": false,
2172
+ "should_save": true,
2173
+ "should_training_stop": false
2174
+ },
2175
+ "attributes": {}
2176
+ }
2177
+ },
2178
+ "total_flos": 1.6564378492993536e+18,
2179
+ "train_batch_size": 1,
2180
+ "trial_name": null,
2181
+ "trial_params": null
2182
+ }
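The trainer state above ends with per-step training records, two evaluation records (eval_loss 0.7270 at step 250 and 0.7064 at step 300), and the run-level settings (max_steps 656, save_steps 100, train_batch_size 1). A minimal sketch for pulling those curves back out of a checkpoint's trainer_state.json; the path is illustrative and assumes the repository has been downloaded locally:

```python
import json

# Illustrative path; point it at any checkpoint's trainer_state.json.
STATE_PATH = "cpt_qwen_14B/checkpoints/checkpoint-300/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history mixes per-step training entries and periodic eval entries.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"{len(train)} train points logged out of max_steps={state['max_steps']}")
for step, loss in evals:
    print(f"eval @ step {step}: loss {loss:.4f}")
```

The same log_history entries also carry learning_rate and grad_norm, so the comprehensions extend directly if those curves are wanted as well.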
cpt_qwen_14B/checkpoints/checkpoint-300/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6a8e308e47eb936f678712445b19ddc52638f354c37c813ecaa432f69120a2e
3
+ size 5201
cpt_qwen_14B/checkpoints/checkpoint-400/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: /workspace/Models/Qwen2.5-Coder-14B
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:/workspace/Models/Qwen2.5-Coder-14B
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
cpt_qwen_14B/checkpoints/checkpoint-400/adapter_config.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "/workspace/Models/Qwen2.5-Coder-14B",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 64,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
+ "r": 32,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "v_proj",
33
+ "q_proj",
34
+ "o_proj",
35
+ "k_proj"
36
+ ],
37
+ "target_parameters": null,
38
+ "task_type": "CAUSAL_LM",
39
+ "trainable_token_indices": null,
40
+ "use_dora": false,
41
+ "use_qalora": false,
42
+ "use_rslora": false
43
+ }
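The adapter config above describes a rank-32 LoRA (lora_alpha 64, dropout 0.05) on the q/k/v/o attention projections of the Qwen2.5-Coder-14B base model. A minimal loading sketch with transformers and PEFT; the base path must match whatever base_model_name_or_path resolves to on your machine, and the adapter path below is illustrative:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

BASE = "/workspace/Models/Qwen2.5-Coder-14B"          # as in adapter_config.json
ADAPTER = "cpt_qwen_14B/checkpoints/checkpoint-400"   # illustrative local path

tokenizer = AutoTokenizer.from_pretrained(ADAPTER)
model = AutoModelForCausalLM.from_pretrained(
    BASE, torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(model, ADAPTER)  # attaches the rank-32 LoRA weights
model.eval()
```

If a standalone checkpoint is preferred, model.merge_and_unload() folds the LoRA deltas into the base weights after loading.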
cpt_qwen_14B/checkpoints/checkpoint-400/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc35c49614987939ccf4e73733555cd12c1d55e627db1e2f836d2341ca58bd60
3
+ size 201378736
cpt_qwen_14B/checkpoints/checkpoint-400/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
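The template above renders Qwen-style ChatML (<|im_start|> … <|im_end|>) and, when a tools list is supplied, injects the function signatures and <tool_call>/<tool_response> wrappers. It is not meant to be rendered by hand; the tokenizer applies it. A minimal sketch, assuming a transformers release recent enough to pick up a standalone chat_template.jinja and an illustrative local path:

```python
from transformers import AutoTokenizer

# Any directory holding tokenizer.json, tokenizer_config.json and
# chat_template.jinja will do; this path is illustrative.
tok = AutoTokenizer.from_pretrained("cpt_qwen_14B/checkpoints/checkpoint-400")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Write a Python function that reverses a string."},
]

# add_generation_prompt=True appends the trailing '<|im_start|>assistant\n'
# so the model continues as the assistant.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```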
cpt_qwen_14B/checkpoints/checkpoint-400/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be4e4b976fbe87d4881b1cf906d435f007d6f2c6775114b03ca77718ebb3e099
3
+ size 102698855
cpt_qwen_14B/checkpoints/checkpoint-400/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:54f252272095f008d9c3adc5557d863356ce442db9820129678a2dbdeb028a30
3
+ size 14645
cpt_qwen_14B/checkpoints/checkpoint-400/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c58aed3093aa166a91339aaba61e306c07de6b4c4581b6fcc79de090acb72707
3
+ size 1465
cpt_qwen_14B/checkpoints/checkpoint-400/tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3fd169731d2cbde95e10bf356d66d5997fd885dd8dbb6fb4684da3f23b2585d8
3
+ size 11421892
cpt_qwen_14B/checkpoints/checkpoint-400/tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "backend": "tokenizers",
4
+ "bos_token": null,
5
+ "clean_up_tokenization_spaces": false,
6
+ "eos_token": "<|endoftext|>",
7
+ "errors": "replace",
8
+ "extra_special_tokens": [
9
+ "<|im_start|>",
10
+ "<|im_end|>",
11
+ "<|object_ref_start|>",
12
+ "<|object_ref_end|>",
13
+ "<|box_start|>",
14
+ "<|box_end|>",
15
+ "<|quad_start|>",
16
+ "<|quad_end|>",
17
+ "<|vision_start|>",
18
+ "<|vision_end|>",
19
+ "<|vision_pad|>",
20
+ "<|image_pad|>",
21
+ "<|video_pad|>"
22
+ ],
23
+ "is_local": true,
24
+ "model_max_length": 32768,
25
+ "pad_token": "<|endoftext|>",
26
+ "split_special_tokens": false,
27
+ "tokenizer_class": "Qwen2Tokenizer",
28
+ "unk_token": null
29
+ }
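The tokenizer config pins both eos_token and pad_token to <|endoftext|>, caps model_max_length at 32768, and registers the ChatML and vision markers as extra special tokens. A quick sanity-check sketch (illustrative path) that those settings survive loading:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("cpt_qwen_14B/checkpoints/checkpoint-400")

print(tok.eos_token, tok.pad_token, tok.model_max_length)  # expect <|endoftext|> twice, 32768

# The ChatML markers should round-trip as single tokens rather than being split.
ids = tok("<|im_start|>user<|im_end|>", add_special_tokens=False)["input_ids"]
print(tok.convert_ids_to_tokens(ids))
```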