gg676 committed
Commit 40c75ca · verified · 1 Parent(s): 7beb7e3

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/README.md +209 -0
  2. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/adapter_config.json +46 -0
  3. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/added_tokens.json +24 -0
  4. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/chat_template.jinja +54 -0
  5. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/latest +1 -0
  6. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/merges.txt +0 -0
  7. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/special_tokens_map.json +31 -0
  8. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/tokenizer_config.json +207 -0
  9. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/trainer_state.json +834 -0
  10. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/vocab.json +0 -0
  11. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/zero_to_fp32.py +760 -0
  12. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/README.md +209 -0
  13. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/adapter_config.json +46 -0
  14. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/added_tokens.json +24 -0
  15. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/chat_template.jinja +54 -0
  16. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/latest +1 -0
  17. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/merges.txt +0 -0
  18. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/special_tokens_map.json +31 -0
  19. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/tokenizer_config.json +207 -0
  20. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/trainer_state.json +1034 -0
  21. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/vocab.json +0 -0
  22. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/zero_to_fp32.py +760 -0
  23. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/adapter_config.json +46 -0
  24. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/added_tokens.json +24 -0
  25. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/chat_template.jinja +54 -0
  26. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/latest +1 -0
  27. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/merges.txt +0 -0
  28. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/special_tokens_map.json +31 -0
  29. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/vocab.json +0 -0
  30. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/zero_to_fp32.py +760 -0
  31. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/adapter_config.json +46 -0
  32. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/added_tokens.json +24 -0
  33. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/chat_template.jinja +54 -0
  34. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/latest +1 -0
  35. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/merges.txt +0 -0
  36. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/special_tokens_map.json +31 -0
  37. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/tokenizer_config.json +207 -0
  38. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/trainer_state.json +1434 -0
  39. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/vocab.json +0 -0
  40. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/zero_to_fp32.py +760 -0
  41. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/adapter_config.json +46 -0
  42. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/added_tokens.json +24 -0
  43. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/chat_template.jinja +54 -0
  44. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/merges.txt +0 -0
  45. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/special_tokens_map.json +31 -0
  46. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/tokenizer_config.json +207 -0
  47. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/trainer_state.json +1534 -0
  48. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/vocab.json +0 -0
  49. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/zero_to_fp32.py +760 -0
  50. qwen2.5-coder-3B_r64_lr1e-4/checkpoint-8000/special_tokens_map.json +31 -0
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/README.md ADDED
@@ -0,0 +1,209 @@
+ ---
+ base_model: Qwen/Qwen2.5-Coder-3B
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:Qwen/Qwen2.5-Coder-3B
+ - lora
+ - sft
+ - transformers
+ - trl
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.18.0
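
The card's "How to Get Started" section is still a placeholder. A minimal sketch of loading this checkpoint's adapter with peft; the local adapter path is an assumption based on this repo's layout, and the example prompt is invented:

from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Base model named in the card metadata; adapter path assumes this repo's folder layout.
base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-Coder-3B")
tokenizer = AutoTokenizer.from_pretrained("qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000")
model = PeftModel.from_pretrained(base, "qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000")

inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0]))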
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "alora_invocation_tokens": null,
+   "alpha_pattern": {},
+   "arrow_config": null,
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen2.5-Coder-3B",
+   "bias": "none",
+   "corda_config": null,
+   "ensure_weight_tying": false,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 128,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "peft_version": "0.18.0",
+   "qalora_group_size": 16,
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "up_proj",
+     "q_proj",
+     "o_proj",
+     "down_proj",
+     "gate_proj",
+     "v_proj",
+     "k_proj"
+   ],
+   "target_parameters": null,
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
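
The config above describes a rank-64 LoRA (alpha 128, so scaling alpha/r = 2.0, dropout 0.05) applied to every attention and MLP projection of the base model. A sketch of the equivalent peft LoraConfig, for illustration only; the adapter_config.json above is the authoritative record:

from peft import LoraConfig

lora_config = LoraConfig(
    r=64,                # rank, matching the "r64" in the folder name
    lora_alpha=128,      # effective scaling = lora_alpha / r = 2.0
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
)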
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
+ {%- if tools %}
+     {{- '<|im_start|>system\n' }}
+     {%- if messages[0]['role'] == 'system' %}
+         {{- messages[0]['content'] }}
+     {%- else %}
+         {{- 'You are a helpful assistant.' }}
+     {%- endif %}
+     {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+     {%- for tool in tools %}
+         {{- "\n" }}
+         {{- tool | tojson }}
+     {%- endfor %}
+     {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+     {%- if messages[0]['role'] == 'system' %}
+         {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+     {%- else %}
+         {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
+     {%- endif %}
+ {%- endif %}
+ {%- for message in messages %}
+     {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
+         {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+     {%- elif message.role == "assistant" %}
+         {{- '<|im_start|>' + message.role }}
+         {%- if message.content %}
+             {{- '\n' + message.content }}
+         {%- endif %}
+         {%- for tool_call in message.tool_calls %}
+             {%- if tool_call.function is defined %}
+                 {%- set tool_call = tool_call.function %}
+             {%- endif %}
+             {{- '\n<tool_call>\n{"name": "' }}
+             {{- tool_call.name }}
+             {{- '", "arguments": ' }}
+             {{- tool_call.arguments | tojson }}
+             {{- '}\n</tool_call>' }}
+         {%- endfor %}
+         {{- '<|im_end|>\n' }}
+     {%- elif message.role == "tool" %}
+         {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+             {{- '<|im_start|>user' }}
+         {%- endif %}
+         {{- '\n<tool_response>\n' }}
+         {{- message.content }}
+         {{- '\n</tool_response>' }}
+         {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+             {{- '<|im_end|>\n' }}
+         {%- endif %}
+     {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+     {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
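
The template above is what transformers executes in apply_chat_template. A short sketch of exercising it; the messages and the local tokenizer path are invented for illustration:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Write hello world in Rust."},
]
# add_generation_prompt=True appends the trailing "<|im_start|>assistant\n"
# produced by the template's final block.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)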
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/latest ADDED
@@ -0,0 +1 @@
+ global_step4000
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/trainer_state.json ADDED
@@ -0,0 +1,834 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.8,
+   "eval_steps": 500,
+   "global_step": 4000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "entropy": 0.166884765625,
+       "epoch": 0.01,
+       "grad_norm": 0.09714163839817047,
+       "learning_rate": 1.6333333333333335e-05,
+       "loss": 0.1512,
+       "mean_token_accuracy": 0.966504562497139,
+       "num_tokens": 973322.0,
+       "step": 50
+     },
+     {
+       "entropy": 0.048140869140625,
+       "epoch": 0.02,
+       "grad_norm": 0.057216521352529526,
+       "learning_rate": 3.3e-05,
+       "loss": 0.0424,
+       "mean_token_accuracy": 0.9896974349021912,
+       "num_tokens": 1965908.0,
+       "step": 100
+     },
+     {
+       "entropy": 0.029632568359375,
+       "epoch": 0.03,
+       "grad_norm": 0.054412033408880234,
+       "learning_rate": 4.966666666666667e-05,
+       "loss": 0.0248,
+       "mean_token_accuracy": 0.9936319047212601,
+       "num_tokens": 2931943.0,
+       "step": 150
+     },
+     {
+       "entropy": 0.02478515625,
+       "epoch": 0.04,
+       "grad_norm": 0.08800562471151352,
+       "learning_rate": 6.633333333333334e-05,
+       "loss": 0.0214,
+       "mean_token_accuracy": 0.9943305045366287,
+       "num_tokens": 3906979.0,
+       "step": 200
+     },
+     {
+       "entropy": 0.0224652099609375,
+       "epoch": 0.05,
+       "grad_norm": 0.041793134063482285,
+       "learning_rate": 8.3e-05,
+       "loss": 0.0191,
+       "mean_token_accuracy": 0.9948792272806167,
+       "num_tokens": 4891791.0,
+       "step": 250
+     },
+     {
+       "entropy": 0.01969482421875,
+       "epoch": 0.06,
+       "grad_norm": 0.04181526601314545,
+       "learning_rate": 9.966666666666667e-05,
+       "loss": 0.0173,
+       "mean_token_accuracy": 0.995435175895691,
+       "num_tokens": 5923811.0,
+       "step": 300
+     },
+     {
+       "entropy": 0.019368896484375,
+       "epoch": 0.07,
+       "grad_norm": 0.03995412588119507,
+       "learning_rate": 9.999370378817499e-05,
+       "loss": 0.0164,
+       "mean_token_accuracy": 0.9956372672319412,
+       "num_tokens": 6923169.0,
+       "step": 350
+     },
+     {
+       "entropy": 0.0204400634765625,
+       "epoch": 0.08,
+       "grad_norm": 0.06872096657752991,
+       "learning_rate": 9.997430021636957e-05,
+       "loss": 0.0163,
+       "mean_token_accuracy": 0.9956023478507996,
+       "num_tokens": 7901360.0,
+       "step": 400
+     },
+     {
+       "entropy": 0.0186175537109375,
+       "epoch": 0.09,
+       "grad_norm": 0.037068989127874374,
+       "learning_rate": 9.994179175045023e-05,
+       "loss": 0.0157,
+       "mean_token_accuracy": 0.9958296239376068,
+       "num_tokens": 8899074.0,
+       "step": 450
+     },
+     {
+       "entropy": 0.0173492431640625,
+       "epoch": 0.1,
+       "grad_norm": 0.014651700854301453,
+       "learning_rate": 9.989618691519873e-05,
+       "loss": 0.015,
+       "mean_token_accuracy": 0.9959587389230729,
+       "num_tokens": 9909340.0,
+       "step": 500
+     },
+     {
+       "entropy": 0.0183203125,
+       "epoch": 0.11,
+       "grad_norm": 0.016151444986462593,
+       "learning_rate": 9.983749766969271e-05,
+       "loss": 0.0151,
+       "mean_token_accuracy": 0.9959770923852921,
+       "num_tokens": 10894801.0,
+       "step": 550
+     },
+     {
+       "entropy": 0.02047607421875,
+       "epoch": 0.12,
+       "grad_norm": 0.021302781999111176,
+       "learning_rate": 9.976573940416966e-05,
+       "loss": 0.0157,
+       "mean_token_accuracy": 0.9958055526018142,
+       "num_tokens": 11862148.0,
+       "step": 600
+     },
+     {
+       "entropy": 0.017862548828125,
+       "epoch": 0.13,
+       "grad_norm": 0.013205700553953648,
+       "learning_rate": 9.968093093599106e-05,
+       "loss": 0.0152,
+       "mean_token_accuracy": 0.9958789277076722,
+       "num_tokens": 12851463.0,
+       "step": 650
+     },
+     {
+       "entropy": 0.0182177734375,
+       "epoch": 0.14,
+       "grad_norm": 0.013120726682245731,
+       "learning_rate": 9.958309450470784e-05,
+       "loss": 0.0144,
+       "mean_token_accuracy": 0.9962116169929505,
+       "num_tokens": 13849228.0,
+       "step": 700
+     },
+     {
+       "entropy": 0.0179010009765625,
+       "epoch": 0.15,
+       "grad_norm": 0.014701537787914276,
+       "learning_rate": 9.947225576622847e-05,
+       "loss": 0.0146,
+       "mean_token_accuracy": 0.9961142331361771,
+       "num_tokens": 14812166.0,
+       "step": 750
+     },
+     {
+       "entropy": 0.0173638916015625,
+       "epoch": 0.16,
+       "grad_norm": 0.033504419028759,
+       "learning_rate": 9.934844378609117e-05,
+       "loss": 0.0145,
+       "mean_token_accuracy": 0.9961155968904495,
+       "num_tokens": 15789120.0,
+       "step": 800
+     },
+     {
+       "entropy": 0.0183349609375,
+       "epoch": 0.17,
+       "grad_norm": 0.010470729321241379,
+       "learning_rate": 9.921169103184187e-05,
+       "loss": 0.0144,
+       "mean_token_accuracy": 0.9961740493774414,
+       "num_tokens": 16764645.0,
+       "step": 850
+     },
+     {
+       "entropy": 0.016279296875,
+       "epoch": 0.18,
+       "grad_norm": 0.02781980112195015,
+       "learning_rate": 9.906203336452029e-05,
+       "loss": 0.0142,
+       "mean_token_accuracy": 0.9962105673551559,
+       "num_tokens": 17730003.0,
+       "step": 900
+     },
+     {
+       "entropy": 0.0166461181640625,
+       "epoch": 0.19,
+       "grad_norm": 0.020350394770503044,
+       "learning_rate": 9.889951002925593e-05,
+       "loss": 0.0141,
+       "mean_token_accuracy": 0.9962483876943589,
+       "num_tokens": 18723861.0,
+       "step": 950
+     },
+     {
+       "entropy": 0.0165386962890625,
+       "epoch": 0.2,
+       "grad_norm": 0.013212243095040321,
+       "learning_rate": 9.872416364497675e-05,
+       "loss": 0.0141,
+       "mean_token_accuracy": 0.9961817175149917,
+       "num_tokens": 19695091.0,
+       "step": 1000
+     },
+     {
+       "entropy": 0.017391357421875,
+       "epoch": 0.21,
+       "grad_norm": 0.014041919261217117,
+       "learning_rate": 9.853604019323301e-05,
+       "loss": 0.0141,
+       "mean_token_accuracy": 0.9962570595741272,
+       "num_tokens": 20662255.0,
+       "step": 1050
+     },
+     {
+       "entropy": 0.016884765625,
+       "epoch": 0.22,
+       "grad_norm": 0.017174789682030678,
+       "learning_rate": 9.833518900613956e-05,
+       "loss": 0.0138,
+       "mean_token_accuracy": 0.9962636953592301,
+       "num_tokens": 21642092.0,
+       "step": 1100
+     },
+     {
+       "entropy": 0.019366455078125,
+       "epoch": 0.23,
+       "grad_norm": 0.017337976023554802,
+       "learning_rate": 9.812166275343917e-05,
+       "loss": 0.0143,
+       "mean_token_accuracy": 0.996159919500351,
+       "num_tokens": 22607505.0,
+       "step": 1150
+     },
+     {
+       "entropy": 0.0164886474609375,
+       "epoch": 0.24,
+       "grad_norm": 0.018486863002181053,
+       "learning_rate": 9.789551742869096e-05,
+       "loss": 0.0137,
+       "mean_token_accuracy": 0.9963399863243103,
+       "num_tokens": 23584579.0,
+       "step": 1200
+     },
+     {
+       "entropy": 0.016282958984375,
+       "epoch": 0.25,
+       "grad_norm": 0.009195365011692047,
+       "learning_rate": 9.765681233458693e-05,
+       "loss": 0.0136,
+       "mean_token_accuracy": 0.9963367432355881,
+       "num_tokens": 24580285.0,
+       "step": 1250
+     },
+     {
+       "entropy": 0.0197235107421875,
+       "epoch": 0.26,
+       "grad_norm": 0.01205432415008545,
+       "learning_rate": 9.740561006740098e-05,
+       "loss": 0.0144,
+       "mean_token_accuracy": 0.996049216389656,
+       "num_tokens": 25525513.0,
+       "step": 1300
+     },
+     {
+       "entropy": 0.0153009033203125,
+       "epoch": 0.27,
+       "grad_norm": 0.01174497976899147,
+       "learning_rate": 9.7141976500574e-05,
+       "loss": 0.0135,
+       "mean_token_accuracy": 0.9964245760440826,
+       "num_tokens": 26523426.0,
+       "step": 1350
+     },
+     {
+       "entropy": 0.0169378662109375,
+       "epoch": 0.28,
+       "grad_norm": 0.014242298901081085,
+       "learning_rate": 9.686598076743981e-05,
+       "loss": 0.0139,
+       "mean_token_accuracy": 0.9962529343366623,
+       "num_tokens": 27493091.0,
+       "step": 1400
+     },
+     {
+       "entropy": 0.017811279296875,
+       "epoch": 0.29,
+       "grad_norm": 0.0160844549536705,
+       "learning_rate": 9.657769524309605e-05,
+       "loss": 0.0145,
+       "mean_token_accuracy": 0.996181645989418,
+       "num_tokens": 28458553.0,
+       "step": 1450
+     },
+     {
+       "entropy": 0.0169232177734375,
+       "epoch": 0.3,
+       "grad_norm": 0.03241236135363579,
+       "learning_rate": 9.627719552542516e-05,
+       "loss": 0.014,
+       "mean_token_accuracy": 0.9962622857093811,
+       "num_tokens": 29430497.0,
+       "step": 1500
+     },
+     {
+       "entropy": 0.0163720703125,
+       "epoch": 0.31,
+       "grad_norm": 0.01750756986439228,
+       "learning_rate": 9.596456041527001e-05,
+       "loss": 0.0136,
+       "mean_token_accuracy": 0.9964328509569168,
+       "num_tokens": 30414152.0,
+       "step": 1550
+     },
+     {
+       "entropy": 0.017760009765625,
+       "epoch": 0.32,
+       "grad_norm": 0.02182549238204956,
+       "learning_rate": 9.563987189576991e-05,
+       "loss": 0.0147,
+       "mean_token_accuracy": 0.9961263346672058,
+       "num_tokens": 31396417.0,
+       "step": 1600
+     },
+     {
+       "entropy": 0.0171954345703125,
+       "epoch": 0.33,
+       "grad_norm": 0.011301269754767418,
+       "learning_rate": 9.530321511086183e-05,
+       "loss": 0.0139,
+       "mean_token_accuracy": 0.9962880289554596,
+       "num_tokens": 32364443.0,
+       "step": 1650
+     },
+     {
+       "entropy": 0.015126953125,
+       "epoch": 0.34,
+       "grad_norm": 0.009061276912689209,
+       "learning_rate": 9.495467834295291e-05,
+       "loss": 0.0134,
+       "mean_token_accuracy": 0.9964066076278687,
+       "num_tokens": 33374821.0,
+       "step": 1700
+     },
+     {
+       "entropy": 0.018043212890625,
+       "epoch": 0.35,
+       "grad_norm": 0.014127642847597599,
+       "learning_rate": 9.459435298976998e-05,
+       "loss": 0.0137,
+       "mean_token_accuracy": 0.9963885354995727,
+       "num_tokens": 34338570.0,
+       "step": 1750
+     },
+     {
+       "entropy": 0.0155584716796875,
+       "epoch": 0.36,
+       "grad_norm": 0.00913177989423275,
+       "learning_rate": 9.422233354039198e-05,
+       "loss": 0.0132,
+       "mean_token_accuracy": 0.9965262812376022,
+       "num_tokens": 35333253.0,
+       "step": 1800
+     },
+     {
+       "entropy": 0.0177911376953125,
+       "epoch": 0.37,
+       "grad_norm": 0.009629561565816402,
+       "learning_rate": 9.383871755047198e-05,
+       "loss": 0.0131,
+       "mean_token_accuracy": 0.9964758467674255,
+       "num_tokens": 36316255.0,
+       "step": 1850
+     },
+     {
+       "entropy": 0.0159539794921875,
+       "epoch": 0.38,
+       "grad_norm": 0.01395715307444334,
+       "learning_rate": 9.344360561665476e-05,
+       "loss": 0.0132,
+       "mean_token_accuracy": 0.9964872688055039,
+       "num_tokens": 37303247.0,
+       "step": 1900
+     },
+     {
+       "entropy": 0.0152783203125,
+       "epoch": 0.39,
+       "grad_norm": 0.007368483114987612,
+       "learning_rate": 9.30371013501972e-05,
+       "loss": 0.013,
+       "mean_token_accuracy": 0.9965988719463348,
+       "num_tokens": 38287463.0,
+       "step": 1950
+     },
+     {
+       "entropy": 0.0158270263671875,
+       "epoch": 0.4,
+       "grad_norm": 0.01508031040430069,
+       "learning_rate": 9.261931134979791e-05,
+       "loss": 0.0133,
+       "mean_token_accuracy": 0.9965020543336869,
+       "num_tokens": 39273383.0,
+       "step": 2000
+     },
+     {
+       "entropy": 0.0167919921875,
+       "epoch": 0.41,
+       "grad_norm": 0.009929426945745945,
+       "learning_rate": 9.219034517364369e-05,
+       "loss": 0.0131,
+       "mean_token_accuracy": 0.9965337771177292,
+       "num_tokens": 40251665.0,
+       "step": 2050
+     },
+     {
+       "entropy": 0.0153082275390625,
+       "epoch": 0.42,
+       "grad_norm": 0.007909618318080902,
+       "learning_rate": 9.17503153106797e-05,
+       "loss": 0.0129,
+       "mean_token_accuracy": 0.9965605139732361,
+       "num_tokens": 41243731.0,
+       "step": 2100
+     },
+     {
+       "entropy": 0.0154461669921875,
+       "epoch": 0.43,
+       "grad_norm": 0.015921294689178467,
+       "learning_rate": 9.129933715111125e-05,
+       "loss": 0.0129,
+       "mean_token_accuracy": 0.9964988273382187,
+       "num_tokens": 42247346.0,
+       "step": 2150
+     },
+     {
+       "entropy": 0.01652587890625,
+       "epoch": 0.44,
+       "grad_norm": 0.006036434322595596,
+       "learning_rate": 9.083752895614464e-05,
+       "loss": 0.0133,
+       "mean_token_accuracy": 0.9964230692386628,
+       "num_tokens": 43220001.0,
+       "step": 2200
+     },
+     {
+       "entropy": 0.015128173828125,
+       "epoch": 0.45,
+       "grad_norm": 0.009778963401913643,
+       "learning_rate": 9.03650118269753e-05,
+       "loss": 0.0131,
+       "mean_token_accuracy": 0.9965469115972518,
+       "num_tokens": 44205197.0,
+       "step": 2250
+     },
+     {
+       "entropy": 0.015570068359375,
+       "epoch": 0.46,
+       "grad_norm": 0.014646470546722412,
+       "learning_rate": 8.988190967303101e-05,
+       "loss": 0.0131,
+       "mean_token_accuracy": 0.9965356832742691,
+       "num_tokens": 45185181.0,
+       "step": 2300
+     },
+     {
+       "entropy": 0.01632568359375,
+       "epoch": 0.47,
+       "grad_norm": 0.006433142349123955,
+       "learning_rate": 8.938834917947889e-05,
+       "loss": 0.0132,
+       "mean_token_accuracy": 0.9965211725234986,
+       "num_tokens": 46170545.0,
+       "step": 2350
+     },
+     {
+       "entropy": 0.01555419921875,
+       "epoch": 0.48,
+       "grad_norm": 0.007329673506319523,
+       "learning_rate": 8.888445977400435e-05,
+       "loss": 0.0131,
+       "mean_token_accuracy": 0.9965089631080627,
+       "num_tokens": 47152165.0,
+       "step": 2400
+     },
+     {
+       "entropy": 0.0156890869140625,
+       "epoch": 0.49,
+       "grad_norm": 0.010034145787358284,
+       "learning_rate": 8.837037359287092e-05,
+       "loss": 0.0131,
+       "mean_token_accuracy": 0.9965383523702621,
+       "num_tokens": 48122023.0,
+       "step": 2450
+     },
+     {
+       "entropy": 0.0163201904296875,
+       "epoch": 0.5,
+       "grad_norm": 0.008338450454175472,
+       "learning_rate": 8.784622544626985e-05,
+       "loss": 0.0131,
+       "mean_token_accuracy": 0.9965311539173126,
+       "num_tokens": 49094218.0,
+       "step": 2500
+     },
+     {
+       "entropy": 0.0152789306640625,
+       "epoch": 0.51,
+       "grad_norm": 0.007771766744554043,
+       "learning_rate": 8.731215278296843e-05,
+       "loss": 0.0128,
+       "mean_token_accuracy": 0.9965461915731431,
+       "num_tokens": 50090248.0,
+       "step": 2550
+     },
+     {
+       "entropy": 0.0171893310546875,
+       "epoch": 0.52,
+       "grad_norm": 0.010534558445215225,
+       "learning_rate": 8.676829565426646e-05,
+       "loss": 0.0132,
+       "mean_token_accuracy": 0.9964153295755387,
+       "num_tokens": 51055771.0,
+       "step": 2600
+     },
+     {
+       "entropy": 0.0148272705078125,
+       "epoch": 0.53,
+       "grad_norm": 0.007072142791002989,
+       "learning_rate": 8.62147966772702e-05,
+       "loss": 0.0129,
+       "mean_token_accuracy": 0.9966214698553085,
+       "num_tokens": 52051029.0,
+       "step": 2650
+     },
+     {
+       "entropy": 0.0146075439453125,
+       "epoch": 0.54,
+       "grad_norm": 0.007414950989186764,
+       "learning_rate": 8.565180099749355e-05,
+       "loss": 0.0125,
+       "mean_token_accuracy": 0.9966325616836548,
+       "num_tokens": 53064519.0,
+       "step": 2700
+     },
+     {
+       "entropy": 0.0161309814453125,
+       "epoch": 0.55,
+       "grad_norm": 0.0070669627748429775,
+       "learning_rate": 8.50794562507961e-05,
+       "loss": 0.013,
+       "mean_token_accuracy": 0.9964843678474427,
+       "num_tokens": 54043844.0,
+       "step": 2750
+     },
+     {
+       "entropy": 0.015706787109375,
+       "epoch": 0.56,
+       "grad_norm": 0.005744470749050379,
+       "learning_rate": 8.449791252466819e-05,
+       "loss": 0.013,
+       "mean_token_accuracy": 0.9965711253881454,
+       "num_tokens": 55026422.0,
+       "step": 2800
+     },
+     {
+       "entropy": 0.01641357421875,
+       "epoch": 0.57,
+       "grad_norm": 0.0069727362133562565,
+       "learning_rate": 8.390732231887314e-05,
+       "loss": 0.0129,
+       "mean_token_accuracy": 0.996541822552681,
+       "num_tokens": 56009908.0,
+       "step": 2850
+     },
+     {
+       "entropy": 0.016571044921875,
+       "epoch": 0.58,
+       "grad_norm": 0.007201408036053181,
+       "learning_rate": 8.330784050545672e-05,
+       "loss": 0.0133,
+       "mean_token_accuracy": 0.9964863443374634,
+       "num_tokens": 56948904.0,
+       "step": 2900
+     },
+     {
+       "entropy": 0.0156451416015625,
+       "epoch": 0.59,
+       "grad_norm": 0.005370237864553928,
+       "learning_rate": 8.269962428813474e-05,
+       "loss": 0.0128,
+       "mean_token_accuracy": 0.9966201782226562,
+       "num_tokens": 57925745.0,
+       "step": 2950
+     },
+     {
+       "entropy": 0.01558837890625,
+       "epoch": 0.6,
+       "grad_norm": 0.005006297491490841,
+       "learning_rate": 8.208283316106902e-05,
+       "loss": 0.0127,
+       "mean_token_accuracy": 0.9966268092393875,
+       "num_tokens": 58902798.0,
+       "step": 3000
+     },
+     {
+       "entropy": 0.015733642578125,
+       "epoch": 0.61,
+       "grad_norm": 0.06520090997219086,
+       "learning_rate": 8.145762886704286e-05,
+       "loss": 0.013,
+       "mean_token_accuracy": 0.9965688252449035,
+       "num_tokens": 59860890.0,
+       "step": 3050
+     },
+     {
+       "entropy": 0.015631103515625,
+       "epoch": 0.62,
+       "grad_norm": 0.017850181087851524,
+       "learning_rate": 8.082417535504683e-05,
+       "loss": 0.0134,
+       "mean_token_accuracy": 0.9963969469070435,
+       "num_tokens": 60845473.0,
+       "step": 3100
+     },
+     {
+       "entropy": 0.0175927734375,
+       "epoch": 0.63,
+       "grad_norm": 0.017514687031507492,
+       "learning_rate": 8.018263873728585e-05,
+       "loss": 0.014,
+       "mean_token_accuracy": 0.9963253819942475,
+       "num_tokens": 61784192.0,
+       "step": 3150
+     },
+     {
+       "entropy": 0.0167742919921875,
+       "epoch": 0.64,
+       "grad_norm": 0.008665528148412704,
+       "learning_rate": 7.953318724561932e-05,
+       "loss": 0.0132,
+       "mean_token_accuracy": 0.9964195656776428,
+       "num_tokens": 62766207.0,
+       "step": 3200
+     },
+     {
+       "entropy": 0.0157501220703125,
+       "epoch": 0.65,
+       "grad_norm": 0.019448991864919662,
+       "learning_rate": 7.887599118744509e-05,
+       "loss": 0.0128,
+       "mean_token_accuracy": 0.9966711962223053,
+       "num_tokens": 63762073.0,
+       "step": 3250
+     },
+     {
+       "entropy": 0.0180609130859375,
+       "epoch": 0.66,
+       "grad_norm": 0.004935233388096094,
+       "learning_rate": 7.821122290103938e-05,
+       "loss": 0.0133,
+       "mean_token_accuracy": 0.9964338165521621,
+       "num_tokens": 64710323.0,
+       "step": 3300
+     },
+     {
+       "entropy": 0.015982666015625,
+       "epoch": 0.67,
+       "grad_norm": 0.004395989701151848,
+       "learning_rate": 7.753905671036403e-05,
+       "loss": 0.0128,
+       "mean_token_accuracy": 0.9966453295946122,
+       "num_tokens": 65703355.0,
+       "step": 3350
+     },
+     {
+       "entropy": 0.0160992431640625,
+       "epoch": 0.68,
+       "grad_norm": 0.01108239684253931,
+       "learning_rate": 7.685966887935309e-05,
+       "loss": 0.0128,
+       "mean_token_accuracy": 0.996572095155716,
+       "num_tokens": 66693270.0,
+       "step": 3400
+     },
+     {
+       "entropy": 0.0156439208984375,
+       "epoch": 0.69,
+       "grad_norm": 0.007867238484323025,
+       "learning_rate": 7.617323756569053e-05,
+       "loss": 0.013,
+       "mean_token_accuracy": 0.9965278363227844,
+       "num_tokens": 67662491.0,
+       "step": 3450
+     },
+     {
+       "entropy": 0.0155462646484375,
+       "epoch": 0.7,
+       "grad_norm": 0.006034135352820158,
+       "learning_rate": 7.547994277409168e-05,
+       "loss": 0.0128,
+       "mean_token_accuracy": 0.9966409939527512,
+       "num_tokens": 68643197.0,
+       "step": 3500
+     },
+     {
+       "entropy": 0.015638427734375,
+       "epoch": 0.71,
+       "grad_norm": 0.007907884195446968,
+       "learning_rate": 7.477996630909994e-05,
+       "loss": 0.0127,
+       "mean_token_accuracy": 0.9966798382997513,
+       "num_tokens": 69638359.0,
+       "step": 3550
+     },
+     {
+       "entropy": 0.0161322021484375,
+       "epoch": 0.72,
+       "grad_norm": 0.007897689007222652,
+       "learning_rate": 7.40734917274118e-05,
+       "loss": 0.0127,
+       "mean_token_accuracy": 0.9966677987575531,
+       "num_tokens": 70621331.0,
+       "step": 3600
+     },
+     {
+       "entropy": 0.015335693359375,
+       "epoch": 0.73,
+       "grad_norm": 0.006248582154512405,
+       "learning_rate": 7.336070428974218e-05,
+       "loss": 0.0127,
+       "mean_token_accuracy": 0.9966181468963623,
+       "num_tokens": 71609172.0,
+       "step": 3650
+     },
+     {
+       "entropy": 0.0166754150390625,
+       "epoch": 0.74,
+       "grad_norm": 0.009811230935156345,
+       "learning_rate": 7.26417909122431e-05,
+       "loss": 0.0127,
+       "mean_token_accuracy": 0.9965909200906754,
+       "num_tokens": 72589978.0,
+       "step": 3700
+     },
+     {
+       "entropy": 0.0143695068359375,
+       "epoch": 0.75,
+       "grad_norm": 0.012172735296189785,
+       "learning_rate": 7.191694011748818e-05,
+       "loss": 0.0131,
+       "mean_token_accuracy": 0.9965182906389236,
+       "num_tokens": 73606622.0,
+       "step": 3750
+     },
+     {
+       "entropy": 0.0156829833984375,
+       "epoch": 0.76,
+       "grad_norm": 0.004879123531281948,
+       "learning_rate": 7.118634198503571e-05,
+       "loss": 0.0129,
+       "mean_token_accuracy": 0.9965479630231857,
+       "num_tokens": 74626982.0,
+       "step": 3800
+     },
+     {
+       "entropy": 0.016346435546875,
+       "epoch": 0.77,
+       "grad_norm": 0.005593061912804842,
+       "learning_rate": 7.045018810158375e-05,
+       "loss": 0.0127,
+       "mean_token_accuracy": 0.9966219502687454,
+       "num_tokens": 75622857.0,
+       "step": 3850
+     },
+     {
+       "entropy": 0.015889892578125,
+       "epoch": 0.78,
+       "grad_norm": 0.006423663813620806,
+       "learning_rate": 6.97086715107298e-05,
+       "loss": 0.0128,
+       "mean_token_accuracy": 0.9966537064313888,
+       "num_tokens": 76601614.0,
+       "step": 3900
+     },
+     {
+       "entropy": 0.015166015625,
+       "epoch": 0.79,
+       "grad_norm": 0.009724115021526814,
+       "learning_rate": 6.896198666234833e-05,
+       "loss": 0.0125,
+       "mean_token_accuracy": 0.9966633880138397,
+       "num_tokens": 77598059.0,
+       "step": 3950
+     },
+     {
+       "entropy": 0.0157177734375,
+       "epoch": 0.8,
+       "grad_norm": 0.014047837816178799,
+       "learning_rate": 6.821032936159986e-05,
+       "loss": 0.0128,
+       "mean_token_accuracy": 0.996613291501999,
+       "num_tokens": 78570180.0,
+       "step": 4000
+     }
+   ],
+   "logging_steps": 50,
+   "max_steps": 10000,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 2,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.3645321080343101e+18,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
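
The log_history above is plain JSON and easy to inspect offline. A sketch (assuming matplotlib is installed; the path follows this repo's layout) of plotting the loss curve it records:

import json
import matplotlib.pyplot as plt

with open("qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/trainer_state.json") as f:
    state = json.load(f)

steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]
plt.plot(steps, losses)  # loss falls from 0.1512 at step 50 to ~0.0128 by step 4000
plt.xlabel("step")
plt.ylabel("training loss")
plt.show()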
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-4000/zero_to_fp32.py ADDED
@@ -0,0 +1,760 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import gc
25
+ import json
26
+ import numpy as np
27
+ from tqdm import tqdm
28
+ from collections import OrderedDict
29
+ from dataclasses import dataclass
30
+
31
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
32
+ # DeepSpeed data structures it has to be available in the current python environment.
33
+ from deepspeed.utils import logger
34
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
35
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
36
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
37
+
38
+
39
+ @dataclass
40
+ class zero_model_state:
41
+ buffers: dict()
42
+ param_shapes: dict()
43
+ shared_params: list
44
+ ds_version: int
45
+ frozen_param_shapes: dict()
46
+ frozen_param_fragments: dict()
47
+
48
+
49
+ debug = 0
50
+
51
+ # load to cpu
52
+ device = torch.device('cpu')
53
+
54
+
55
+ def atoi(text):
56
+ return int(text) if text.isdigit() else text
57
+
58
+
59
+ def natural_keys(text):
60
+ '''
61
+ alist.sort(key=natural_keys) sorts in human order
62
+ http://nedbatchelder.com/blog/200712/human_sorting.html
63
+ (See Toothy's implementation in the comments)
64
+ '''
65
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
66
+
67
+
68
+ def get_model_state_file(checkpoint_dir, zero_stage):
69
+ if not os.path.isdir(checkpoint_dir):
70
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
71
+
72
+ # there should be only one file
73
+ if zero_stage <= 2:
74
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
75
+ elif zero_stage == 3:
76
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
77
+
78
+ if not os.path.exists(file):
79
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
80
+
81
+ return file
82
+
83
+
84
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
85
+ # XXX: need to test that this simple glob rule works for multi-node setup too
86
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
87
+
88
+ if len(ckpt_files) == 0:
89
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
90
+
91
+ return ckpt_files
92
+
93
+
94
+ def get_optim_files(checkpoint_dir):
95
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
96
+
97
+
98
+ def get_model_state_files(checkpoint_dir):
99
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
100
+
101
+
102
+ def parse_model_states(files):
103
+ zero_model_states = []
104
+ for file in files:
105
+ state_dict = torch.load(file, map_location=device, weights_only=False)
106
+
107
+ if BUFFER_NAMES not in state_dict:
108
+ raise ValueError(f"{file} is not a model state checkpoint")
109
+ buffer_names = state_dict[BUFFER_NAMES]
110
+ if debug:
111
+ print("Found buffers:", buffer_names)
112
+
113
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
114
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
115
+ param_shapes = state_dict[PARAM_SHAPES]
116
+
117
+ # collect parameters that are included in param_shapes
118
+ param_names = []
119
+ for s in param_shapes:
120
+ for name in s.keys():
121
+ param_names.append(name)
122
+
123
+ # update with frozen parameters
124
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
125
+ if frozen_param_shapes is not None:
126
+ if debug:
127
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
128
+ param_names += list(frozen_param_shapes.keys())
129
+
130
+ # handle shared params
131
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
132
+
133
+ ds_version = state_dict.get(DS_VERSION, None)
134
+
135
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
136
+
137
+ z_model_state = zero_model_state(buffers=buffers,
138
+ param_shapes=param_shapes,
139
+ shared_params=shared_params,
140
+ ds_version=ds_version,
141
+ frozen_param_shapes=frozen_param_shapes,
142
+ frozen_param_fragments=frozen_param_fragments)
143
+ zero_model_states.append(z_model_state)
144
+
145
+ return zero_model_states
146
+
147
+
148
+ def parse_optim_states(files, ds_checkpoint_dir):
149
+ total_files = len(files)
150
+ state_dicts = []
151
+ for f in tqdm(files, desc='Loading checkpoint shards'):
152
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
153
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
154
+ # and also handle the case where it was already removed by another helper script
155
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
156
+ state_dicts.append(state_dict)
157
+
158
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
159
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
160
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
161
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
162
+
163
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
164
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
165
+ # use the max of the partition_count to get the dp world_size.
166
+
167
+ if type(world_size) is list:
168
+ world_size = max(world_size)
169
+
170
+ if world_size != total_files:
171
+ raise ValueError(
172
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
173
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
174
+ )
175
+
176
+ # the groups are named differently in each stage
177
+ if zero_stage <= 2:
178
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
179
+ elif zero_stage == 3:
180
+ fp32_groups_key = FP32_FLAT_GROUPS
181
+ else:
182
+ raise ValueError(f"unknown zero stage {zero_stage}")
183
+
184
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
185
+ return zero_stage, world_size, fp32_flat_groups
186
+
187
+
188
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
189
+ """
190
+ Returns fp32 state_dict reconstructed from ds checkpoint
191
+
192
+ Args:
193
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
194
+
195
+ """
196
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
197
+
198
+ optim_files = get_optim_files(ds_checkpoint_dir)
199
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
200
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
201
+
202
+ model_files = get_model_state_files(ds_checkpoint_dir)
203
+
204
+ zero_model_states = parse_model_states(model_files)
205
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
206
+
207
+ if zero_stage <= 2:
208
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
209
+ exclude_frozen_parameters)
210
+ elif zero_stage == 3:
211
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
212
+ exclude_frozen_parameters)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _has_callable(obj, fn):
248
+ attr = getattr(obj, fn, None)
249
+ return callable(attr)
250
+
251
+
252
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
253
+ param_shapes = zero_model_states[0].param_shapes
254
+
255
+ # Reconstruction protocol: each rank stores one contiguous fp32 partition per param
256
+ # group; concatenating the partitions across ranks rebuilds the full flat vector,
257
+ # from which the individual params are sliced out sequentially below
258
+
259
+ if debug:
260
+ for i in range(world_size):
261
+ for j in range(len(fp32_flat_groups[0])):
262
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
263
+
264
+ # XXX: memory usage doubles here (zero2)
265
+ num_param_groups = len(fp32_flat_groups[0])
266
+ merged_single_partition_of_fp32_groups = []
267
+ for i in range(num_param_groups):
268
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
269
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
270
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
271
+ avail_numel = sum(
272
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
273
+
274
+ if debug:
275
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
276
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
277
+ # not asserting if there is a mismatch due to possible padding
278
+ print(f"Have {avail_numel} numels to process.")
279
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
280
+
281
+ # params
282
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
283
+ # out-of-core computing solution
284
+ total_numel = 0
285
+ total_params = 0
286
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
287
+ offset = 0
288
+ avail_numel = full_single_fp32_vector.numel()
289
+ for name, shape in shapes.items():
290
+
291
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
292
+ total_numel += unpartitioned_numel
293
+ total_params += 1
294
+
295
+ if debug:
296
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
297
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
298
+ offset += unpartitioned_numel
299
+
300
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
301
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
302
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
303
+ # live optimizer object, so we are checking that the numbers are within the right range
304
+ align_to = 2 * world_size
305
+
306
+ def zero2_align(x):
307
+ return align_to * math.ceil(x / align_to)
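+ # e.g. with world_size=2 (align_to=4): zero2_align(10) == 12, so a raw offset of 10
+ # and a raw avail_numel of 12 both align to 12 and pass the sanity check below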
308
+
309
+ if debug:
310
+ print(f"original offset={offset}, avail_numel={avail_numel}")
311
+
312
+ offset = zero2_align(offset)
313
+ avail_numel = zero2_align(avail_numel)
314
+
315
+ if debug:
316
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
317
+
318
+ # Sanity check
319
+ if offset != avail_numel:
320
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
321
+
322
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
323
+
324
+
325
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
326
+ exclude_frozen_parameters):
327
+ state_dict = OrderedDict()
328
+
329
+ # buffers
330
+ buffers = zero_model_states[0].buffers
331
+ state_dict.update(buffers)
332
+ if debug:
333
+ print(f"added {len(buffers)} buffers")
334
+
335
+ if not exclude_frozen_parameters:
336
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
337
+
338
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
339
+
340
+ # recover shared parameters
341
+ for pair in zero_model_states[0].shared_params:
342
+ if pair[1] in state_dict:
343
+ state_dict[pair[0]] = state_dict[pair[1]]
344
+
345
+ return state_dict
346
+
347
+
348
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
349
+ remainder = unpartitioned_numel % world_size
350
+ padding_numel = (world_size - remainder) if remainder else 0
351
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
352
+ return partitioned_numel, padding_numel
353
+
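+ # Worked example: unpartitioned_numel=10, world_size=4 gives remainder=2,
+ # padding_numel=2 and partitioned_numel=ceil(10/4)=3, so each of the 4 ranks
+ # stores 3 elements and 3*4 == 10 + 2 padding elements.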
354
+
355
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
356
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
357
+ return
358
+
359
+ if debug:
360
+ for i in range(world_size):
361
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
362
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
363
+
364
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
365
+ wanted_params = len(frozen_param_shapes)
366
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
367
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
368
+ print(f'Frozen params: Have {avail_numel} numels to process.')
369
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
370
+
371
+ total_params = 0
372
+ total_numel = 0
373
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
374
+ total_params += 1
375
+ unpartitioned_numel = shape.numel()
376
+ total_numel += unpartitioned_numel
377
+
378
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
379
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
380
+
381
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
382
+
383
+ if debug:
384
+ print(
385
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
386
+ )
387
+
388
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
389
+
390
+
391
+ class GatheredTensor:
392
+ """
393
+ A pseudo tensor that collects partitioned weights.
394
+ It is more memory efficient when there are multiple groups.
395
+ """
396
+
397
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
398
+ self.flat_groups = flat_groups
399
+ self.flat_groups_offset = flat_groups_offset
400
+ self.offset = offset
401
+ self.partitioned_numel = partitioned_numel
402
+ self.shape = shape
403
+ self.dtype = self.flat_groups[0][0].dtype
404
+
405
+ def contiguous(self):
406
+ """
407
+ Merge partitioned weights from flat_groups into a single tensor.
408
+ """
409
+ end_idx = self.offset + self.partitioned_numel
410
+ world_size = len(self.flat_groups)
411
+ pad_flat_param_chunks = []
412
+
413
+ for rank_i in range(world_size):
414
+ # for each rank, we need to collect weights from related group/groups
415
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
416
+ start_group_id = None
417
+ end_group_id = None
418
+ for group_id in range(len(self.flat_groups_offset)):
419
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
420
+ start_group_id = group_id
421
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
422
+ end_group_id = group_id
423
+ break
424
+ # collect weights from related group/groups
425
+ for group_id in range(start_group_id, end_group_id + 1):
426
+ flat_tensor = flat_groups_at_rank_i[group_id]
427
+ start_offset = self.offset - self.flat_groups_offset[group_id]
428
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
429
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
430
+
431
+ # collect weights from all ranks
432
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
433
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
434
+ return param
435
+
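+ # Illustration (hypothetical numbers): with flat_groups_offset=[0, 100, 250],
+ # offset=40 and partitioned_numel=30, contiguous() slices [40:70] of group 0 from
+ # every rank, concatenates the per-rank slices, then truncates the result to
+ # shape.numel() and reshapes it, so a param is only materialized when requested.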
436
+
437
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
438
+ param_shapes = zero_model_states[0].param_shapes
439
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
440
+
441
+ # Reconstruction protocol: for zero3 we need to zip the partitions together at the boundary of
442
+ # each param, re-consolidating each param while dealing with any padding
443
+
444
+ # merge list of dicts, preserving order
445
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
446
+
447
+ if debug:
448
+ for i in range(world_size):
449
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
450
+
451
+ wanted_params = len(param_shapes)
452
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
453
+ # not asserting if there is a mismatch due to possible padding
454
+ avail_numel = fp32_flat_groups[0].numel() * world_size
455
+ print(f"Trainable params: Have {avail_numel} numels to process.")
456
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
457
+
458
+ # params
459
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
460
+ # out-of-core computing solution
461
+ offset = 0
462
+ total_numel = 0
463
+ total_params = 0
464
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
465
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
466
+ unpartitioned_numel = shape.numel()
467
+ total_numel += unpartitioned_numel
468
+ total_params += 1
469
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
470
+
471
+ if debug:
472
+ print(
473
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
474
+ )
475
+
476
+ # memory efficient tensor
477
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
478
+ state_dict[name] = tensor
479
+ offset += partitioned_numel
480
+
481
+ offset *= world_size
482
+
483
+ # Sanity check
484
+ if offset != avail_numel:
485
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
486
+
487
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
488
+
489
+
490
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
491
+ exclude_frozen_parameters):
492
+ state_dict = OrderedDict()
493
+
494
+ # buffers
495
+ buffers = zero_model_states[0].buffers
496
+ state_dict.update(buffers)
497
+ if debug:
498
+ print(f"added {len(buffers)} buffers")
499
+
500
+ if not exclude_frozen_parameters:
501
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
502
+
503
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
504
+
505
+ # recover shared parameters
506
+ for pair in zero_model_states[0].shared_params:
507
+ if pair[1] in state_dict:
508
+ state_dict[pair[0]] = state_dict[pair[1]]
509
+
510
+ return state_dict
511
+
512
+
513
+ def to_torch_tensor(state_dict, return_empty_tensor=False):
514
+ """
515
+ Convert a state_dict of GatheredTensor objects to torch tensors
516
+ """
517
+ torch_state_dict = {}
518
+ converted_tensors = {}
519
+ for name, tensor in state_dict.items():
520
+ tensor_id = id(tensor)
521
+ if tensor_id in converted_tensors: # shared tensors
522
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
523
+ torch_state_dict[name] = shared_tensor
524
+ else:
525
+ converted_tensors[tensor_id] = name
526
+ if return_empty_tensor:
527
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
528
+ else:
529
+ torch_state_dict[name] = tensor.contiguous()
530
+ return torch_state_dict
531
+
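+ # Note: tied weights recovered via shared_params point at the same GatheredTensor
+ # object, so the id()-based dedup above materializes each underlying tensor only once.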
532
+
533
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
534
+ tag=None,
535
+ exclude_frozen_parameters=False,
536
+ lazy_mode=False):
537
+ """
538
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
539
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
540
+ via a model hub.
541
+
542
+ Args:
543
+ - ``checkpoint_dir``: path to the desired checkpoint folder
544
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the 'latest' file, e.g., ``global_step14``
545
+ - ``exclude_frozen_parameters``: exclude frozen parameters
546
+ - ``lazy_mode``: get the state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
547
+ Convert a pseudo tensor to a torch tensor with ``.contiguous()``
548
+
549
+ Returns:
550
+ - pytorch ``state_dict``
551
+
552
+ A typical usage might be ::
553
+
554
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
555
+ # do the training and checkpoint saving
556
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
557
+ model = model.cpu() # move to cpu
558
+ model.load_state_dict(state_dict)
559
+ # submit to model hub or save the model to share with others
560
+
561
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
562
+ application. i.e. you will need to re-initialize the deepspeed engine, since
563
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
564
+
565
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
566
+
567
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
568
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
569
+ the checkpoint. Or you can load state_dict in lazy mode ::
570
+
571
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
572
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
573
+ for name, lazy_tensor in state_dict.items():
574
+ tensor = lazy_tensor.contiguous() # to cpu
575
+ print(name, tensor)
576
+ # del tensor to release memory if it is no longer in use
577
+ """
578
+ if tag is None:
579
+ latest_path = os.path.join(checkpoint_dir, 'latest')
580
+ if os.path.isfile(latest_path):
581
+ with open(latest_path, 'r') as fd:
582
+ tag = fd.read().strip()
583
+ else:
584
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
585
+
586
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
587
+
588
+ if not os.path.isdir(ds_checkpoint_dir):
589
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
590
+
591
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
592
+ if lazy_mode:
593
+ return state_dict
594
+ else:
595
+ return to_torch_tensor(state_dict)
596
+
597
+
598
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
599
+ output_dir,
600
+ max_shard_size="5GB",
601
+ safe_serialization=False,
602
+ tag=None,
603
+ exclude_frozen_parameters=False):
604
+ """
605
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
606
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
607
+
608
+ Args:
609
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
610
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
611
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
612
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
613
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
614
+ - ``exclude_frozen_parameters``: exclude frozen parameters
615
+ """
616
+
617
+ # Dependency pre-check
618
+ if safe_serialization:
619
+ try:
620
+ from safetensors.torch import save_file
621
+ except ImportError:
622
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
623
+ raise
624
+ if max_shard_size is not None:
625
+ try:
626
+ from huggingface_hub import split_torch_state_dict_into_shards
627
+ except ImportError:
628
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
629
+ raise
630
+
631
+ # Convert zero checkpoint to state_dict
632
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
633
+ tag,
634
+ exclude_frozen_parameters,
635
+ lazy_mode=True)
636
+
637
+ # Shard the model if it is too big.
638
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
639
+ if max_shard_size is not None:
640
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
641
+ # a memory-efficient approach to sharding
642
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
643
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
644
+ filename_pattern=filename_pattern,
645
+ max_shard_size=max_shard_size)
646
+ else:
647
+ from collections import namedtuple
648
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
649
+ state_dict_split = StateDictSplit(is_sharded=False,
650
+ filename_to_tensors={weights_name: list(state_dict.keys())})
651
+
652
+ # Save the model by shard
653
+ os.makedirs(output_dir, exist_ok=True)
654
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
655
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
656
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
657
+ shard_state_dict = to_torch_tensor(shard_state_dict)
658
+ output_path = os.path.join(output_dir, shard_file)
659
+ if safe_serialization:
660
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
661
+ else:
662
+ torch.save(shard_state_dict, output_path)
663
+ # release the memory of current shard
664
+ for tensor_name in list(shard_state_dict.keys()):
665
+ del state_dict[tensor_name]
666
+ del shard_state_dict[tensor_name]
667
+ del shard_state_dict
668
+ gc.collect()
669
+
670
+ # Save index if sharded
671
+ if state_dict_split.is_sharded:
672
+ index = {
673
+ "metadata": state_dict_split.metadata,
674
+ "weight_map": state_dict_split.tensor_to_filename,
675
+ }
676
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
677
+ save_index_file = os.path.join(output_dir, save_index_file)
678
+ with open(save_index_file, "w", encoding="utf-8") as f:
679
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
680
+ f.write(content)
681
+
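+ # A minimal usage sketch (hypothetical paths): convert a DeepSpeed checkpoint into
+ # one or more safetensors shards (plus an index file when sharded) in the output dir:
+ #
+ #     convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12",
+ #                                                "path/checkpoint-12-output",
+ #                                                safe_serialization=True)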
682
+
683
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
684
+ """
685
+ 1. Move the provided model to cpu
686
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
687
+ 3. Load it into the provided model
688
+
689
+ Args:
690
+ - ``model``: the model object to update
691
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
692
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
693
+
694
+ Returns:
695
+ - ``model``: modified model
696
+
697
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
698
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
699
+ conveniently placed for you in the checkpoint folder.
700
+
701
+ A typical usage might be ::
702
+
703
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
704
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
705
+ # submit to model hub or save the model to share with others
706
+
707
+ Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
708
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
709
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
710
+
711
+ """
712
+ logger.info("Extracting fp32 weights")
713
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
714
+
715
+ logger.info("Overwriting model with fp32 weights")
716
+ model = model.cpu()
717
+ model.load_state_dict(state_dict, strict=False)
718
+
719
+ return model
720
+
721
+
722
+ if __name__ == "__main__":
723
+ parser = argparse.ArgumentParser()
724
+ parser.add_argument("checkpoint_dir",
725
+ type=str,
726
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
727
+ parser.add_argument("output_dir",
728
+ type=str,
729
+ help="directory to the pytorch fp32 state_dict output files "
730
+ "(e.g. path/checkpoint-12-output/)")
731
+ parser.add_argument(
732
+ "--max_shard_size",
733
+ type=str,
734
+ default="5GB",
735
+ help="The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be "
736
+ "smaller than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
737
+ "We default it to 5GB so that models can run easily on free-tier Google Colab instances "
738
+ "without CPU OOM issues.")
739
+ parser.add_argument(
740
+ "--safe_serialization",
741
+ default=False,
742
+ action='store_true',
743
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
744
+ parser.add_argument("-t",
745
+ "--tag",
746
+ type=str,
747
+ default=None,
748
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
749
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
750
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
751
+ args = parser.parse_args()
752
+
753
+ debug = args.debug
754
+
755
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
756
+ args.output_dir,
757
+ max_shard_size=args.max_shard_size,
758
+ safe_serialization=args.safe_serialization,
759
+ tag=args.tag,
760
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
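+
+ # Example invocation (hypothetical paths):
+ #     python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12-output --safe_serialization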
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/README.md ADDED
@@ -0,0 +1,209 @@
1
+ ---
2
+ base_model: Qwen/Qwen2.5-Coder-3B
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:Qwen/Qwen2.5-Coder-3B
7
+ - lora
8
+ - sft
9
+ - transformers
10
+ - trl
11
+ ---
12
+
13
+ # Model Card for Model ID
14
+
15
+ <!-- Provide a quick summary of what the model is/does. -->
16
+
17
+
18
+
19
+ ## Model Details
20
+
21
+ ### Model Description
22
+
23
+ <!-- Provide a longer summary of what this model is. -->
24
+
25
+
26
+
27
+ - **Developed by:** [More Information Needed]
28
+ - **Funded by [optional]:** [More Information Needed]
29
+ - **Shared by [optional]:** [More Information Needed]
30
+ - **Model type:** [More Information Needed]
31
+ - **Language(s) (NLP):** [More Information Needed]
32
+ - **License:** [More Information Needed]
33
+ - **Finetuned from model [optional]:** [More Information Needed]
34
+
35
+ ### Model Sources [optional]
36
+
37
+ <!-- Provide the basic links for the model. -->
38
+
39
+ - **Repository:** [More Information Needed]
40
+ - **Paper [optional]:** [More Information Needed]
41
+ - **Demo [optional]:** [More Information Needed]
42
+
43
+ ## Uses
44
+
45
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
46
+
47
+ ### Direct Use
48
+
49
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
50
+
51
+ [More Information Needed]
52
+
53
+ ### Downstream Use [optional]
54
+
55
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
56
+
57
+ [More Information Needed]
58
+
59
+ ### Out-of-Scope Use
60
+
61
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
62
+
63
+ [More Information Needed]
64
+
65
+ ## Bias, Risks, and Limitations
66
+
67
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
68
+
69
+ [More Information Needed]
70
+
71
+ ### Recommendations
72
+
73
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
74
+
75
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
76
+
77
+ ## How to Get Started with the Model
78
+
79
+ Use the code below to get started with the model.
80
+
81
+ [More Information Needed]
82
+
83
+ ## Training Details
84
+
85
+ ### Training Data
86
+
87
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
88
+
89
+ [More Information Needed]
90
+
91
+ ### Training Procedure
92
+
93
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
94
+
95
+ #### Preprocessing [optional]
96
+
97
+ [More Information Needed]
98
+
99
+
100
+ #### Training Hyperparameters
101
+
102
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
103
+
104
+ #### Speeds, Sizes, Times [optional]
105
+
106
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
107
+
108
+ [More Information Needed]
109
+
110
+ ## Evaluation
111
+
112
+ <!-- This section describes the evaluation protocols and provides the results. -->
113
+
114
+ ### Testing Data, Factors & Metrics
115
+
116
+ #### Testing Data
117
+
118
+ <!-- This should link to a Dataset Card if possible. -->
119
+
120
+ [More Information Needed]
121
+
122
+ #### Factors
123
+
124
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
125
+
126
+ [More Information Needed]
127
+
128
+ #### Metrics
129
+
130
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
131
+
132
+ [More Information Needed]
133
+
134
+ ### Results
135
+
136
+ [More Information Needed]
137
+
138
+ #### Summary
139
+
140
+
141
+
142
+ ## Model Examination [optional]
143
+
144
+ <!-- Relevant interpretability work for the model goes here -->
145
+
146
+ [More Information Needed]
147
+
148
+ ## Environmental Impact
149
+
150
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
151
+
152
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
153
+
154
+ - **Hardware Type:** [More Information Needed]
155
+ - **Hours used:** [More Information Needed]
156
+ - **Cloud Provider:** [More Information Needed]
157
+ - **Compute Region:** [More Information Needed]
158
+ - **Carbon Emitted:** [More Information Needed]
159
+
160
+ ## Technical Specifications [optional]
161
+
162
+ ### Model Architecture and Objective
163
+
164
+ [More Information Needed]
165
+
166
+ ### Compute Infrastructure
167
+
168
+ [More Information Needed]
169
+
170
+ #### Hardware
171
+
172
+ [More Information Needed]
173
+
174
+ #### Software
175
+
176
+ [More Information Needed]
177
+
178
+ ## Citation [optional]
179
+
180
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
181
+
182
+ **BibTeX:**
183
+
184
+ [More Information Needed]
185
+
186
+ **APA:**
187
+
188
+ [More Information Needed]
189
+
190
+ ## Glossary [optional]
191
+
192
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
193
+
194
+ [More Information Needed]
195
+
196
+ ## More Information [optional]
197
+
198
+ [More Information Needed]
199
+
200
+ ## Model Card Authors [optional]
201
+
202
+ [More Information Needed]
203
+
204
+ ## Model Card Contact
205
+
206
+ [More Information Needed]
207
+ ### Framework versions
208
+
209
+ - PEFT 0.18.0
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/adapter_config.json ADDED
@@ -0,0 +1,46 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "Qwen/Qwen2.5-Coder-3B",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 128,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
+ "r": 64,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "up_proj",
33
+ "q_proj",
34
+ "o_proj",
35
+ "down_proj",
36
+ "gate_proj",
37
+ "v_proj",
38
+ "k_proj"
39
+ ],
40
+ "target_parameters": null,
41
+ "task_type": "CAUSAL_LM",
42
+ "trainable_token_indices": null,
43
+ "use_dora": false,
44
+ "use_qalora": false,
45
+ "use_rslora": false
46
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/added_tokens.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<tool_call>": 151657,
4
+ "<|box_end|>": 151649,
5
+ "<|box_start|>": 151648,
6
+ "<|endoftext|>": 151643,
7
+ "<|file_sep|>": 151664,
8
+ "<|fim_middle|>": 151660,
9
+ "<|fim_pad|>": 151662,
10
+ "<|fim_prefix|>": 151659,
11
+ "<|fim_suffix|>": 151661,
12
+ "<|im_end|>": 151645,
13
+ "<|im_start|>": 151644,
14
+ "<|image_pad|>": 151655,
15
+ "<|object_ref_end|>": 151647,
16
+ "<|object_ref_start|>": 151646,
17
+ "<|quad_end|>": 151651,
18
+ "<|quad_start|>": 151650,
19
+ "<|repo_name|>": 151663,
20
+ "<|video_pad|>": 151656,
21
+ "<|vision_end|>": 151653,
22
+ "<|vision_pad|>": 151654,
23
+ "<|vision_start|>": 151652
24
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/latest ADDED
@@ -0,0 +1 @@
1
+ global_step5000
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "clean_up_tokenization_spaces": false,
199
+ "eos_token": "<|endoftext|>",
200
+ "errors": "replace",
201
+ "extra_special_tokens": {},
202
+ "model_max_length": 32768,
203
+ "pad_token": "<|endoftext|>",
204
+ "split_special_tokens": false,
205
+ "tokenizer_class": "Qwen2Tokenizer",
206
+ "unk_token": null
207
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/trainer_state.json ADDED
@@ -0,0 +1,1034 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 1.0,
6
+ "eval_steps": 500,
7
+ "global_step": 5000,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "entropy": 0.166884765625,
14
+ "epoch": 0.01,
15
+ "grad_norm": 0.09714163839817047,
16
+ "learning_rate": 1.6333333333333335e-05,
17
+ "loss": 0.1512,
18
+ "mean_token_accuracy": 0.966504562497139,
19
+ "num_tokens": 973322.0,
20
+ "step": 50
21
+ },
22
+ {
23
+ "entropy": 0.048140869140625,
24
+ "epoch": 0.02,
25
+ "grad_norm": 0.057216521352529526,
26
+ "learning_rate": 3.3e-05,
27
+ "loss": 0.0424,
28
+ "mean_token_accuracy": 0.9896974349021912,
29
+ "num_tokens": 1965908.0,
30
+ "step": 100
31
+ },
32
+ {
33
+ "entropy": 0.029632568359375,
34
+ "epoch": 0.03,
35
+ "grad_norm": 0.054412033408880234,
36
+ "learning_rate": 4.966666666666667e-05,
37
+ "loss": 0.0248,
38
+ "mean_token_accuracy": 0.9936319047212601,
39
+ "num_tokens": 2931943.0,
40
+ "step": 150
41
+ },
42
+ {
43
+ "entropy": 0.02478515625,
44
+ "epoch": 0.04,
45
+ "grad_norm": 0.08800562471151352,
46
+ "learning_rate": 6.633333333333334e-05,
47
+ "loss": 0.0214,
48
+ "mean_token_accuracy": 0.9943305045366287,
49
+ "num_tokens": 3906979.0,
50
+ "step": 200
51
+ },
52
+ {
53
+ "entropy": 0.0224652099609375,
54
+ "epoch": 0.05,
55
+ "grad_norm": 0.041793134063482285,
56
+ "learning_rate": 8.3e-05,
57
+ "loss": 0.0191,
58
+ "mean_token_accuracy": 0.9948792272806167,
59
+ "num_tokens": 4891791.0,
60
+ "step": 250
61
+ },
62
+ {
63
+ "entropy": 0.01969482421875,
64
+ "epoch": 0.06,
65
+ "grad_norm": 0.04181526601314545,
66
+ "learning_rate": 9.966666666666667e-05,
67
+ "loss": 0.0173,
68
+ "mean_token_accuracy": 0.995435175895691,
69
+ "num_tokens": 5923811.0,
70
+ "step": 300
71
+ },
72
+ {
73
+ "entropy": 0.019368896484375,
74
+ "epoch": 0.07,
75
+ "grad_norm": 0.03995412588119507,
76
+ "learning_rate": 9.999370378817499e-05,
77
+ "loss": 0.0164,
78
+ "mean_token_accuracy": 0.9956372672319412,
79
+ "num_tokens": 6923169.0,
80
+ "step": 350
81
+ },
82
+ {
83
+ "entropy": 0.0204400634765625,
84
+ "epoch": 0.08,
85
+ "grad_norm": 0.06872096657752991,
86
+ "learning_rate": 9.997430021636957e-05,
87
+ "loss": 0.0163,
88
+ "mean_token_accuracy": 0.9956023478507996,
89
+ "num_tokens": 7901360.0,
90
+ "step": 400
91
+ },
92
+ {
93
+ "entropy": 0.0186175537109375,
94
+ "epoch": 0.09,
95
+ "grad_norm": 0.037068989127874374,
96
+ "learning_rate": 9.994179175045023e-05,
97
+ "loss": 0.0157,
98
+ "mean_token_accuracy": 0.9958296239376068,
99
+ "num_tokens": 8899074.0,
100
+ "step": 450
101
+ },
102
+ {
103
+ "entropy": 0.0173492431640625,
104
+ "epoch": 0.1,
105
+ "grad_norm": 0.014651700854301453,
106
+ "learning_rate": 9.989618691519873e-05,
107
+ "loss": 0.015,
108
+ "mean_token_accuracy": 0.9959587389230729,
109
+ "num_tokens": 9909340.0,
110
+ "step": 500
111
+ },
112
+ {
113
+ "entropy": 0.0183203125,
114
+ "epoch": 0.11,
115
+ "grad_norm": 0.016151444986462593,
116
+ "learning_rate": 9.983749766969271e-05,
117
+ "loss": 0.0151,
118
+ "mean_token_accuracy": 0.9959770923852921,
119
+ "num_tokens": 10894801.0,
120
+ "step": 550
121
+ },
122
+ {
123
+ "entropy": 0.02047607421875,
124
+ "epoch": 0.12,
125
+ "grad_norm": 0.021302781999111176,
126
+ "learning_rate": 9.976573940416966e-05,
127
+ "loss": 0.0157,
128
+ "mean_token_accuracy": 0.9958055526018142,
129
+ "num_tokens": 11862148.0,
130
+ "step": 600
131
+ },
132
+ {
133
+ "entropy": 0.017862548828125,
134
+ "epoch": 0.13,
135
+ "grad_norm": 0.013205700553953648,
136
+ "learning_rate": 9.968093093599106e-05,
137
+ "loss": 0.0152,
138
+ "mean_token_accuracy": 0.9958789277076722,
139
+ "num_tokens": 12851463.0,
140
+ "step": 650
141
+ },
142
+ {
143
+ "entropy": 0.0182177734375,
144
+ "epoch": 0.14,
145
+ "grad_norm": 0.013120726682245731,
146
+ "learning_rate": 9.958309450470784e-05,
147
+ "loss": 0.0144,
148
+ "mean_token_accuracy": 0.9962116169929505,
149
+ "num_tokens": 13849228.0,
150
+ "step": 700
151
+ },
152
+ {
153
+ "entropy": 0.0179010009765625,
154
+ "epoch": 0.15,
155
+ "grad_norm": 0.014701537787914276,
156
+ "learning_rate": 9.947225576622847e-05,
157
+ "loss": 0.0146,
158
+ "mean_token_accuracy": 0.9961142331361771,
159
+ "num_tokens": 14812166.0,
160
+ "step": 750
161
+ },
162
+ {
163
+ "entropy": 0.0173638916015625,
164
+ "epoch": 0.16,
165
+ "grad_norm": 0.033504419028759,
166
+ "learning_rate": 9.934844378609117e-05,
167
+ "loss": 0.0145,
168
+ "mean_token_accuracy": 0.9961155968904495,
169
+ "num_tokens": 15789120.0,
170
+ "step": 800
171
+ },
172
+ {
173
+ "entropy": 0.0183349609375,
174
+ "epoch": 0.17,
175
+ "grad_norm": 0.010470729321241379,
176
+ "learning_rate": 9.921169103184187e-05,
177
+ "loss": 0.0144,
178
+ "mean_token_accuracy": 0.9961740493774414,
179
+ "num_tokens": 16764645.0,
180
+ "step": 850
181
+ },
182
+ {
183
+ "entropy": 0.016279296875,
184
+ "epoch": 0.18,
185
+ "grad_norm": 0.02781980112195015,
186
+ "learning_rate": 9.906203336452029e-05,
187
+ "loss": 0.0142,
188
+ "mean_token_accuracy": 0.9962105673551559,
189
+ "num_tokens": 17730003.0,
190
+ "step": 900
191
+ },
192
+ {
193
+ "entropy": 0.0166461181640625,
194
+ "epoch": 0.19,
195
+ "grad_norm": 0.020350394770503044,
196
+ "learning_rate": 9.889951002925593e-05,
197
+ "loss": 0.0141,
198
+ "mean_token_accuracy": 0.9962483876943589,
199
+ "num_tokens": 18723861.0,
200
+ "step": 950
201
+ },
202
+ {
203
+ "entropy": 0.0165386962890625,
204
+ "epoch": 0.2,
205
+ "grad_norm": 0.013212243095040321,
206
+ "learning_rate": 9.872416364497675e-05,
207
+ "loss": 0.0141,
208
+ "mean_token_accuracy": 0.9961817175149917,
209
+ "num_tokens": 19695091.0,
210
+ "step": 1000
211
+ },
212
+ {
213
+ "entropy": 0.017391357421875,
214
+ "epoch": 0.21,
215
+ "grad_norm": 0.014041919261217117,
216
+ "learning_rate": 9.853604019323301e-05,
217
+ "loss": 0.0141,
218
+ "mean_token_accuracy": 0.9962570595741272,
219
+ "num_tokens": 20662255.0,
220
+ "step": 1050
221
+ },
222
+ {
223
+ "entropy": 0.016884765625,
224
+ "epoch": 0.22,
225
+ "grad_norm": 0.017174789682030678,
226
+ "learning_rate": 9.833518900613956e-05,
227
+ "loss": 0.0138,
228
+ "mean_token_accuracy": 0.9962636953592301,
229
+ "num_tokens": 21642092.0,
230
+ "step": 1100
231
+ },
232
+ {
233
+ "entropy": 0.019366455078125,
234
+ "epoch": 0.23,
235
+ "grad_norm": 0.017337976023554802,
236
+ "learning_rate": 9.812166275343917e-05,
237
+ "loss": 0.0143,
238
+ "mean_token_accuracy": 0.996159919500351,
239
+ "num_tokens": 22607505.0,
240
+ "step": 1150
241
+ },
242
+ {
243
+ "entropy": 0.0164886474609375,
244
+ "epoch": 0.24,
245
+ "grad_norm": 0.018486863002181053,
246
+ "learning_rate": 9.789551742869096e-05,
247
+ "loss": 0.0137,
248
+ "mean_token_accuracy": 0.9963399863243103,
249
+ "num_tokens": 23584579.0,
250
+ "step": 1200
251
+ },
252
+ {
253
+ "entropy": 0.016282958984375,
254
+ "epoch": 0.25,
255
+ "grad_norm": 0.009195365011692047,
256
+ "learning_rate": 9.765681233458693e-05,
257
+ "loss": 0.0136,
258
+ "mean_token_accuracy": 0.9963367432355881,
259
+ "num_tokens": 24580285.0,
260
+ "step": 1250
261
+ },
262
+ {
263
+ "entropy": 0.0197235107421875,
264
+ "epoch": 0.26,
265
+ "grad_norm": 0.01205432415008545,
266
+ "learning_rate": 9.740561006740098e-05,
267
+ "loss": 0.0144,
268
+ "mean_token_accuracy": 0.996049216389656,
269
+ "num_tokens": 25525513.0,
270
+ "step": 1300
271
+ },
272
+ {
273
+ "entropy": 0.0153009033203125,
274
+ "epoch": 0.27,
275
+ "grad_norm": 0.01174497976899147,
276
+ "learning_rate": 9.7141976500574e-05,
277
+ "loss": 0.0135,
278
+ "mean_token_accuracy": 0.9964245760440826,
279
+ "num_tokens": 26523426.0,
280
+ "step": 1350
281
+ },
282
+ {
283
+ "entropy": 0.0169378662109375,
284
+ "epoch": 0.28,
285
+ "grad_norm": 0.014242298901081085,
286
+ "learning_rate": 9.686598076743981e-05,
287
+ "loss": 0.0139,
288
+ "mean_token_accuracy": 0.9962529343366623,
289
+ "num_tokens": 27493091.0,
290
+ "step": 1400
291
+ },
292
+ {
293
+ "entropy": 0.017811279296875,
294
+ "epoch": 0.29,
295
+ "grad_norm": 0.0160844549536705,
296
+ "learning_rate": 9.657769524309605e-05,
297
+ "loss": 0.0145,
298
+ "mean_token_accuracy": 0.996181645989418,
299
+ "num_tokens": 28458553.0,
300
+ "step": 1450
301
+ },
302
+ {
303
+ "entropy": 0.0169232177734375,
304
+ "epoch": 0.3,
305
+ "grad_norm": 0.03241236135363579,
306
+ "learning_rate": 9.627719552542516e-05,
307
+ "loss": 0.014,
308
+ "mean_token_accuracy": 0.9962622857093811,
309
+ "num_tokens": 29430497.0,
310
+ "step": 1500
311
+ },
312
+ {
313
+ "entropy": 0.0163720703125,
314
+ "epoch": 0.31,
315
+ "grad_norm": 0.01750756986439228,
316
+ "learning_rate": 9.596456041527001e-05,
317
+ "loss": 0.0136,
318
+ "mean_token_accuracy": 0.9964328509569168,
319
+ "num_tokens": 30414152.0,
320
+ "step": 1550
321
+ },
322
+ {
323
+ "entropy": 0.017760009765625,
324
+ "epoch": 0.32,
325
+ "grad_norm": 0.02182549238204956,
326
+ "learning_rate": 9.563987189576991e-05,
327
+ "loss": 0.0147,
328
+ "mean_token_accuracy": 0.9961263346672058,
329
+ "num_tokens": 31396417.0,
330
+ "step": 1600
331
+ },
332
+ {
333
+ "entropy": 0.0171954345703125,
334
+ "epoch": 0.33,
335
+ "grad_norm": 0.011301269754767418,
336
+ "learning_rate": 9.530321511086183e-05,
337
+ "loss": 0.0139,
338
+ "mean_token_accuracy": 0.9962880289554596,
339
+ "num_tokens": 32364443.0,
340
+ "step": 1650
341
+ },
342
+ {
343
+ "entropy": 0.015126953125,
344
+ "epoch": 0.34,
345
+ "grad_norm": 0.009061276912689209,
346
+ "learning_rate": 9.495467834295291e-05,
347
+ "loss": 0.0134,
348
+ "mean_token_accuracy": 0.9964066076278687,
349
+ "num_tokens": 33374821.0,
350
+ "step": 1700
351
+ },
352
+ {
353
+ "entropy": 0.018043212890625,
354
+ "epoch": 0.35,
355
+ "grad_norm": 0.014127642847597599,
356
+ "learning_rate": 9.459435298976998e-05,
357
+ "loss": 0.0137,
358
+ "mean_token_accuracy": 0.9963885354995727,
359
+ "num_tokens": 34338570.0,
360
+ "step": 1750
361
+ },
362
+ {
363
+ "entropy": 0.0155584716796875,
364
+ "epoch": 0.36,
365
+ "grad_norm": 0.00913177989423275,
366
+ "learning_rate": 9.422233354039198e-05,
367
+ "loss": 0.0132,
368
+ "mean_token_accuracy": 0.9965262812376022,
369
+ "num_tokens": 35333253.0,
370
+ "step": 1800
371
+ },
372
+ {
373
+ "entropy": 0.0177911376953125,
374
+ "epoch": 0.37,
375
+ "grad_norm": 0.009629561565816402,
376
+ "learning_rate": 9.383871755047198e-05,
377
+ "loss": 0.0131,
378
+ "mean_token_accuracy": 0.9964758467674255,
379
+ "num_tokens": 36316255.0,
380
+ "step": 1850
381
+ },
382
+ {
383
+ "entropy": 0.0159539794921875,
384
+ "epoch": 0.38,
385
+ "grad_norm": 0.01395715307444334,
386
+ "learning_rate": 9.344360561665476e-05,
387
+ "loss": 0.0132,
388
+ "mean_token_accuracy": 0.9964872688055039,
389
+ "num_tokens": 37303247.0,
390
+ "step": 1900
391
+ },
392
+ {
393
+ "entropy": 0.0152783203125,
394
+ "epoch": 0.39,
395
+ "grad_norm": 0.007368483114987612,
396
+ "learning_rate": 9.30371013501972e-05,
397
+ "loss": 0.013,
398
+ "mean_token_accuracy": 0.9965988719463348,
399
+ "num_tokens": 38287463.0,
400
+ "step": 1950
401
+ },
402
+ {
403
+ "entropy": 0.0158270263671875,
404
+ "epoch": 0.4,
405
+ "grad_norm": 0.01508031040430069,
406
+ "learning_rate": 9.261931134979791e-05,
407
+ "loss": 0.0133,
408
+ "mean_token_accuracy": 0.9965020543336869,
409
+ "num_tokens": 39273383.0,
410
+ "step": 2000
411
+ },
412
+ {
413
+ "entropy": 0.0167919921875,
414
+ "epoch": 0.41,
415
+ "grad_norm": 0.009929426945745945,
416
+ "learning_rate": 9.219034517364369e-05,
417
+ "loss": 0.0131,
418
+ "mean_token_accuracy": 0.9965337771177292,
419
+ "num_tokens": 40251665.0,
420
+ "step": 2050
421
+ },
422
+ {
423
+ "entropy": 0.0153082275390625,
424
+ "epoch": 0.42,
425
+ "grad_norm": 0.007909618318080902,
426
+ "learning_rate": 9.17503153106797e-05,
427
+ "loss": 0.0129,
428
+ "mean_token_accuracy": 0.9965605139732361,
429
+ "num_tokens": 41243731.0,
430
+ "step": 2100
431
+ },
432
+ {
433
+ "entropy": 0.0154461669921875,
434
+ "epoch": 0.43,
435
+ "grad_norm": 0.015921294689178467,
436
+ "learning_rate": 9.129933715111125e-05,
437
+ "loss": 0.0129,
438
+ "mean_token_accuracy": 0.9964988273382187,
439
+ "num_tokens": 42247346.0,
440
+ "step": 2150
441
+ },
442
+ {
443
+ "entropy": 0.01652587890625,
444
+ "epoch": 0.44,
445
+ "grad_norm": 0.006036434322595596,
446
+ "learning_rate": 9.083752895614464e-05,
447
+ "loss": 0.0133,
448
+ "mean_token_accuracy": 0.9964230692386628,
449
+ "num_tokens": 43220001.0,
450
+ "step": 2200
451
+ },
452
+ {
453
+ "entropy": 0.015128173828125,
454
+ "epoch": 0.45,
455
+ "grad_norm": 0.009778963401913643,
456
+ "learning_rate": 9.03650118269753e-05,
457
+ "loss": 0.0131,
458
+ "mean_token_accuracy": 0.9965469115972518,
459
+ "num_tokens": 44205197.0,
460
+ "step": 2250
461
+ },
462
+ {
463
+ "entropy": 0.015570068359375,
464
+ "epoch": 0.46,
465
+ "grad_norm": 0.014646470546722412,
466
+ "learning_rate": 8.988190967303101e-05,
467
+ "loss": 0.0131,
468
+ "mean_token_accuracy": 0.9965356832742691,
469
+ "num_tokens": 45185181.0,
470
+ "step": 2300
471
+ },
472
+ {
473
+ "entropy": 0.01632568359375,
474
+ "epoch": 0.47,
475
+ "grad_norm": 0.006433142349123955,
476
+ "learning_rate": 8.938834917947889e-05,
477
+ "loss": 0.0132,
478
+ "mean_token_accuracy": 0.9965211725234986,
479
+ "num_tokens": 46170545.0,
480
+ "step": 2350
481
+ },
482
+ {
483
+ "entropy": 0.01555419921875,
484
+ "epoch": 0.48,
485
+ "grad_norm": 0.007329673506319523,
486
+ "learning_rate": 8.888445977400435e-05,
487
+ "loss": 0.0131,
488
+ "mean_token_accuracy": 0.9965089631080627,
489
+ "num_tokens": 47152165.0,
490
+ "step": 2400
491
+ },
492
+ {
493
+ "entropy": 0.0156890869140625,
494
+ "epoch": 0.49,
495
+ "grad_norm": 0.010034145787358284,
496
+ "learning_rate": 8.837037359287092e-05,
497
+ "loss": 0.0131,
498
+ "mean_token_accuracy": 0.9965383523702621,
499
+ "num_tokens": 48122023.0,
500
+ "step": 2450
501
+ },
502
+ {
503
+ "entropy": 0.0163201904296875,
504
+ "epoch": 0.5,
505
+ "grad_norm": 0.008338450454175472,
506
+ "learning_rate": 8.784622544626985e-05,
507
+ "loss": 0.0131,
508
+ "mean_token_accuracy": 0.9965311539173126,
509
+ "num_tokens": 49094218.0,
510
+ "step": 2500
511
+ },
512
+ {
513
+ "entropy": 0.0152789306640625,
514
+ "epoch": 0.51,
515
+ "grad_norm": 0.007771766744554043,
516
+ "learning_rate": 8.731215278296843e-05,
517
+ "loss": 0.0128,
518
+ "mean_token_accuracy": 0.9965461915731431,
519
+ "num_tokens": 50090248.0,
520
+ "step": 2550
521
+ },
522
+ {
523
+ "entropy": 0.0171893310546875,
524
+ "epoch": 0.52,
525
+ "grad_norm": 0.010534558445215225,
526
+ "learning_rate": 8.676829565426646e-05,
527
+ "loss": 0.0132,
528
+ "mean_token_accuracy": 0.9964153295755387,
529
+ "num_tokens": 51055771.0,
530
+ "step": 2600
531
+ },
532
+ {
533
+ "entropy": 0.0148272705078125,
534
+ "epoch": 0.53,
535
+ "grad_norm": 0.007072142791002989,
536
+ "learning_rate": 8.62147966772702e-05,
537
+ "loss": 0.0129,
538
+ "mean_token_accuracy": 0.9966214698553085,
539
+ "num_tokens": 52051029.0,
540
+ "step": 2650
541
+ },
542
+ {
543
+ "entropy": 0.0146075439453125,
544
+ "epoch": 0.54,
545
+ "grad_norm": 0.007414950989186764,
546
+ "learning_rate": 8.565180099749355e-05,
547
+ "loss": 0.0125,
548
+ "mean_token_accuracy": 0.9966325616836548,
549
+ "num_tokens": 53064519.0,
550
+ "step": 2700
551
+ },
552
+ {
553
+ "entropy": 0.0161309814453125,
554
+ "epoch": 0.55,
555
+ "grad_norm": 0.0070669627748429775,
556
+ "learning_rate": 8.50794562507961e-05,
557
+ "loss": 0.013,
558
+ "mean_token_accuracy": 0.9964843678474427,
559
+ "num_tokens": 54043844.0,
560
+ "step": 2750
561
+ },
562
+ {
563
+ "entropy": 0.015706787109375,
564
+ "epoch": 0.56,
565
+ "grad_norm": 0.005744470749050379,
566
+ "learning_rate": 8.449791252466819e-05,
567
+ "loss": 0.013,
568
+ "mean_token_accuracy": 0.9965711253881454,
569
+ "num_tokens": 55026422.0,
570
+ "step": 2800
571
+ },
572
+ {
573
+ "entropy": 0.01641357421875,
574
+ "epoch": 0.57,
575
+ "grad_norm": 0.0069727362133562565,
576
+ "learning_rate": 8.390732231887314e-05,
577
+ "loss": 0.0129,
578
+ "mean_token_accuracy": 0.996541822552681,
579
+ "num_tokens": 56009908.0,
580
+ "step": 2850
581
+ },
582
+ {
583
+ "entropy": 0.016571044921875,
584
+ "epoch": 0.58,
585
+ "grad_norm": 0.007201408036053181,
586
+ "learning_rate": 8.330784050545672e-05,
587
+ "loss": 0.0133,
588
+ "mean_token_accuracy": 0.9964863443374634,
589
+ "num_tokens": 56948904.0,
590
+ "step": 2900
591
+ },
592
+ {
593
+ "entropy": 0.0156451416015625,
594
+ "epoch": 0.59,
595
+ "grad_norm": 0.005370237864553928,
596
+ "learning_rate": 8.269962428813474e-05,
597
+ "loss": 0.0128,
598
+ "mean_token_accuracy": 0.9966201782226562,
599
+ "num_tokens": 57925745.0,
600
+ "step": 2950
601
+ },
602
+ {
603
+ "entropy": 0.01558837890625,
604
+ "epoch": 0.6,
605
+ "grad_norm": 0.005006297491490841,
606
+ "learning_rate": 8.208283316106902e-05,
607
+ "loss": 0.0127,
608
+ "mean_token_accuracy": 0.9966268092393875,
609
+ "num_tokens": 58902798.0,
610
+ "step": 3000
611
+ },
612
+ {
613
+ "entropy": 0.015733642578125,
614
+ "epoch": 0.61,
615
+ "grad_norm": 0.06520090997219086,
616
+ "learning_rate": 8.145762886704286e-05,
617
+ "loss": 0.013,
618
+ "mean_token_accuracy": 0.9965688252449035,
619
+ "num_tokens": 59860890.0,
620
+ "step": 3050
621
+ },
622
+ {
623
+ "entropy": 0.015631103515625,
624
+ "epoch": 0.62,
625
+ "grad_norm": 0.017850181087851524,
626
+ "learning_rate": 8.082417535504683e-05,
627
+ "loss": 0.0134,
628
+ "mean_token_accuracy": 0.9963969469070435,
629
+ "num_tokens": 60845473.0,
630
+ "step": 3100
631
+ },
632
+ {
633
+ "entropy": 0.0175927734375,
634
+ "epoch": 0.63,
635
+ "grad_norm": 0.017514687031507492,
636
+ "learning_rate": 8.018263873728585e-05,
637
+ "loss": 0.014,
638
+ "mean_token_accuracy": 0.9963253819942475,
639
+ "num_tokens": 61784192.0,
640
+ "step": 3150
641
+ },
642
+ {
643
+ "entropy": 0.0167742919921875,
644
+ "epoch": 0.64,
645
+ "grad_norm": 0.008665528148412704,
646
+ "learning_rate": 7.953318724561932e-05,
647
+ "loss": 0.0132,
648
+ "mean_token_accuracy": 0.9964195656776428,
649
+ "num_tokens": 62766207.0,
650
+ "step": 3200
651
+ },
652
+ {
653
+ "entropy": 0.0157501220703125,
654
+ "epoch": 0.65,
655
+ "grad_norm": 0.019448991864919662,
656
+ "learning_rate": 7.887599118744509e-05,
657
+ "loss": 0.0128,
658
+ "mean_token_accuracy": 0.9966711962223053,
659
+ "num_tokens": 63762073.0,
660
+ "step": 3250
661
+ },
662
+ {
663
+ "entropy": 0.0180609130859375,
664
+ "epoch": 0.66,
665
+ "grad_norm": 0.004935233388096094,
666
+ "learning_rate": 7.821122290103938e-05,
667
+ "loss": 0.0133,
668
+ "mean_token_accuracy": 0.9964338165521621,
669
+ "num_tokens": 64710323.0,
670
+ "step": 3300
671
+ },
672
+ {
673
+ "entropy": 0.015982666015625,
674
+ "epoch": 0.67,
675
+ "grad_norm": 0.004395989701151848,
676
+ "learning_rate": 7.753905671036403e-05,
677
+ "loss": 0.0128,
678
+ "mean_token_accuracy": 0.9966453295946122,
679
+ "num_tokens": 65703355.0,
680
+ "step": 3350
681
+ },
682
+ {
683
+ "entropy": 0.0160992431640625,
684
+ "epoch": 0.68,
685
+ "grad_norm": 0.01108239684253931,
686
+ "learning_rate": 7.685966887935309e-05,
687
+ "loss": 0.0128,
688
+ "mean_token_accuracy": 0.996572095155716,
689
+ "num_tokens": 66693270.0,
690
+ "step": 3400
691
+ },
692
+ {
693
+ "entropy": 0.0156439208984375,
694
+ "epoch": 0.69,
695
+ "grad_norm": 0.007867238484323025,
696
+ "learning_rate": 7.617323756569053e-05,
697
+ "loss": 0.013,
698
+ "mean_token_accuracy": 0.9965278363227844,
699
+ "num_tokens": 67662491.0,
700
+ "step": 3450
701
+ },
702
+ {
703
+ "entropy": 0.0155462646484375,
704
+ "epoch": 0.7,
705
+ "grad_norm": 0.006034135352820158,
706
+ "learning_rate": 7.547994277409168e-05,
707
+ "loss": 0.0128,
708
+ "mean_token_accuracy": 0.9966409939527512,
709
+ "num_tokens": 68643197.0,
710
+ "step": 3500
711
+ },
712
+ {
713
+ "entropy": 0.015638427734375,
714
+ "epoch": 0.71,
715
+ "grad_norm": 0.007907884195446968,
716
+ "learning_rate": 7.477996630909994e-05,
717
+ "loss": 0.0127,
718
+ "mean_token_accuracy": 0.9966798382997513,
719
+ "num_tokens": 69638359.0,
720
+ "step": 3550
721
+ },
722
+ {
723
+ "entropy": 0.0161322021484375,
724
+ "epoch": 0.72,
725
+ "grad_norm": 0.007897689007222652,
726
+ "learning_rate": 7.40734917274118e-05,
727
+ "loss": 0.0127,
728
+ "mean_token_accuracy": 0.9966677987575531,
729
+ "num_tokens": 70621331.0,
730
+ "step": 3600
731
+ },
732
+ {
733
+ "entropy": 0.015335693359375,
734
+ "epoch": 0.73,
735
+ "grad_norm": 0.006248582154512405,
736
+ "learning_rate": 7.336070428974218e-05,
737
+ "loss": 0.0127,
738
+ "mean_token_accuracy": 0.9966181468963623,
739
+ "num_tokens": 71609172.0,
740
+ "step": 3650
741
+ },
742
+ {
743
+ "entropy": 0.0166754150390625,
744
+ "epoch": 0.74,
745
+ "grad_norm": 0.009811230935156345,
746
+ "learning_rate": 7.26417909122431e-05,
747
+ "loss": 0.0127,
748
+ "mean_token_accuracy": 0.9965909200906754,
749
+ "num_tokens": 72589978.0,
750
+ "step": 3700
751
+ },
752
+ {
753
+ "entropy": 0.0143695068359375,
754
+ "epoch": 0.75,
755
+ "grad_norm": 0.012172735296189785,
756
+ "learning_rate": 7.191694011748818e-05,
757
+ "loss": 0.0131,
758
+ "mean_token_accuracy": 0.9965182906389236,
759
+ "num_tokens": 73606622.0,
760
+ "step": 3750
761
+ },
762
+ {
763
+ "entropy": 0.0156829833984375,
764
+ "epoch": 0.76,
765
+ "grad_norm": 0.004879123531281948,
766
+ "learning_rate": 7.118634198503571e-05,
767
+ "loss": 0.0129,
768
+ "mean_token_accuracy": 0.9965479630231857,
769
+ "num_tokens": 74626982.0,
770
+ "step": 3800
771
+ },
772
+ {
773
+ "entropy": 0.016346435546875,
774
+ "epoch": 0.77,
775
+ "grad_norm": 0.005593061912804842,
776
+ "learning_rate": 7.045018810158375e-05,
777
+ "loss": 0.0127,
778
+ "mean_token_accuracy": 0.9966219502687454,
779
+ "num_tokens": 75622857.0,
780
+ "step": 3850
781
+ },
782
+ {
783
+ "entropy": 0.015889892578125,
784
+ "epoch": 0.78,
785
+ "grad_norm": 0.006423663813620806,
786
+ "learning_rate": 6.97086715107298e-05,
787
+ "loss": 0.0128,
788
+ "mean_token_accuracy": 0.9966537064313888,
789
+ "num_tokens": 76601614.0,
790
+ "step": 3900
791
+ },
792
+ {
793
+ "entropy": 0.015166015625,
794
+ "epoch": 0.79,
795
+ "grad_norm": 0.009724115021526814,
796
+ "learning_rate": 6.896198666234833e-05,
797
+ "loss": 0.0125,
798
+ "mean_token_accuracy": 0.9966633880138397,
799
+ "num_tokens": 77598059.0,
800
+ "step": 3950
801
+ },
802
+ {
803
+ "entropy": 0.0157177734375,
804
+ "epoch": 0.8,
805
+ "grad_norm": 0.014047837816178799,
806
+ "learning_rate": 6.821032936159986e-05,
807
+ "loss": 0.0128,
808
+ "mean_token_accuracy": 0.996613291501999,
809
+ "num_tokens": 78570180.0,
810
+ "step": 4000
811
+ },
812
+ {
813
+ "entropy": 0.0155902099609375,
814
+ "epoch": 0.81,
815
+ "grad_norm": 0.008200579322874546,
816
+ "learning_rate": 6.745389671758435e-05,
817
+ "loss": 0.0127,
818
+ "mean_token_accuracy": 0.9965583562850953,
819
+ "num_tokens": 79561943.0,
820
+ "step": 4050
821
+ },
822
+ {
823
+ "entropy": 0.014603271484375,
824
+ "epoch": 0.82,
825
+ "grad_norm": 0.0051344577223062515,
826
+ "learning_rate": 6.669288709165276e-05,
827
+ "loss": 0.0122,
828
+ "mean_token_accuracy": 0.9967030560970307,
829
+ "num_tokens": 80576757.0,
830
+ "step": 4100
831
+ },
832
+ {
833
+ "entropy": 0.0161895751953125,
834
+ "epoch": 0.83,
835
+ "grad_norm": 0.00540529889985919,
836
+ "learning_rate": 6.59275000453902e-05,
837
+ "loss": 0.0127,
838
+ "mean_token_accuracy": 0.9966316163539887,
839
+ "num_tokens": 81549255.0,
840
+ "step": 4150
841
+ },
842
+ {
843
+ "entropy": 0.01657470703125,
844
+ "epoch": 0.84,
845
+ "grad_norm": 0.005644636228680611,
846
+ "learning_rate": 6.515793628828447e-05,
847
+ "loss": 0.0131,
848
+ "mean_token_accuracy": 0.9965110903978348,
849
+ "num_tokens": 82499683.0,
850
+ "step": 4200
851
+ },
852
+ {
853
+ "entropy": 0.0160162353515625,
854
+ "epoch": 0.85,
855
+ "grad_norm": 0.004190398845821619,
856
+ "learning_rate": 6.438439762509332e-05,
857
+ "loss": 0.0127,
858
+ "mean_token_accuracy": 0.9966331452131272,
859
+ "num_tokens": 83475810.0,
860
+ "step": 4250
861
+ },
862
+ {
863
+ "entropy": 0.01598388671875,
864
+ "epoch": 0.86,
865
+ "grad_norm": 0.005400759633630514,
866
+ "learning_rate": 6.360708690292479e-05,
867
+ "loss": 0.0129,
868
+ "mean_token_accuracy": 0.9966162234544754,
869
+ "num_tokens": 84432102.0,
870
+ "step": 4300
871
+ },
872
+ {
873
+ "entropy": 0.014876708984375,
874
+ "epoch": 0.87,
875
+ "grad_norm": 0.012628648430109024,
876
+ "learning_rate": 6.2826207958044e-05,
877
+ "loss": 0.0124,
878
+ "mean_token_accuracy": 0.9967326593399047,
879
+ "num_tokens": 85429626.0,
880
+ "step": 4350
881
+ },
882
+ {
883
+ "entropy": 0.014339599609375,
884
+ "epoch": 0.88,
885
+ "grad_norm": 0.009649231098592281,
886
+ "learning_rate": 6.204196556242061e-05,
887
+ "loss": 0.0121,
888
+ "mean_token_accuracy": 0.9967543077468872,
889
+ "num_tokens": 86450572.0,
890
+ "step": 4400
891
+ },
892
+ {
893
+ "entropy": 0.014298095703125,
894
+ "epoch": 0.89,
895
+ "grad_norm": 0.0049432734958827496,
896
+ "learning_rate": 6.125456537003095e-05,
897
+ "loss": 0.0122,
898
+ "mean_token_accuracy": 0.9967639350891113,
899
+ "num_tokens": 87458843.0,
900
+ "step": 4450
901
+ },
902
+ {
903
+ "entropy": 0.015306396484375,
904
+ "epoch": 0.9,
905
+ "grad_norm": 0.004098173696547747,
906
+ "learning_rate": 6.046421386292887e-05,
907
+ "loss": 0.0123,
908
+ "mean_token_accuracy": 0.9966574615240097,
909
+ "num_tokens": 88453316.0,
910
+ "step": 4500
911
+ },
912
+ {
913
+ "entropy": 0.016375732421875,
914
+ "epoch": 0.91,
915
+ "grad_norm": 0.005302398465573788,
916
+ "learning_rate": 5.9671118297099274e-05,
917
+ "loss": 0.0125,
918
+ "mean_token_accuracy": 0.9966228741407395,
919
+ "num_tokens": 89427450.0,
920
+ "step": 4550
921
+ },
922
+ {
923
+ "entropy": 0.015936279296875,
924
+ "epoch": 0.92,
925
+ "grad_norm": 0.004837509244680405,
926
+ "learning_rate": 5.887548664810896e-05,
927
+ "loss": 0.0126,
928
+ "mean_token_accuracy": 0.9966443425416946,
929
+ "num_tokens": 90400676.0,
930
+ "step": 4600
931
+ },
932
+ {
933
+ "entropy": 0.0147259521484375,
934
+ "epoch": 0.93,
935
+ "grad_norm": 0.008831442333757877,
936
+ "learning_rate": 5.8077527556568614e-05,
937
+ "loss": 0.0124,
938
+ "mean_token_accuracy": 0.9967519730329514,
939
+ "num_tokens": 91394757.0,
940
+ "step": 4650
941
+ },
942
+ {
943
+ "entropy": 0.01557373046875,
944
+ "epoch": 0.94,
945
+ "grad_norm": 0.0063737258315086365,
946
+ "learning_rate": 5.727745027342031e-05,
947
+ "loss": 0.0125,
948
+ "mean_token_accuracy": 0.9966205650568009,
949
+ "num_tokens": 92386137.0,
950
+ "step": 4700
951
+ },
952
+ {
953
+ "entropy": 0.0151513671875,
954
+ "epoch": 0.95,
955
+ "grad_norm": 0.005380143877118826,
956
+ "learning_rate": 5.64754646050652e-05,
957
+ "loss": 0.0125,
958
+ "mean_token_accuracy": 0.9965577948093415,
959
+ "num_tokens": 93374756.0,
960
+ "step": 4750
961
+ },
962
+ {
963
+ "entropy": 0.0138897705078125,
964
+ "epoch": 0.96,
965
+ "grad_norm": 0.005837676115334034,
966
+ "learning_rate": 5.567178085834542e-05,
967
+ "loss": 0.0121,
968
+ "mean_token_accuracy": 0.9967484200000762,
969
+ "num_tokens": 94389846.0,
970
+ "step": 4800
971
+ },
972
+ {
973
+ "entropy": 0.0154791259765625,
974
+ "epoch": 0.97,
975
+ "grad_norm": 0.008733139373362064,
976
+ "learning_rate": 5.486660978539468e-05,
977
+ "loss": 0.0125,
978
+ "mean_token_accuracy": 0.9966599184274674,
979
+ "num_tokens": 95378637.0,
980
+ "step": 4850
981
+ },
982
+ {
983
+ "entropy": 0.015618896484375,
984
+ "epoch": 0.98,
985
+ "grad_norm": 0.016466081142425537,
986
+ "learning_rate": 5.406016252837219e-05,
987
+ "loss": 0.0126,
988
+ "mean_token_accuracy": 0.9966346627473831,
989
+ "num_tokens": 96357363.0,
990
+ "step": 4900
991
+ },
992
+ {
993
+ "entropy": 0.0171966552734375,
994
+ "epoch": 0.99,
995
+ "grad_norm": 0.007259083911776543,
996
+ "learning_rate": 5.325265056409442e-05,
997
+ "loss": 0.0129,
998
+ "mean_token_accuracy": 0.9965799397230148,
999
+ "num_tokens": 97295349.0,
1000
+ "step": 4950
1001
+ },
1002
+ {
1003
+ "entropy": 0.01597900390625,
1004
+ "epoch": 1.0,
1005
+ "grad_norm": 0.007410865277051926,
1006
+ "learning_rate": 5.244428564857874e-05,
1007
+ "loss": 0.0127,
1008
+ "mean_token_accuracy": 0.996621105670929,
1009
+ "num_tokens": 98262502.0,
1010
+ "step": 5000
1011
+ }
1012
+ ],
1013
+ "logging_steps": 50,
1014
+ "max_steps": 10000,
1015
+ "num_input_tokens_seen": 0,
1016
+ "num_train_epochs": 2,
1017
+ "save_steps": 500,
1018
+ "stateful_callbacks": {
1019
+ "TrainerControl": {
1020
+ "args": {
1021
+ "should_epoch_stop": false,
1022
+ "should_evaluate": false,
1023
+ "should_log": false,
1024
+ "should_save": true,
1025
+ "should_training_stop": false
1026
+ },
1027
+ "attributes": {}
1028
+ }
1029
+ },
1030
+ "total_flos": 1.7065296121015828e+18,
1031
+ "train_batch_size": 1,
1032
+ "trial_name": null,
1033
+ "trial_params": null
1034
+ }
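Note: checkpoint-5000's log ends at epoch 1.0 (step 5000 of max_steps 10000); the logged learning rate of ~5.24e-05 is roughly half the 1e-4 peak, consistent with cosine decay after a short warmup. A minimal sketch for inspecting these logs offline, assuming the standard Trainer `log_history` layout and an available matplotlib:

import json
import matplotlib.pyplot as plt

# Load the Trainer state saved alongside the checkpoint.
with open("qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/trainer_state.json") as f:
    state = json.load(f)

# Keep only training-log entries (each carries loss, learning_rate, step, ...).
logs = [e for e in state["log_history"] if "loss" in e]
plt.plot([e["step"] for e in logs], [e["loss"] for e in logs])
plt.xlabel("step")
plt.ylabel("train loss")
plt.savefig("loss_curve.png")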
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000/zero_to_fp32.py ADDED
@@ -0,0 +1,760 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import gc
25
+ import json
26
+ import numpy as np
27
+ from tqdm import tqdm
28
+ from collections import OrderedDict
29
+ from dataclasses import dataclass
30
+
31
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
32
+ # DeepSpeed data structures it has to be available in the current python environment.
33
+ from deepspeed.utils import logger
34
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
35
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
36
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
37
+
38
+
39
+ @dataclass
40
+ class zero_model_state:
41
+ buffers: dict()
42
+ param_shapes: dict()
43
+ shared_params: list
44
+ ds_version: int
45
+ frozen_param_shapes: dict()
46
+ frozen_param_fragments: dict()
47
+
48
+
49
+ debug = 0
50
+
51
+ # load to cpu
52
+ device = torch.device('cpu')
53
+
54
+
55
+ def atoi(text):
56
+ return int(text) if text.isdigit() else text
57
+
58
+
59
+ def natural_keys(text):
60
+ '''
61
+ alist.sort(key=natural_keys) sorts in human order
62
+ http://nedbatchelder.com/blog/200712/human_sorting.html
63
+ (See Toothy's implementation in the comments)
64
+ '''
65
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
66
+
67
+
68
+ def get_model_state_file(checkpoint_dir, zero_stage):
69
+ if not os.path.isdir(checkpoint_dir):
70
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
71
+
72
+ # there should be only one file
73
+ if zero_stage <= 2:
74
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
75
+ elif zero_stage == 3:
76
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
77
+
78
+ if not os.path.exists(file):
79
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
80
+
81
+ return file
82
+
83
+
84
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
85
+ # XXX: need to test that this simple glob rule works for multi-node setup too
86
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
87
+
88
+ if len(ckpt_files) == 0:
89
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
90
+
91
+ return ckpt_files
92
+
93
+
94
+ def get_optim_files(checkpoint_dir):
95
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
96
+
97
+
98
+ def get_model_state_files(checkpoint_dir):
99
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
100
+
101
+
102
+ def parse_model_states(files):
103
+ zero_model_states = []
104
+ for file in files:
105
+ state_dict = torch.load(file, map_location=device, weights_only=False)
106
+
107
+ if BUFFER_NAMES not in state_dict:
108
+ raise ValueError(f"{file} is not a model state checkpoint")
109
+ buffer_names = state_dict[BUFFER_NAMES]
110
+ if debug:
111
+ print("Found buffers:", buffer_names)
112
+
113
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
114
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
115
+ param_shapes = state_dict[PARAM_SHAPES]
116
+
117
+ # collect parameters that are included in param_shapes
118
+ param_names = []
119
+ for s in param_shapes:
120
+ for name in s.keys():
121
+ param_names.append(name)
122
+
123
+ # update with frozen parameters
124
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
125
+ if frozen_param_shapes is not None:
126
+ if debug:
127
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
128
+ param_names += list(frozen_param_shapes.keys())
129
+
130
+ # handle shared params
131
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
132
+
133
+ ds_version = state_dict.get(DS_VERSION, None)
134
+
135
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
136
+
137
+ z_model_state = zero_model_state(buffers=buffers,
138
+ param_shapes=param_shapes,
139
+ shared_params=shared_params,
140
+ ds_version=ds_version,
141
+ frozen_param_shapes=frozen_param_shapes,
142
+ frozen_param_fragments=frozen_param_fragments)
143
+ zero_model_states.append(z_model_state)
144
+
145
+ return zero_model_states
146
+
147
+
148
+ def parse_optim_states(files, ds_checkpoint_dir):
149
+ total_files = len(files)
150
+ state_dicts = []
151
+ for f in tqdm(files, desc='Loading checkpoint shards'):
152
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
153
+ # immediately discard the two potentially huge optimizer states, since we only care about the fp32 master weights
154
+ # and also handle the case where it was already removed by another helper script
155
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
156
+ state_dicts.append(state_dict)
157
+
158
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
159
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
160
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
161
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
162
+
163
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
164
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
165
+ # use the max of the partition_count to get the dp world_size.
166
+
167
+ if type(world_size) is list:
168
+ world_size = max(world_size)
169
+
170
+ if world_size != total_files:
171
+ raise ValueError(
172
+ f"Expected {world_size} '*_optim_states.pt' files under '{ds_checkpoint_dir}' but found {total_files}. "
173
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
174
+ )
175
+
176
+ # the groups are named differently in each stage
177
+ if zero_stage <= 2:
178
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
179
+ elif zero_stage == 3:
180
+ fp32_groups_key = FP32_FLAT_GROUPS
181
+ else:
182
+ raise ValueError(f"unknown zero stage {zero_stage}")
183
+
184
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
185
+ return zero_stage, world_size, fp32_flat_groups
186
+
187
+
188
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
189
+ """
190
+ Returns fp32 state_dict reconstructed from ds checkpoint
191
+
192
+ Args:
193
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
194
+
195
+ """
196
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
197
+
198
+ optim_files = get_optim_files(ds_checkpoint_dir)
199
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
200
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
201
+
202
+ model_files = get_model_state_files(ds_checkpoint_dir)
203
+
204
+ zero_model_states = parse_model_states(model_files)
205
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
206
+
207
+ if zero_stage <= 2:
208
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
209
+ exclude_frozen_parameters)
210
+ elif zero_stage == 3:
211
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
212
+ exclude_frozen_parameters)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _has_callable(obj, fn):
248
+ attr = getattr(obj, fn, None)
249
+ return callable(attr)
250
+
251
+
252
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
253
+ param_shapes = zero_model_states[0].param_shapes
254
+
255
+ # Reconstruction protocol:
256
+ #
257
+ # XXX: document this
258
+
259
+ if debug:
260
+ for i in range(world_size):
261
+ for j in range(len(fp32_flat_groups[0])):
262
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
263
+
264
+ # XXX: memory usage doubles here (zero2)
265
+ num_param_groups = len(fp32_flat_groups[0])
266
+ merged_single_partition_of_fp32_groups = []
267
+ for i in range(num_param_groups):
268
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
269
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
270
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
271
+ avail_numel = sum(
272
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
273
+
274
+ if debug:
275
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
276
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
277
+ # not asserting if there is a mismatch due to possible padding
278
+ print(f"Have {avail_numel} numels to process.")
279
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
280
+
281
+ # params
282
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
283
+ # out-of-core computing solution
284
+ total_numel = 0
285
+ total_params = 0
286
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
287
+ offset = 0
288
+ avail_numel = full_single_fp32_vector.numel()
289
+ for name, shape in shapes.items():
290
+
291
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
292
+ total_numel += unpartitioned_numel
293
+ total_params += 1
294
+
295
+ if debug:
296
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
297
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
298
+ offset += unpartitioned_numel
299
+
300
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
301
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
302
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
303
+ # live optimizer object, so we are checking that the numbers are within the right range
304
+ align_to = 2 * world_size
305
+
306
+ def zero2_align(x):
307
+ return align_to * math.ceil(x / align_to)
308
+
309
+ if debug:
310
+ print(f"original offset={offset}, avail_numel={avail_numel}")
311
+
312
+ offset = zero2_align(offset)
313
+ avail_numel = zero2_align(avail_numel)
314
+
315
+ if debug:
316
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
317
+
318
+ # Sanity check
319
+ if offset != avail_numel:
320
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
321
+
322
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
323
+
324
+
325
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
326
+ exclude_frozen_parameters):
327
+ state_dict = OrderedDict()
328
+
329
+ # buffers
330
+ buffers = zero_model_states[0].buffers
331
+ state_dict.update(buffers)
332
+ if debug:
333
+ print(f"added {len(buffers)} buffers")
334
+
335
+ if not exclude_frozen_parameters:
336
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
337
+
338
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
339
+
340
+ # recover shared parameters
341
+ for pair in zero_model_states[0].shared_params:
342
+ if pair[1] in state_dict:
343
+ state_dict[pair[0]] = state_dict[pair[1]]
344
+
345
+ return state_dict
346
+
347
+
348
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
349
+ remainder = unpartitioned_numel % world_size
350
+ padding_numel = (world_size - remainder) if remainder else 0
351
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
352
+ return partitioned_numel, padding_numel
353
+
354
+
355
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
356
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
357
+ return
358
+
359
+ if debug:
360
+ for i in range(world_size):
361
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
362
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
363
+
364
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
365
+ wanted_params = len(frozen_param_shapes)
366
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
367
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
368
+ print(f'Frozen params: Have {avail_numel} numels to process.')
369
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
370
+
371
+ total_params = 0
372
+ total_numel = 0
373
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
374
+ total_params += 1
375
+ unpartitioned_numel = shape.numel()
376
+ total_numel += unpartitioned_numel
377
+
378
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
379
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
380
+
381
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
382
+
383
+ if debug:
384
+ print(
385
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
386
+ )
387
+
388
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
389
+
390
+
391
+ class GatheredTensor:
392
+ """
393
+ A pseudo tensor that collects partitioned weights.
394
+ It is more memory efficient when there are multiple groups.
395
+ """
396
+
397
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
398
+ self.flat_groups = flat_groups
399
+ self.flat_groups_offset = flat_groups_offset
400
+ self.offset = offset
401
+ self.partitioned_numel = partitioned_numel
402
+ self.shape = shape
403
+ self.dtype = self.flat_groups[0][0].dtype
404
+
405
+ def contiguous(self):
406
+ """
407
+ Merge partitioned weights from flat_groups into a single tensor.
408
+ """
409
+ end_idx = self.offset + self.partitioned_numel
410
+ world_size = len(self.flat_groups)
411
+ pad_flat_param_chunks = []
412
+
413
+ for rank_i in range(world_size):
414
+ # for each rank, we need to collect weights from related group/groups
415
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
416
+ start_group_id = None
417
+ end_group_id = None
418
+ for group_id in range(len(self.flat_groups_offset)):
419
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
420
+ start_group_id = group_id
421
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
422
+ end_group_id = group_id
423
+ break
424
+ # collect weights from related group/groups
425
+ for group_id in range(start_group_id, end_group_id + 1):
426
+ flat_tensor = flat_groups_at_rank_i[group_id]
427
+ start_offset = self.offset - self.flat_groups_offset[group_id]
428
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
429
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
430
+
431
+ # collect weights from all ranks
432
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
433
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
434
+ return param
435
+
436
+
437
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
438
+ param_shapes = zero_model_states[0].param_shapes
439
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
440
+
441
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
442
+ # param, re-consolidating each param, while dealing with padding if any
443
+
444
+ # merge list of dicts, preserving order
445
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
446
+
447
+ if debug:
448
+ for i in range(world_size):
449
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
450
+
451
+ wanted_params = len(param_shapes)
452
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
453
+ # not asserting if there is a mismatch due to possible padding
454
+ avail_numel = fp32_flat_groups[0].numel() * world_size
455
+ print(f"Trainable params: Have {avail_numel} numels to process.")
456
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
457
+
458
+ # params
459
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
460
+ # out-of-core computing solution
461
+ offset = 0
462
+ total_numel = 0
463
+ total_params = 0
464
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
465
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
466
+ unpartitioned_numel = shape.numel()
467
+ total_numel += unpartitioned_numel
468
+ total_params += 1
469
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
470
+
471
+ if debug:
472
+ print(
473
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
474
+ )
475
+
476
+ # memory efficient tensor
477
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
478
+ state_dict[name] = tensor
479
+ offset += partitioned_numel
480
+
481
+ offset *= world_size
482
+
483
+ # Sanity check
484
+ if offset != avail_numel:
485
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
486
+
487
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
488
+
489
+
490
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
491
+ exclude_frozen_parameters):
492
+ state_dict = OrderedDict()
493
+
494
+ # buffers
495
+ buffers = zero_model_states[0].buffers
496
+ state_dict.update(buffers)
497
+ if debug:
498
+ print(f"added {len(buffers)} buffers")
499
+
500
+ if not exclude_frozen_parameters:
501
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
502
+
503
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
504
+
505
+ # recover shared parameters
506
+ for pair in zero_model_states[0].shared_params:
507
+ if pair[1] in state_dict:
508
+ state_dict[pair[0]] = state_dict[pair[1]]
509
+
510
+ return state_dict
511
+
512
+
513
+ def to_torch_tensor(state_dict, return_empty_tensor=False):
514
+ """
515
+ Convert state_dict of GatheredTensor to torch tensor
516
+ """
517
+ torch_state_dict = {}
518
+ converted_tensors = {}
519
+ for name, tensor in state_dict.items():
520
+ tensor_id = id(tensor)
521
+ if tensor_id in converted_tensors: # shared tensors
522
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
523
+ torch_state_dict[name] = shared_tensor
524
+ else:
525
+ converted_tensors[tensor_id] = name
526
+ if return_empty_tensor:
527
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
528
+ else:
529
+ torch_state_dict[name] = tensor.contiguous()
530
+ return torch_state_dict
531
+
532
+
533
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
534
+ tag=None,
535
+ exclude_frozen_parameters=False,
536
+ lazy_mode=False):
537
+ """
538
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
539
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
540
+ via a model hub.
541
+
542
+ Args:
543
+ - ``checkpoint_dir``: path to the desired checkpoint folder
544
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
545
+ - ``exclude_frozen_parameters``: exclude frozen parameters
546
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
547
+ Convert a pseudo tensor to a torch tensor with ``.contiguous()``
548
+
549
+ Returns:
550
+ - pytorch ``state_dict``
551
+
552
+ A typical usage might be ::
553
+
554
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
555
+ # do the training and checkpoint saving
556
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
557
+ model = model.cpu() # move to cpu
558
+ model.load_state_dict(state_dict)
559
+ # submit to model hub or save the model to share with others
560
+
561
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
562
+ application. i.e. you will need to re-initialize the deepspeed engine, since
563
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
564
+
565
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
566
+
567
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
568
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
569
+ the checkpoint. Or you can load state_dict in lazy mode ::
570
+
571
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
572
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
573
+ for name, lazy_tensor in state_dict.items():
574
+ tensor = lazy_tensor.contiguous() # to cpu
575
+ print(name, tensor)
576
+ # del tensor to release memory if it is no longer in use
577
+ """
578
+ if tag is None:
579
+ latest_path = os.path.join(checkpoint_dir, 'latest')
580
+ if os.path.isfile(latest_path):
581
+ with open(latest_path, 'r') as fd:
582
+ tag = fd.read().strip()
583
+ else:
584
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
585
+
586
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
587
+
588
+ if not os.path.isdir(ds_checkpoint_dir):
589
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
590
+
591
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
592
+ if lazy_mode:
593
+ return state_dict
594
+ else:
595
+ return to_torch_tensor(state_dict)
596
+
597
+
598
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
599
+ output_dir,
600
+ max_shard_size="5GB",
601
+ safe_serialization=False,
602
+ tag=None,
603
+ exclude_frozen_parameters=False):
604
+ """
605
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
606
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
607
+
608
+ Args:
609
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
610
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
611
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
612
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
613
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
614
+ - ``exclude_frozen_parameters``: exclude frozen parameters
615
+ """
616
+
617
+ # Dependency pre-check
618
+ if safe_serialization:
619
+ try:
620
+ from safetensors.torch import save_file
621
+ except ImportError:
622
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
623
+ raise
624
+ if max_shard_size is not None:
625
+ try:
626
+ from huggingface_hub import split_torch_state_dict_into_shards
627
+ except ImportError:
628
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
629
+ raise
630
+
631
+ # Convert zero checkpoint to state_dict
632
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
633
+ tag,
634
+ exclude_frozen_parameters,
635
+ lazy_mode=True)
636
+
637
+ # Shard the model if it is too big.
638
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
639
+ if max_shard_size is not None:
640
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
641
+ # a memory-efficient approach for sharding
642
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
643
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
644
+ filename_pattern=filename_pattern,
645
+ max_shard_size=max_shard_size)
646
+ else:
647
+ from collections import namedtuple
648
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
649
+ state_dict_split = StateDictSplit(is_sharded=False,
650
+ filename_to_tensors={weights_name: list(state_dict.keys())})
651
+
652
+ # Save the model by shard
653
+ os.makedirs(output_dir, exist_ok=True)
654
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
655
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
656
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
657
+ shard_state_dict = to_torch_tensor(shard_state_dict)
658
+ output_path = os.path.join(output_dir, shard_file)
659
+ if safe_serialization:
660
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
661
+ else:
662
+ torch.save(shard_state_dict, output_path)
663
+ # release the memory of current shard
664
+ for tensor_name in list(shard_state_dict.keys()):
665
+ del state_dict[tensor_name]
666
+ del shard_state_dict[tensor_name]
667
+ del shard_state_dict
668
+ gc.collect()
669
+
670
+ # Save index if sharded
671
+ if state_dict_split.is_sharded:
672
+ index = {
673
+ "metadata": state_dict_split.metadata,
674
+ "weight_map": state_dict_split.tensor_to_filename,
675
+ }
676
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
677
+ save_index_file = os.path.join(output_dir, save_index_file)
678
+ with open(save_index_file, "w", encoding="utf-8") as f:
679
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
680
+ f.write(content)
681
+
682
+
683
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
684
+ """
685
+ 1. Put the provided model to cpu
686
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
687
+ 3. Load it into the provided model
688
+
689
+ Args:
690
+ - ``model``: the model object to update
691
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
692
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
693
+
694
+ Returns:
695
+ - ``model``: the modified model
696
+
697
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
698
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
699
+ conveniently placed for you in the checkpoint folder.
700
+
701
+ A typical usage might be ::
702
+
703
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
704
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
705
+ # submit to model hub or save the model to share with others
706
+
707
+ Note that once this is run, the ``model`` will no longer be usable in the deepspeed context
708
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
709
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
710
+
711
+ """
712
+ logger.info("Extracting fp32 weights")
713
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
714
+
715
+ logger.info("Overwriting model with fp32 weights")
716
+ model = model.cpu()
717
+ model.load_state_dict(state_dict, strict=False)
718
+
719
+ return model
720
+
721
+
722
+ if __name__ == "__main__":
723
+ parser = argparse.ArgumentParser()
724
+ parser.add_argument("checkpoint_dir",
725
+ type=str,
726
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
727
+ parser.add_argument("output_dir",
728
+ type=str,
729
+ help="directory to the pytorch fp32 state_dict output files "
730
+ "(e.g. path/checkpoint-12-output/)")
731
+ parser.add_argument(
732
+ "--max_shard_size",
733
+ type=str,
734
+ default="5GB",
735
+ help="The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of a size "
736
+ "lower than this. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
737
+ "We default to 5GB so that models can be run easily on free-tier Google Colab instances "
738
+ "without CPU OOM issues.")
739
+ parser.add_argument(
740
+ "--safe_serialization",
741
+ default=False,
742
+ action='store_true',
743
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
744
+ parser.add_argument("-t",
745
+ "--tag",
746
+ type=str,
747
+ default=None,
748
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
749
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
750
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
751
+ args = parser.parse_args()
752
+
753
+ debug = args.debug
754
+
755
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
756
+ args.output_dir,
757
+ max_shard_size=args.max_shard_size,
758
+ safe_serialization=args.safe_serialization,
759
+ tag=args.tag,
760
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
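Besides the CLI entry point above, the conversion can be driven programmatically; a small sketch using the function defined in this script (the output path is illustrative):

from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

# Consolidate the sharded ZeRO states into fp32 shards, saved as safetensors.
convert_zero_checkpoint_to_fp32_state_dict(
    "qwen2.5-coder-3B_r64_lr1e-4/checkpoint-5000",  # dir containing the 'latest' tag file
    "checkpoint-5000-fp32/",
    safe_serialization=True,
)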
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/adapter_config.json ADDED
@@ -0,0 +1,46 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "Qwen/Qwen2.5-Coder-3B",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 128,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
+ "r": 64,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "up_proj",
33
+ "q_proj",
34
+ "o_proj",
35
+ "down_proj",
36
+ "gate_proj",
37
+ "v_proj",
38
+ "k_proj"
39
+ ],
40
+ "target_parameters": null,
41
+ "task_type": "CAUSAL_LM",
42
+ "trainable_token_indices": null,
43
+ "use_dora": false,
44
+ "use_qalora": false,
45
+ "use_rslora": false
46
+ }
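This is a plain PEFT LoRA configuration (r=64, lora_alpha=128, dropout 0.05, all attention and MLP projections targeted). A minimal loading sketch, assuming the matching adapter weights (adapter_model.safetensors) sit next to this config in the checkpoint folder:

from transformers import AutoModelForCausalLM
from peft import PeftModel

# Attach the trained LoRA adapter to the base model named in the config.
base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-Coder-3B")
model = PeftModel.from_pretrained(base, "qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000")
model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights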
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/added_tokens.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<tool_call>": 151657,
4
+ "<|box_end|>": 151649,
5
+ "<|box_start|>": 151648,
6
+ "<|endoftext|>": 151643,
7
+ "<|file_sep|>": 151664,
8
+ "<|fim_middle|>": 151660,
9
+ "<|fim_pad|>": 151662,
10
+ "<|fim_prefix|>": 151659,
11
+ "<|fim_suffix|>": 151661,
12
+ "<|im_end|>": 151645,
13
+ "<|im_start|>": 151644,
14
+ "<|image_pad|>": 151655,
15
+ "<|object_ref_end|>": 151647,
16
+ "<|object_ref_start|>": 151646,
17
+ "<|quad_end|>": 151651,
18
+ "<|quad_start|>": 151650,
19
+ "<|repo_name|>": 151663,
20
+ "<|video_pad|>": 151656,
21
+ "<|vision_end|>": 151653,
22
+ "<|vision_pad|>": 151654,
23
+ "<|vision_start|>": 151652
24
+ }
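These entries extend the Qwen2.5 base vocabulary with special/control tokens in the 151643+ id range. A quick consistency check, assuming the tokenizer files in this folder load as-is:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000")
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643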
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
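The template renders ChatML with optional <tools> signatures and <tool_call>/<tool_response> blocks. A short sketch of how it is exercised (plain chat, no tools; tool use additionally needs a transformers release that accepts tools= in apply_chat_template):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000")
messages = [{"role": "user", "content": "Write a hello-world in Python."}]
prompt = tok.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
# The prompt ends with '<|im_start|>assistant\n', per the add_generation_prompt branch above.
print(prompt)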
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/latest ADDED
@@ -0,0 +1 @@
1
+ global_step6000
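This one-line tag file is what zero_to_fp32.py consults when no --tag is passed; the lookup amounts to (illustrative):

# Mirrors the tag resolution in get_fp32_state_dict_from_zero_checkpoint.
tag = open("qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/latest").read().strip()  # -> "global_step6000"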
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
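Note that pad_token and eos_token are both <|endoftext|> here, so downstream generation code should not rely on a distinct padding id. A quick check, assuming the tokenizer in this folder:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000")
assert tok.pad_token_id == tok.eos_token_id == 151643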
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000/zero_to_fp32.py ADDED
@@ -0,0 +1,760 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import gc
25
+ import json
26
+ import numpy as np
27
+ from tqdm import tqdm
28
+ from collections import OrderedDict
29
+ from dataclasses import dataclass
30
+
31
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
32
+ # DeepSpeed data structures it has to be available in the current python environment.
33
+ from deepspeed.utils import logger
34
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
35
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
36
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
37
+
38
+
39
+ @dataclass
40
+ class zero_model_state:
41
+ buffers: dict
42
+ param_shapes: dict
43
+ shared_params: list
44
+ ds_version: int
45
+ frozen_param_shapes: dict
46
+ frozen_param_fragments: dict
47
+
48
+
49
+ debug = 0
50
+
51
+ # load to cpu
52
+ device = torch.device('cpu')
53
+
54
+
55
+ def atoi(text):
56
+ return int(text) if text.isdigit() else text
57
+
58
+
59
+ def natural_keys(text):
60
+ '''
61
+ alist.sort(key=natural_keys) sorts in human order
62
+ http://nedbatchelder.com/blog/200712/human_sorting.html
63
+ (See Toothy's implementation in the comments)
64
+ '''
65
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
66
+
67
+
68
+ def get_model_state_file(checkpoint_dir, zero_stage):
69
+ if not os.path.isdir(checkpoint_dir):
70
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
71
+
72
+ # there should be only one file
73
+ if zero_stage <= 2:
74
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
75
+ elif zero_stage == 3:
76
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
77
+
78
+ if not os.path.exists(file):
79
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
80
+
81
+ return file
82
+
83
+
84
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
85
+ # XXX: need to test that this simple glob rule works for multi-node setup too
86
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
87
+
88
+ if len(ckpt_files) == 0:
89
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
90
+
91
+ return ckpt_files
92
+
93
+
94
+ def get_optim_files(checkpoint_dir):
95
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
96
+
97
+
98
+ def get_model_state_files(checkpoint_dir):
99
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
100
+
101
+
102
+ def parse_model_states(files):
103
+ zero_model_states = []
104
+ for file in files:
105
+ state_dict = torch.load(file, map_location=device, weights_only=False)
106
+
107
+ if BUFFER_NAMES not in state_dict:
108
+ raise ValueError(f"{file} is not a model state checkpoint")
109
+ buffer_names = state_dict[BUFFER_NAMES]
110
+ if debug:
111
+ print("Found buffers:", buffer_names)
112
+
113
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
114
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
115
+ param_shapes = state_dict[PARAM_SHAPES]
116
+
117
+ # collect parameters that are included in param_shapes
118
+ param_names = []
119
+ for s in param_shapes:
120
+ for name in s.keys():
121
+ param_names.append(name)
122
+
123
+ # update with frozen parameters
124
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
125
+ if frozen_param_shapes is not None:
126
+ if debug:
127
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
128
+ param_names += list(frozen_param_shapes.keys())
129
+
130
+ # handle shared params
131
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
132
+
133
+ ds_version = state_dict.get(DS_VERSION, None)
134
+
135
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
136
+
137
+ z_model_state = zero_model_state(buffers=buffers,
138
+ param_shapes=param_shapes,
139
+ shared_params=shared_params,
140
+ ds_version=ds_version,
141
+ frozen_param_shapes=frozen_param_shapes,
142
+ frozen_param_fragments=frozen_param_fragments)
143
+ zero_model_states.append(z_model_state)
144
+
145
+ return zero_model_states
146
+
147
+
148
+ def parse_optim_states(files, ds_checkpoint_dir):
149
+ total_files = len(files)
150
+ state_dicts = []
151
+ for f in tqdm(files, desc='Loading checkpoint shards'):
152
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
153
+ # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights
154
+ # and also handle the case where it was already removed by another helper script
155
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
156
+ state_dicts.append(state_dict)
157
+
158
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
159
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
160
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
161
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
162
+
163
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
164
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
165
+ # use the max of the partition_count to get the dp world_size.
166
+
167
+ if type(world_size) is list:
168
+ world_size = max(world_size)
169
+
170
+ if world_size != total_files:
171
+ raise ValueError(
172
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
173
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
174
+ )
175
+
176
+ # the groups are named differently in each stage
177
+ if zero_stage <= 2:
178
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
179
+ elif zero_stage == 3:
180
+ fp32_groups_key = FP32_FLAT_GROUPS
181
+ else:
182
+ raise ValueError(f"unknown zero stage {zero_stage}")
183
+
184
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
185
+ return zero_stage, world_size, fp32_flat_groups
186
+
187
+
188
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
189
+ """
190
+ Returns fp32 state_dict reconstructed from ds checkpoint
191
+
192
+ Args:
193
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
194
+
195
+ """
196
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
197
+
198
+ optim_files = get_optim_files(ds_checkpoint_dir)
199
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
200
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
201
+
202
+ model_files = get_model_state_files(ds_checkpoint_dir)
203
+
204
+ zero_model_states = parse_model_states(model_files)
205
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
206
+
207
+ if zero_stage <= 2:
208
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
209
+ exclude_frozen_parameters)
210
+ elif zero_stage == 3:
211
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
212
+ exclude_frozen_parameters)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _has_callable(obj, fn):
248
+ attr = getattr(obj, fn, None)
249
+ return callable(attr)
250
+
251
+
252
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
253
+ param_shapes = zero_model_states[0].param_shapes
254
+
255
+ # Reconstruction protocol:
256
+ #
257
+ # XXX: document this
258
+
259
+ if debug:
260
+ for i in range(world_size):
261
+ for j in range(len(fp32_flat_groups[0])):
262
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
263
+
264
+ # XXX: memory usage doubles here (zero2)
265
+ num_param_groups = len(fp32_flat_groups[0])
266
+ merged_single_partition_of_fp32_groups = []
267
+ for i in range(num_param_groups):
268
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
269
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
270
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
271
+ avail_numel = sum(
272
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
273
+
274
+ if debug:
275
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
276
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
277
+ # not asserting if there is a mismatch due to possible padding
278
+ print(f"Have {avail_numel} numels to process.")
279
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
280
+
281
+ # params
282
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
283
+ # out-of-core computing solution
284
+ total_numel = 0
285
+ total_params = 0
286
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
287
+ offset = 0
288
+ avail_numel = full_single_fp32_vector.numel()
289
+ for name, shape in shapes.items():
290
+
291
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
292
+ total_numel += unpartitioned_numel
293
+ total_params += 1
294
+
295
+ if debug:
296
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
297
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
298
+ offset += unpartitioned_numel
299
+
300
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
301
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
302
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
303
+ # live optimizer object, so we are checking that the numbers are within the right range
304
+ align_to = 2 * world_size
305
+
306
+ def zero2_align(x):
307
+ return align_to * math.ceil(x / align_to)
308
+
309
+ if debug:
310
+ print(f"original offset={offset}, avail_numel={avail_numel}")
311
+
312
+ offset = zero2_align(offset)
313
+ avail_numel = zero2_align(avail_numel)
314
+
315
+ if debug:
316
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
317
+
318
+ # Sanity check
319
+ if offset != avail_numel:
320
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
321
+
322
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
323
+
324
+
325
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
326
+ exclude_frozen_parameters):
327
+ state_dict = OrderedDict()
328
+
329
+ # buffers
330
+ buffers = zero_model_states[0].buffers
331
+ state_dict.update(buffers)
332
+ if debug:
333
+ print(f"added {len(buffers)} buffers")
334
+
335
+ if not exclude_frozen_parameters:
336
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
337
+
338
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
339
+
340
+ # recover shared parameters
341
+ for pair in zero_model_states[0].shared_params:
342
+ if pair[1] in state_dict:
343
+ state_dict[pair[0]] = state_dict[pair[1]]
344
+
345
+ return state_dict
346
+
347
+
348
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
349
+ remainder = unpartitioned_numel % world_size
350
+ padding_numel = (world_size - remainder) if remainder else 0
351
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
352
+ return partitioned_numel, padding_numel
353
+
354
+
355
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
356
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
357
+ return
358
+
359
+ if debug:
360
+ for i in range(world_size):
361
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
362
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
363
+
364
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
365
+ wanted_params = len(frozen_param_shapes)
366
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
367
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
368
+ print(f'Frozen params: Have {avail_numel} numels to process.')
369
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
370
+
371
+ total_params = 0
372
+ total_numel = 0
373
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
374
+ total_params += 1
375
+ unpartitioned_numel = shape.numel()
376
+ total_numel += unpartitioned_numel
377
+
378
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
379
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
380
+
381
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
382
+
383
+ if debug:
384
+ print(
385
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
386
+ )
387
+
388
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
389
+
390
+
391
+ class GatheredTensor:
392
+ """
393
+ A pseudo tensor that collects partitioned weights.
394
+ It is more memory efficient when there are multiple groups.
395
+ """
396
+
397
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
398
+ self.flat_groups = flat_groups
399
+ self.flat_groups_offset = flat_groups_offset
400
+ self.offset = offset
401
+ self.partitioned_numel = partitioned_numel
402
+ self.shape = shape
403
+ self.dtype = self.flat_groups[0][0].dtype
404
+
405
+ def contiguous(self):
406
+ """
407
+ Merge partitioned weights from flat_groups into a single tensor.
408
+ """
409
+ end_idx = self.offset + self.partitioned_numel
410
+ world_size = len(self.flat_groups)
411
+ pad_flat_param_chunks = []
412
+
413
+ for rank_i in range(world_size):
414
+ # for each rank, we need to collect weights from related group/groups
415
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
416
+ start_group_id = None
417
+ end_group_id = None
418
+ for group_id in range(len(self.flat_groups_offset)):
419
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
420
+ start_group_id = group_id
421
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
422
+ end_group_id = group_id
423
+ break
424
+ # collect weights from related group/groups
425
+ for group_id in range(start_group_id, end_group_id + 1):
426
+ flat_tensor = flat_groups_at_rank_i[group_id]
427
+ start_offset = self.offset - self.flat_groups_offset[group_id]
428
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
429
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
430
+
431
+ # collect weights from all ranks
432
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
433
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
434
+ return param
435
+
436
+
437
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
438
+ param_shapes = zero_model_states[0].param_shapes
439
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
440
+
441
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
442
+ # param, re-consolidating each param, while dealing with padding if any
443
+
444
+ # merge list of dicts, preserving order
445
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
446
+
447
+ if debug:
448
+ for i in range(world_size):
449
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
450
+
451
+ wanted_params = len(param_shapes)
452
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
453
+ # not asserting if there is a mismatch due to possible padding
454
+ avail_numel = fp32_flat_groups[0].numel() * world_size
455
+ print(f"Trainable params: Have {avail_numel} numels to process.")
456
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
457
+
458
+ # params
459
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
460
+ # out-of-core computing solution
461
+ offset = 0
462
+ total_numel = 0
463
+ total_params = 0
464
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
465
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
466
+ unpartitioned_numel = shape.numel()
467
+ total_numel += unpartitioned_numel
468
+ total_params += 1
469
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
470
+
471
+ if debug:
472
+ print(
473
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
474
+ )
475
+
476
+ # memory efficient tensor
477
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
478
+ state_dict[name] = tensor
479
+ offset += partitioned_numel
480
+
481
+ offset *= world_size
482
+
483
+ # Sanity check
484
+ if offset != avail_numel:
485
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
486
+
487
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
488
+
489
+
490
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
491
+ exclude_frozen_parameters):
492
+ state_dict = OrderedDict()
493
+
494
+ # buffers
495
+ buffers = zero_model_states[0].buffers
496
+ state_dict.update(buffers)
497
+ if debug:
498
+ print(f"added {len(buffers)} buffers")
499
+
500
+ if not exclude_frozen_parameters:
501
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
502
+
503
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
504
+
505
+ # recover shared parameters
506
+ for pair in zero_model_states[0].shared_params:
507
+ if pair[1] in state_dict:
508
+ state_dict[pair[0]] = state_dict[pair[1]]
509
+
510
+ return state_dict
511
+
512
+
513
+ def to_torch_tensor(state_dict, return_empty_tensor=False):
514
+ """
515
+ Convert state_dict of GatheredTensor to torch tensor
516
+ """
517
+ torch_state_dict = {}
518
+ converted_tensors = {}
519
+ for name, tensor in state_dict.items():
520
+ tensor_id = id(tensor)
521
+ if tensor_id in converted_tensors: # shared tensors
522
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
523
+ torch_state_dict[name] = shared_tensor
524
+ else:
525
+ converted_tensors[tensor_id] = name
526
+ if return_empty_tensor:
527
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
528
+ else:
529
+ torch_state_dict[name] = tensor.contiguous()
530
+ return torch_state_dict
531
+
532
+
533
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
534
+ tag=None,
535
+ exclude_frozen_parameters=False,
536
+ lazy_mode=False):
537
+ """
538
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
539
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
540
+ via a model hub.
541
+
542
+ Args:
543
+ - ``checkpoint_dir``: path to the desired checkpoint folder
544
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the 'latest' file, e.g., ``global_step14``
545
+ - ``exclude_frozen_parameters``: exclude frozen parameters
546
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
547
+ Convert a pseudo tensor to a torch tensor via ``.contiguous()``
548
+
549
+ Returns:
550
+ - pytorch ``state_dict``
551
+
552
+ A typical usage might be ::
553
+
554
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
555
+ # do the training and checkpoint saving
556
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
557
+ model = model.cpu() # move to cpu
558
+ model.load_state_dict(state_dict)
559
+ # submit to model hub or save the model to share with others
560
+
561
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
562
+ application, i.e. you will need to re-initialize the deepspeed engine, since
563
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
564
+
565
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
566
+
567
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
568
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
569
+ the checkpoint. Or you can load state_dict in lazy mode ::
570
+
571
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
572
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
573
+ for name, lazy_tensor in state_dict.items():
574
+ tensor = lazy_tensor.contiguous() # to cpu
575
+ print(name, tensor)
576
+ # del tensor to release memory if it is no longer in use
577
+ """
578
+ if tag is None:
579
+ latest_path = os.path.join(checkpoint_dir, 'latest')
580
+ if os.path.isfile(latest_path):
581
+ with open(latest_path, 'r') as fd:
582
+ tag = fd.read().strip()
583
+ else:
584
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
585
+
586
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
587
+
588
+ if not os.path.isdir(ds_checkpoint_dir):
589
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
590
+
591
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
592
+ if lazy_mode:
593
+ return state_dict
594
+ else:
595
+ return to_torch_tensor(state_dict)
596
+
597
+
598
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
599
+ output_dir,
600
+ max_shard_size="5GB",
601
+ safe_serialization=False,
602
+ tag=None,
603
+ exclude_frozen_parameters=False):
604
+ """
605
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
606
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
607
+
608
+ Args:
609
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
610
+ - ``output_dir``: directory for the pytorch fp32 state_dict output files
611
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
612
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
613
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
614
+ - ``exclude_frozen_parameters``: exclude frozen parameters
615
+ """
616
+
617
+ # Dependency pre-check
618
+ if safe_serialization:
619
+ try:
620
+ from safetensors.torch import save_file
621
+ except ImportError:
622
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
623
+ raise
624
+ if max_shard_size is not None:
625
+ try:
626
+ from huggingface_hub import split_torch_state_dict_into_shards
627
+ except ImportError:
628
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
629
+ raise
630
+
631
+ # Convert zero checkpoint to state_dict
632
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
633
+ tag,
634
+ exclude_frozen_parameters,
635
+ lazy_mode=True)
636
+
637
+ # Shard the model if it is too big.
638
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
639
+ if max_shard_size is not None:
640
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
641
+ # a memory-efficient approach to sharding
642
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
643
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
644
+ filename_pattern=filename_pattern,
645
+ max_shard_size=max_shard_size)
646
+ else:
647
+ from collections import namedtuple
648
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
649
+ state_dict_split = StateDictSplit(is_sharded=False,
650
+ filename_to_tensors={weights_name: list(state_dict.keys())})
651
+
652
+ # Save the model by shard
653
+ os.makedirs(output_dir, exist_ok=True)
654
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
655
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
656
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
657
+ shard_state_dict = to_torch_tensor(shard_state_dict)
658
+ output_path = os.path.join(output_dir, shard_file)
659
+ if safe_serialization:
660
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
661
+ else:
662
+ torch.save(shard_state_dict, output_path)
663
+ # release the memory of current shard
664
+ for tensor_name in list(shard_state_dict.keys()):
665
+ del state_dict[tensor_name]
666
+ del shard_state_dict[tensor_name]
667
+ del shard_state_dict
668
+ gc.collect()
669
+
670
+ # Save index if sharded
671
+ if state_dict_split.is_sharded:
672
+ index = {
673
+ "metadata": state_dict_split.metadata,
674
+ "weight_map": state_dict_split.tensor_to_filename,
675
+ }
676
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
677
+ save_index_file = os.path.join(output_dir, save_index_file)
678
+ with open(save_index_file, "w", encoding="utf-8") as f:
679
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
680
+ f.write(content)
681
+
682
+
683
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
684
+ """
685
+ 1. Put the provided model to cpu
686
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
687
+ 3. Load it into the provided model
688
+
689
+ Args:
690
+ - ``model``: the model object to update
691
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
692
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
693
+
694
+ Returns:
695
+ - ``model``: the modified model
696
+
697
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
698
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
699
+ conveniently placed for you in the checkpoint folder.
700
+
701
+ A typical usage might be ::
702
+
703
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
704
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
705
+ # submit to model hub or save the model to share with others
706
+
707
+ Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
708
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
709
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
710
+
711
+ """
712
+ logger.info("Extracting fp32 weights")
713
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
714
+
715
+ logger.info("Overwriting model with fp32 weights")
716
+ model = model.cpu()
717
+ model.load_state_dict(state_dict, strict=False)
718
+
719
+ return model
720
+
721
+
722
+ if __name__ == "__main__":
723
+ parser = argparse.ArgumentParser()
724
+ parser.add_argument("checkpoint_dir",
725
+ type=str,
726
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
727
+ parser.add_argument("output_dir",
728
+ type=str,
729
+ help="directory for the pytorch fp32 state_dict output files "
730
+ "(e.g. path/checkpoint-12-output/)")
731
+ parser.add_argument(
732
+ "--max_shard_size",
733
+ type=str,
734
+ default="5GB",
735
+ help="The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be "
736
+ "smaller than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
737
+ "We default it to 5GB so that models can be converted easily on free-tier Google Colab instances "
738
+ "without CPU OOM issues.")
739
+ parser.add_argument(
740
+ "--safe_serialization",
741
+ default=False,
742
+ action='store_true',
743
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
744
+ parser.add_argument("-t",
745
+ "--tag",
746
+ type=str,
747
+ default=None,
748
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
749
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
750
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
751
+ args = parser.parse_args()
752
+
753
+ debug = args.debug
754
+
755
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
756
+ args.output_dir,
757
+ max_shard_size=args.max_shard_size,
758
+ safe_serialization=args.safe_serialization,
759
+ tag=args.tag,
760
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
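
The script above is copied into each checkpoint folder so the ZeRO shards can be consolidated later without the training setup. A minimal sketch of calling it programmatically, assuming the script is importable (it sits inside each checkpoint folder) and DeepSpeed is installed; the output path is illustrative, and the ``latest`` file resolves the tag to global_step6000:

from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

convert_zero_checkpoint_to_fp32_state_dict(
    "qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000",       # folder holding `latest` and the tag folder
    "qwen2.5-coder-3B_r64_lr1e-4/checkpoint-6000-fp32",  # output folder (illustrative)
    safe_serialization=True,                             # write model.safetensors shards
)

Equivalently, from inside the checkpoint folder: python zero_to_fp32.py . checkpoint-6000-fp32/ --safe_serialization
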
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/adapter_config.json ADDED
@@ -0,0 +1,46 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "Qwen/Qwen2.5-Coder-3B",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 128,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
+ "r": 64,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "up_proj",
33
+ "q_proj",
34
+ "o_proj",
35
+ "down_proj",
36
+ "gate_proj",
37
+ "v_proj",
38
+ "k_proj"
39
+ ],
40
+ "target_parameters": null,
41
+ "task_type": "CAUSAL_LM",
42
+ "trainable_token_indices": null,
43
+ "use_dora": false,
44
+ "use_qalora": false,
45
+ "use_rslora": false
46
+ }
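
This config describes a rank-64 LoRA adapter (alpha 128, dropout 0.05) over the attention and MLP projections of Qwen/Qwen2.5-Coder-3B. A minimal loading sketch with peft, assuming the checkpoint directory also contains the adapter weights; the dtype and the merge step are illustrative choices, not part of the config:

import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-Coder-3B", torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, "qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000")
model = model.merge_and_unload()  # optionally fold the LoRA deltas into the base weights
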
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/added_tokens.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<tool_call>": 151657,
4
+ "<|box_end|>": 151649,
5
+ "<|box_start|>": 151648,
6
+ "<|endoftext|>": 151643,
7
+ "<|file_sep|>": 151664,
8
+ "<|fim_middle|>": 151660,
9
+ "<|fim_pad|>": 151662,
10
+ "<|fim_prefix|>": 151659,
11
+ "<|fim_suffix|>": 151661,
12
+ "<|im_end|>": 151645,
13
+ "<|im_start|>": 151644,
14
+ "<|image_pad|>": 151655,
15
+ "<|object_ref_end|>": 151647,
16
+ "<|object_ref_start|>": 151646,
17
+ "<|quad_end|>": 151651,
18
+ "<|quad_start|>": 151650,
19
+ "<|repo_name|>": 151663,
20
+ "<|video_pad|>": 151656,
21
+ "<|vision_end|>": 151653,
22
+ "<|vision_pad|>": 151654,
23
+ "<|vision_start|>": 151652
24
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/latest ADDED
@@ -0,0 +1 @@
1
+ global_step7000
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "clean_up_tokenization_spaces": false,
199
+ "eos_token": "<|endoftext|>",
200
+ "errors": "replace",
201
+ "extra_special_tokens": {},
202
+ "model_max_length": 32768,
203
+ "pad_token": "<|endoftext|>",
204
+ "split_special_tokens": false,
205
+ "tokenizer_class": "Qwen2Tokenizer",
206
+ "unk_token": null
207
+ }
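
Per this config, both ``eos_token`` and ``pad_token`` resolve to <|endoftext|>, and the chat and FIM markers are registered as added tokens so they encode to single ids. A quick sanity check, assuming the checkpoint directory is available locally:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000")
assert tok.eos_token == tok.pad_token == "<|endoftext|>"
assert tok.convert_tokens_to_ids("<|im_start|>") == 151644  # matches added_tokens.json above
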
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/trainer_state.json ADDED
@@ -0,0 +1,1434 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 1.4,
6
+ "eval_steps": 500,
7
+ "global_step": 7000,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "entropy": 0.166884765625,
14
+ "epoch": 0.01,
15
+ "grad_norm": 0.09714163839817047,
16
+ "learning_rate": 1.6333333333333335e-05,
17
+ "loss": 0.1512,
18
+ "mean_token_accuracy": 0.966504562497139,
19
+ "num_tokens": 973322.0,
20
+ "step": 50
21
+ },
22
+ {
23
+ "entropy": 0.048140869140625,
24
+ "epoch": 0.02,
25
+ "grad_norm": 0.057216521352529526,
26
+ "learning_rate": 3.3e-05,
27
+ "loss": 0.0424,
28
+ "mean_token_accuracy": 0.9896974349021912,
29
+ "num_tokens": 1965908.0,
30
+ "step": 100
31
+ },
32
+ {
33
+ "entropy": 0.029632568359375,
34
+ "epoch": 0.03,
35
+ "grad_norm": 0.054412033408880234,
36
+ "learning_rate": 4.966666666666667e-05,
37
+ "loss": 0.0248,
38
+ "mean_token_accuracy": 0.9936319047212601,
39
+ "num_tokens": 2931943.0,
40
+ "step": 150
41
+ },
42
+ {
43
+ "entropy": 0.02478515625,
44
+ "epoch": 0.04,
45
+ "grad_norm": 0.08800562471151352,
46
+ "learning_rate": 6.633333333333334e-05,
47
+ "loss": 0.0214,
48
+ "mean_token_accuracy": 0.9943305045366287,
49
+ "num_tokens": 3906979.0,
50
+ "step": 200
51
+ },
52
+ {
53
+ "entropy": 0.0224652099609375,
54
+ "epoch": 0.05,
55
+ "grad_norm": 0.041793134063482285,
56
+ "learning_rate": 8.3e-05,
57
+ "loss": 0.0191,
58
+ "mean_token_accuracy": 0.9948792272806167,
59
+ "num_tokens": 4891791.0,
60
+ "step": 250
61
+ },
62
+ {
63
+ "entropy": 0.01969482421875,
64
+ "epoch": 0.06,
65
+ "grad_norm": 0.04181526601314545,
66
+ "learning_rate": 9.966666666666667e-05,
67
+ "loss": 0.0173,
68
+ "mean_token_accuracy": 0.995435175895691,
69
+ "num_tokens": 5923811.0,
70
+ "step": 300
71
+ },
72
+ {
73
+ "entropy": 0.019368896484375,
74
+ "epoch": 0.07,
75
+ "grad_norm": 0.03995412588119507,
76
+ "learning_rate": 9.999370378817499e-05,
77
+ "loss": 0.0164,
78
+ "mean_token_accuracy": 0.9956372672319412,
79
+ "num_tokens": 6923169.0,
80
+ "step": 350
81
+ },
82
+ {
83
+ "entropy": 0.0204400634765625,
84
+ "epoch": 0.08,
85
+ "grad_norm": 0.06872096657752991,
86
+ "learning_rate": 9.997430021636957e-05,
87
+ "loss": 0.0163,
88
+ "mean_token_accuracy": 0.9956023478507996,
89
+ "num_tokens": 7901360.0,
90
+ "step": 400
91
+ },
92
+ {
93
+ "entropy": 0.0186175537109375,
94
+ "epoch": 0.09,
95
+ "grad_norm": 0.037068989127874374,
96
+ "learning_rate": 9.994179175045023e-05,
97
+ "loss": 0.0157,
98
+ "mean_token_accuracy": 0.9958296239376068,
99
+ "num_tokens": 8899074.0,
100
+ "step": 450
101
+ },
102
+ {
103
+ "entropy": 0.0173492431640625,
104
+ "epoch": 0.1,
105
+ "grad_norm": 0.014651700854301453,
106
+ "learning_rate": 9.989618691519873e-05,
107
+ "loss": 0.015,
108
+ "mean_token_accuracy": 0.9959587389230729,
109
+ "num_tokens": 9909340.0,
110
+ "step": 500
111
+ },
112
+ {
113
+ "entropy": 0.0183203125,
114
+ "epoch": 0.11,
115
+ "grad_norm": 0.016151444986462593,
116
+ "learning_rate": 9.983749766969271e-05,
117
+ "loss": 0.0151,
118
+ "mean_token_accuracy": 0.9959770923852921,
119
+ "num_tokens": 10894801.0,
120
+ "step": 550
121
+ },
122
+ {
123
+ "entropy": 0.02047607421875,
124
+ "epoch": 0.12,
125
+ "grad_norm": 0.021302781999111176,
126
+ "learning_rate": 9.976573940416966e-05,
127
+ "loss": 0.0157,
128
+ "mean_token_accuracy": 0.9958055526018142,
129
+ "num_tokens": 11862148.0,
130
+ "step": 600
131
+ },
132
+ {
133
+ "entropy": 0.017862548828125,
134
+ "epoch": 0.13,
135
+ "grad_norm": 0.013205700553953648,
136
+ "learning_rate": 9.968093093599106e-05,
137
+ "loss": 0.0152,
138
+ "mean_token_accuracy": 0.9958789277076722,
139
+ "num_tokens": 12851463.0,
140
+ "step": 650
141
+ },
142
+ {
143
+ "entropy": 0.0182177734375,
144
+ "epoch": 0.14,
145
+ "grad_norm": 0.013120726682245731,
146
+ "learning_rate": 9.958309450470784e-05,
147
+ "loss": 0.0144,
148
+ "mean_token_accuracy": 0.9962116169929505,
149
+ "num_tokens": 13849228.0,
150
+ "step": 700
151
+ },
152
+ {
153
+ "entropy": 0.0179010009765625,
154
+ "epoch": 0.15,
155
+ "grad_norm": 0.014701537787914276,
156
+ "learning_rate": 9.947225576622847e-05,
157
+ "loss": 0.0146,
158
+ "mean_token_accuracy": 0.9961142331361771,
159
+ "num_tokens": 14812166.0,
160
+ "step": 750
161
+ },
162
+ {
163
+ "entropy": 0.0173638916015625,
164
+ "epoch": 0.16,
165
+ "grad_norm": 0.033504419028759,
166
+ "learning_rate": 9.934844378609117e-05,
167
+ "loss": 0.0145,
168
+ "mean_token_accuracy": 0.9961155968904495,
169
+ "num_tokens": 15789120.0,
170
+ "step": 800
171
+ },
172
+ {
173
+ "entropy": 0.0183349609375,
174
+ "epoch": 0.17,
175
+ "grad_norm": 0.010470729321241379,
176
+ "learning_rate": 9.921169103184187e-05,
177
+ "loss": 0.0144,
178
+ "mean_token_accuracy": 0.9961740493774414,
179
+ "num_tokens": 16764645.0,
180
+ "step": 850
181
+ },
182
+ {
183
+ "entropy": 0.016279296875,
184
+ "epoch": 0.18,
185
+ "grad_norm": 0.02781980112195015,
186
+ "learning_rate": 9.906203336452029e-05,
187
+ "loss": 0.0142,
188
+ "mean_token_accuracy": 0.9962105673551559,
189
+ "num_tokens": 17730003.0,
190
+ "step": 900
191
+ },
192
+ {
193
+ "entropy": 0.0166461181640625,
194
+ "epoch": 0.19,
195
+ "grad_norm": 0.020350394770503044,
196
+ "learning_rate": 9.889951002925593e-05,
197
+ "loss": 0.0141,
198
+ "mean_token_accuracy": 0.9962483876943589,
199
+ "num_tokens": 18723861.0,
200
+ "step": 950
201
+ },
202
+ {
203
+ "entropy": 0.0165386962890625,
204
+ "epoch": 0.2,
205
+ "grad_norm": 0.013212243095040321,
206
+ "learning_rate": 9.872416364497675e-05,
207
+ "loss": 0.0141,
208
+ "mean_token_accuracy": 0.9961817175149917,
209
+ "num_tokens": 19695091.0,
210
+ "step": 1000
211
+ },
212
+ {
213
+ "entropy": 0.017391357421875,
214
+ "epoch": 0.21,
215
+ "grad_norm": 0.014041919261217117,
216
+ "learning_rate": 9.853604019323301e-05,
217
+ "loss": 0.0141,
218
+ "mean_token_accuracy": 0.9962570595741272,
219
+ "num_tokens": 20662255.0,
220
+ "step": 1050
221
+ },
222
+ {
223
+ "entropy": 0.016884765625,
224
+ "epoch": 0.22,
225
+ "grad_norm": 0.017174789682030678,
226
+ "learning_rate": 9.833518900613956e-05,
227
+ "loss": 0.0138,
228
+ "mean_token_accuracy": 0.9962636953592301,
229
+ "num_tokens": 21642092.0,
230
+ "step": 1100
231
+ },
232
+ {
233
+ "entropy": 0.019366455078125,
234
+ "epoch": 0.23,
235
+ "grad_norm": 0.017337976023554802,
236
+ "learning_rate": 9.812166275343917e-05,
237
+ "loss": 0.0143,
238
+ "mean_token_accuracy": 0.996159919500351,
239
+ "num_tokens": 22607505.0,
240
+ "step": 1150
241
+ },
242
+ {
243
+ "entropy": 0.0164886474609375,
244
+ "epoch": 0.24,
245
+ "grad_norm": 0.018486863002181053,
246
+ "learning_rate": 9.789551742869096e-05,
247
+ "loss": 0.0137,
248
+ "mean_token_accuracy": 0.9963399863243103,
249
+ "num_tokens": 23584579.0,
250
+ "step": 1200
251
+ },
252
+ {
253
+ "entropy": 0.016282958984375,
254
+ "epoch": 0.25,
255
+ "grad_norm": 0.009195365011692047,
256
+ "learning_rate": 9.765681233458693e-05,
257
+ "loss": 0.0136,
258
+ "mean_token_accuracy": 0.9963367432355881,
259
+ "num_tokens": 24580285.0,
260
+ "step": 1250
261
+ },
262
+ {
263
+ "entropy": 0.0197235107421875,
264
+ "epoch": 0.26,
265
+ "grad_norm": 0.01205432415008545,
266
+ "learning_rate": 9.740561006740098e-05,
267
+ "loss": 0.0144,
268
+ "mean_token_accuracy": 0.996049216389656,
269
+ "num_tokens": 25525513.0,
270
+ "step": 1300
271
+ },
272
+ {
273
+ "entropy": 0.0153009033203125,
274
+ "epoch": 0.27,
275
+ "grad_norm": 0.01174497976899147,
276
+ "learning_rate": 9.7141976500574e-05,
277
+ "loss": 0.0135,
278
+ "mean_token_accuracy": 0.9964245760440826,
279
+ "num_tokens": 26523426.0,
280
+ "step": 1350
281
+ },
282
+ {
283
+ "entropy": 0.0169378662109375,
284
+ "epoch": 0.28,
285
+ "grad_norm": 0.014242298901081085,
286
+ "learning_rate": 9.686598076743981e-05,
287
+ "loss": 0.0139,
288
+ "mean_token_accuracy": 0.9962529343366623,
289
+ "num_tokens": 27493091.0,
290
+ "step": 1400
291
+ },
292
+ {
293
+ "entropy": 0.017811279296875,
294
+ "epoch": 0.29,
295
+ "grad_norm": 0.0160844549536705,
296
+ "learning_rate": 9.657769524309605e-05,
297
+ "loss": 0.0145,
298
+ "mean_token_accuracy": 0.996181645989418,
299
+ "num_tokens": 28458553.0,
300
+ "step": 1450
301
+ },
302
+ {
303
+ "entropy": 0.0169232177734375,
304
+ "epoch": 0.3,
305
+ "grad_norm": 0.03241236135363579,
306
+ "learning_rate": 9.627719552542516e-05,
307
+ "loss": 0.014,
308
+ "mean_token_accuracy": 0.9962622857093811,
309
+ "num_tokens": 29430497.0,
310
+ "step": 1500
311
+ },
312
+ {
313
+ "entropy": 0.0163720703125,
314
+ "epoch": 0.31,
315
+ "grad_norm": 0.01750756986439228,
316
+ "learning_rate": 9.596456041527001e-05,
317
+ "loss": 0.0136,
318
+ "mean_token_accuracy": 0.9964328509569168,
319
+ "num_tokens": 30414152.0,
320
+ "step": 1550
321
+ },
322
+ {
323
+ "entropy": 0.017760009765625,
324
+ "epoch": 0.32,
325
+ "grad_norm": 0.02182549238204956,
326
+ "learning_rate": 9.563987189576991e-05,
327
+ "loss": 0.0147,
328
+ "mean_token_accuracy": 0.9961263346672058,
329
+ "num_tokens": 31396417.0,
330
+ "step": 1600
331
+ },
332
+ {
333
+ "entropy": 0.0171954345703125,
334
+ "epoch": 0.33,
335
+ "grad_norm": 0.011301269754767418,
336
+ "learning_rate": 9.530321511086183e-05,
337
+ "loss": 0.0139,
338
+ "mean_token_accuracy": 0.9962880289554596,
339
+ "num_tokens": 32364443.0,
340
+ "step": 1650
341
+ },
342
+ {
343
+ "entropy": 0.015126953125,
344
+ "epoch": 0.34,
345
+ "grad_norm": 0.009061276912689209,
346
+ "learning_rate": 9.495467834295291e-05,
347
+ "loss": 0.0134,
348
+ "mean_token_accuracy": 0.9964066076278687,
349
+ "num_tokens": 33374821.0,
350
+ "step": 1700
351
+ },
352
+ {
353
+ "entropy": 0.018043212890625,
354
+ "epoch": 0.35,
355
+ "grad_norm": 0.014127642847597599,
356
+ "learning_rate": 9.459435298976998e-05,
357
+ "loss": 0.0137,
358
+ "mean_token_accuracy": 0.9963885354995727,
359
+ "num_tokens": 34338570.0,
360
+ "step": 1750
361
+ },
362
+ {
363
+ "entropy": 0.0155584716796875,
364
+ "epoch": 0.36,
365
+ "grad_norm": 0.00913177989423275,
366
+ "learning_rate": 9.422233354039198e-05,
367
+ "loss": 0.0132,
368
+ "mean_token_accuracy": 0.9965262812376022,
369
+ "num_tokens": 35333253.0,
370
+ "step": 1800
371
+ },
372
+ {
373
+ "entropy": 0.0177911376953125,
374
+ "epoch": 0.37,
375
+ "grad_norm": 0.009629561565816402,
376
+ "learning_rate": 9.383871755047198e-05,
377
+ "loss": 0.0131,
378
+ "mean_token_accuracy": 0.9964758467674255,
379
+ "num_tokens": 36316255.0,
380
+ "step": 1850
381
+ },
382
+ {
383
+ "entropy": 0.0159539794921875,
384
+ "epoch": 0.38,
385
+ "grad_norm": 0.01395715307444334,
386
+ "learning_rate": 9.344360561665476e-05,
387
+ "loss": 0.0132,
388
+ "mean_token_accuracy": 0.9964872688055039,
389
+ "num_tokens": 37303247.0,
390
+ "step": 1900
391
+ },
392
+ {
393
+ "entropy": 0.0152783203125,
394
+ "epoch": 0.39,
395
+ "grad_norm": 0.007368483114987612,
396
+ "learning_rate": 9.30371013501972e-05,
397
+ "loss": 0.013,
398
+ "mean_token_accuracy": 0.9965988719463348,
399
+ "num_tokens": 38287463.0,
400
+ "step": 1950
401
+ },
402
+ {
403
+ "entropy": 0.0158270263671875,
404
+ "epoch": 0.4,
405
+ "grad_norm": 0.01508031040430069,
406
+ "learning_rate": 9.261931134979791e-05,
407
+ "loss": 0.0133,
408
+ "mean_token_accuracy": 0.9965020543336869,
409
+ "num_tokens": 39273383.0,
410
+ "step": 2000
411
+ },
412
+ {
413
+ "entropy": 0.0167919921875,
414
+ "epoch": 0.41,
415
+ "grad_norm": 0.009929426945745945,
416
+ "learning_rate": 9.219034517364369e-05,
417
+ "loss": 0.0131,
418
+ "mean_token_accuracy": 0.9965337771177292,
419
+ "num_tokens": 40251665.0,
420
+ "step": 2050
421
+ },
422
+ {
423
+ "entropy": 0.0153082275390625,
424
+ "epoch": 0.42,
425
+ "grad_norm": 0.007909618318080902,
426
+ "learning_rate": 9.17503153106797e-05,
427
+ "loss": 0.0129,
428
+ "mean_token_accuracy": 0.9965605139732361,
429
+ "num_tokens": 41243731.0,
430
+ "step": 2100
431
+ },
432
+ {
433
+ "entropy": 0.0154461669921875,
434
+ "epoch": 0.43,
435
+ "grad_norm": 0.015921294689178467,
436
+ "learning_rate": 9.129933715111125e-05,
437
+ "loss": 0.0129,
438
+ "mean_token_accuracy": 0.9964988273382187,
439
+ "num_tokens": 42247346.0,
440
+ "step": 2150
441
+ },
442
+ {
443
+ "entropy": 0.01652587890625,
444
+ "epoch": 0.44,
445
+ "grad_norm": 0.006036434322595596,
446
+ "learning_rate": 9.083752895614464e-05,
447
+ "loss": 0.0133,
448
+ "mean_token_accuracy": 0.9964230692386628,
449
+ "num_tokens": 43220001.0,
450
+ "step": 2200
451
+ },
452
+ {
453
+ "entropy": 0.015128173828125,
454
+ "epoch": 0.45,
455
+ "grad_norm": 0.009778963401913643,
456
+ "learning_rate": 9.03650118269753e-05,
457
+ "loss": 0.0131,
458
+ "mean_token_accuracy": 0.9965469115972518,
459
+ "num_tokens": 44205197.0,
460
+ "step": 2250
461
+ },
462
+ {
463
+ "entropy": 0.015570068359375,
464
+ "epoch": 0.46,
465
+ "grad_norm": 0.014646470546722412,
466
+ "learning_rate": 8.988190967303101e-05,
467
+ "loss": 0.0131,
468
+ "mean_token_accuracy": 0.9965356832742691,
469
+ "num_tokens": 45185181.0,
470
+ "step": 2300
471
+ },
472
+ {
473
+ "entropy": 0.01632568359375,
474
+ "epoch": 0.47,
475
+ "grad_norm": 0.006433142349123955,
476
+ "learning_rate": 8.938834917947889e-05,
477
+ "loss": 0.0132,
478
+ "mean_token_accuracy": 0.9965211725234986,
479
+ "num_tokens": 46170545.0,
480
+ "step": 2350
481
+ },
482
+ {
483
+ "entropy": 0.01555419921875,
484
+ "epoch": 0.48,
485
+ "grad_norm": 0.007329673506319523,
486
+ "learning_rate": 8.888445977400435e-05,
487
+ "loss": 0.0131,
488
+ "mean_token_accuracy": 0.9965089631080627,
489
+ "num_tokens": 47152165.0,
490
+ "step": 2400
491
+ },
492
+ {
493
+ "entropy": 0.0156890869140625,
494
+ "epoch": 0.49,
495
+ "grad_norm": 0.010034145787358284,
496
+ "learning_rate": 8.837037359287092e-05,
497
+ "loss": 0.0131,
498
+ "mean_token_accuracy": 0.9965383523702621,
499
+ "num_tokens": 48122023.0,
500
+ "step": 2450
501
+ },
502
+ {
503
+ "entropy": 0.0163201904296875,
504
+ "epoch": 0.5,
505
+ "grad_norm": 0.008338450454175472,
506
+ "learning_rate": 8.784622544626985e-05,
507
+ "loss": 0.0131,
508
+ "mean_token_accuracy": 0.9965311539173126,
509
+ "num_tokens": 49094218.0,
510
+ "step": 2500
511
+ },
512
+ {
513
+ "entropy": 0.0152789306640625,
514
+ "epoch": 0.51,
515
+ "grad_norm": 0.007771766744554043,
516
+ "learning_rate": 8.731215278296843e-05,
517
+ "loss": 0.0128,
518
+ "mean_token_accuracy": 0.9965461915731431,
519
+ "num_tokens": 50090248.0,
520
+ "step": 2550
521
+ },
522
+ {
523
+ "entropy": 0.0171893310546875,
524
+ "epoch": 0.52,
525
+ "grad_norm": 0.010534558445215225,
526
+ "learning_rate": 8.676829565426646e-05,
527
+ "loss": 0.0132,
528
+ "mean_token_accuracy": 0.9964153295755387,
529
+ "num_tokens": 51055771.0,
530
+ "step": 2600
531
+ },
532
+ {
533
+ "entropy": 0.0148272705078125,
534
+ "epoch": 0.53,
535
+ "grad_norm": 0.007072142791002989,
536
+ "learning_rate": 8.62147966772702e-05,
537
+ "loss": 0.0129,
538
+ "mean_token_accuracy": 0.9966214698553085,
539
+ "num_tokens": 52051029.0,
540
+ "step": 2650
541
+ },
542
+ {
543
+ "entropy": 0.0146075439453125,
544
+ "epoch": 0.54,
545
+ "grad_norm": 0.007414950989186764,
546
+ "learning_rate": 8.565180099749355e-05,
547
+ "loss": 0.0125,
548
+ "mean_token_accuracy": 0.9966325616836548,
549
+ "num_tokens": 53064519.0,
550
+ "step": 2700
551
+ },
552
+ {
553
+ "entropy": 0.0161309814453125,
554
+ "epoch": 0.55,
555
+ "grad_norm": 0.0070669627748429775,
556
+ "learning_rate": 8.50794562507961e-05,
557
+ "loss": 0.013,
558
+ "mean_token_accuracy": 0.9964843678474427,
559
+ "num_tokens": 54043844.0,
560
+ "step": 2750
561
+ },
562
+ {
563
+ "entropy": 0.015706787109375,
564
+ "epoch": 0.56,
565
+ "grad_norm": 0.005744470749050379,
566
+ "learning_rate": 8.449791252466819e-05,
567
+ "loss": 0.013,
568
+ "mean_token_accuracy": 0.9965711253881454,
569
+ "num_tokens": 55026422.0,
570
+ "step": 2800
571
+ },
572
+ {
573
+ "entropy": 0.01641357421875,
574
+ "epoch": 0.57,
575
+ "grad_norm": 0.0069727362133562565,
576
+ "learning_rate": 8.390732231887314e-05,
577
+ "loss": 0.0129,
578
+ "mean_token_accuracy": 0.996541822552681,
579
+ "num_tokens": 56009908.0,
580
+ "step": 2850
581
+ },
582
+ {
583
+ "entropy": 0.016571044921875,
584
+ "epoch": 0.58,
585
+ "grad_norm": 0.007201408036053181,
586
+ "learning_rate": 8.330784050545672e-05,
587
+ "loss": 0.0133,
588
+ "mean_token_accuracy": 0.9964863443374634,
589
+ "num_tokens": 56948904.0,
590
+ "step": 2900
591
+ },
592
+ {
593
+ "entropy": 0.0156451416015625,
594
+ "epoch": 0.59,
595
+ "grad_norm": 0.005370237864553928,
596
+ "learning_rate": 8.269962428813474e-05,
597
+ "loss": 0.0128,
598
+ "mean_token_accuracy": 0.9966201782226562,
599
+ "num_tokens": 57925745.0,
600
+ "step": 2950
601
+ },
602
+ {
603
+ "entropy": 0.01558837890625,
604
+ "epoch": 0.6,
605
+ "grad_norm": 0.005006297491490841,
606
+ "learning_rate": 8.208283316106902e-05,
607
+ "loss": 0.0127,
608
+ "mean_token_accuracy": 0.9966268092393875,
609
+ "num_tokens": 58902798.0,
610
+ "step": 3000
611
+ },
612
+ {
613
+ "entropy": 0.015733642578125,
614
+ "epoch": 0.61,
615
+ "grad_norm": 0.06520090997219086,
616
+ "learning_rate": 8.145762886704286e-05,
617
+ "loss": 0.013,
618
+ "mean_token_accuracy": 0.9965688252449035,
619
+ "num_tokens": 59860890.0,
620
+ "step": 3050
621
+ },
622
+ {
623
+ "entropy": 0.015631103515625,
624
+ "epoch": 0.62,
625
+ "grad_norm": 0.017850181087851524,
626
+ "learning_rate": 8.082417535504683e-05,
627
+ "loss": 0.0134,
628
+ "mean_token_accuracy": 0.9963969469070435,
629
+ "num_tokens": 60845473.0,
630
+ "step": 3100
631
+ },
632
+ {
633
+ "entropy": 0.0175927734375,
634
+ "epoch": 0.63,
635
+ "grad_norm": 0.017514687031507492,
636
+ "learning_rate": 8.018263873728585e-05,
637
+ "loss": 0.014,
638
+ "mean_token_accuracy": 0.9963253819942475,
639
+ "num_tokens": 61784192.0,
640
+ "step": 3150
641
+ },
642
+ {
643
+ "entropy": 0.0167742919921875,
644
+ "epoch": 0.64,
645
+ "grad_norm": 0.008665528148412704,
646
+ "learning_rate": 7.953318724561932e-05,
647
+ "loss": 0.0132,
648
+ "mean_token_accuracy": 0.9964195656776428,
649
+ "num_tokens": 62766207.0,
650
+ "step": 3200
651
+ },
652
+ {
653
+ "entropy": 0.0157501220703125,
654
+ "epoch": 0.65,
655
+ "grad_norm": 0.019448991864919662,
656
+ "learning_rate": 7.887599118744509e-05,
657
+ "loss": 0.0128,
658
+ "mean_token_accuracy": 0.9966711962223053,
659
+ "num_tokens": 63762073.0,
660
+ "step": 3250
661
+ },
662
+ {
663
+ "entropy": 0.0180609130859375,
664
+ "epoch": 0.66,
665
+ "grad_norm": 0.004935233388096094,
666
+ "learning_rate": 7.821122290103938e-05,
667
+ "loss": 0.0133,
668
+ "mean_token_accuracy": 0.9964338165521621,
669
+ "num_tokens": 64710323.0,
670
+ "step": 3300
671
+ },
672
+ {
673
+ "entropy": 0.015982666015625,
674
+ "epoch": 0.67,
675
+ "grad_norm": 0.004395989701151848,
676
+ "learning_rate": 7.753905671036403e-05,
677
+ "loss": 0.0128,
678
+ "mean_token_accuracy": 0.9966453295946122,
679
+ "num_tokens": 65703355.0,
680
+ "step": 3350
681
+ },
682
+ {
683
+ "entropy": 0.0160992431640625,
684
+ "epoch": 0.68,
685
+ "grad_norm": 0.01108239684253931,
686
+ "learning_rate": 7.685966887935309e-05,
687
+ "loss": 0.0128,
688
+ "mean_token_accuracy": 0.996572095155716,
689
+ "num_tokens": 66693270.0,
690
+ "step": 3400
691
+ },
692
+ {
693
+ "entropy": 0.0156439208984375,
694
+ "epoch": 0.69,
695
+ "grad_norm": 0.007867238484323025,
696
+ "learning_rate": 7.617323756569053e-05,
697
+ "loss": 0.013,
698
+ "mean_token_accuracy": 0.9965278363227844,
699
+ "num_tokens": 67662491.0,
700
+ "step": 3450
701
+ },
702
+ {
703
+ "entropy": 0.0155462646484375,
704
+ "epoch": 0.7,
705
+ "grad_norm": 0.006034135352820158,
706
+ "learning_rate": 7.547994277409168e-05,
707
+ "loss": 0.0128,
708
+ "mean_token_accuracy": 0.9966409939527512,
709
+ "num_tokens": 68643197.0,
710
+ "step": 3500
711
+ },
712
+ {
713
+ "entropy": 0.015638427734375,
714
+ "epoch": 0.71,
715
+ "grad_norm": 0.007907884195446968,
716
+ "learning_rate": 7.477996630909994e-05,
717
+ "loss": 0.0127,
718
+ "mean_token_accuracy": 0.9966798382997513,
719
+ "num_tokens": 69638359.0,
720
+ "step": 3550
721
+ },
722
+ {
723
+ "entropy": 0.0161322021484375,
724
+ "epoch": 0.72,
725
+ "grad_norm": 0.007897689007222652,
726
+ "learning_rate": 7.40734917274118e-05,
727
+ "loss": 0.0127,
728
+ "mean_token_accuracy": 0.9966677987575531,
729
+ "num_tokens": 70621331.0,
730
+ "step": 3600
731
+ },
732
+ {
733
+ "entropy": 0.015335693359375,
734
+ "epoch": 0.73,
735
+ "grad_norm": 0.006248582154512405,
736
+ "learning_rate": 7.336070428974218e-05,
737
+ "loss": 0.0127,
738
+ "mean_token_accuracy": 0.9966181468963623,
739
+ "num_tokens": 71609172.0,
740
+ "step": 3650
741
+ },
742
+ {
743
+ "entropy": 0.0166754150390625,
744
+ "epoch": 0.74,
745
+ "grad_norm": 0.009811230935156345,
746
+ "learning_rate": 7.26417909122431e-05,
747
+ "loss": 0.0127,
748
+ "mean_token_accuracy": 0.9965909200906754,
749
+ "num_tokens": 72589978.0,
750
+ "step": 3700
751
+ },
752
+ {
753
+ "entropy": 0.0143695068359375,
754
+ "epoch": 0.75,
755
+ "grad_norm": 0.012172735296189785,
756
+ "learning_rate": 7.191694011748818e-05,
757
+ "loss": 0.0131,
758
+ "mean_token_accuracy": 0.9965182906389236,
759
+ "num_tokens": 73606622.0,
760
+ "step": 3750
761
+ },
762
+ {
763
+ "entropy": 0.0156829833984375,
764
+ "epoch": 0.76,
765
+ "grad_norm": 0.004879123531281948,
766
+ "learning_rate": 7.118634198503571e-05,
767
+ "loss": 0.0129,
768
+ "mean_token_accuracy": 0.9965479630231857,
769
+ "num_tokens": 74626982.0,
770
+ "step": 3800
771
+ },
772
+ {
773
+ "entropy": 0.016346435546875,
774
+ "epoch": 0.77,
775
+ "grad_norm": 0.005593061912804842,
776
+ "learning_rate": 7.045018810158375e-05,
777
+ "loss": 0.0127,
778
+ "mean_token_accuracy": 0.9966219502687454,
779
+ "num_tokens": 75622857.0,
780
+ "step": 3850
781
+ },
782
+ {
783
+ "entropy": 0.015889892578125,
784
+ "epoch": 0.78,
785
+ "grad_norm": 0.006423663813620806,
786
+ "learning_rate": 6.97086715107298e-05,
787
+ "loss": 0.0128,
788
+ "mean_token_accuracy": 0.9966537064313888,
789
+ "num_tokens": 76601614.0,
790
+ "step": 3900
791
+ },
792
+ {
793
+ "entropy": 0.015166015625,
794
+ "epoch": 0.79,
795
+ "grad_norm": 0.009724115021526814,
796
+ "learning_rate": 6.896198666234833e-05,
797
+ "loss": 0.0125,
798
+ "mean_token_accuracy": 0.9966633880138397,
799
+ "num_tokens": 77598059.0,
800
+ "step": 3950
801
+ },
802
+ {
803
+ "entropy": 0.0157177734375,
804
+ "epoch": 0.8,
805
+ "grad_norm": 0.014047837816178799,
806
+ "learning_rate": 6.821032936159986e-05,
807
+ "loss": 0.0128,
808
+ "mean_token_accuracy": 0.996613291501999,
809
+ "num_tokens": 78570180.0,
810
+ "step": 4000
811
+ },
812
+ {
813
+ "entropy": 0.0155902099609375,
814
+ "epoch": 0.81,
815
+ "grad_norm": 0.008200579322874546,
816
+ "learning_rate": 6.745389671758435e-05,
817
+ "loss": 0.0127,
818
+ "mean_token_accuracy": 0.9965583562850953,
819
+ "num_tokens": 79561943.0,
820
+ "step": 4050
821
+ },
822
+ {
823
+ "entropy": 0.014603271484375,
824
+ "epoch": 0.82,
825
+ "grad_norm": 0.0051344577223062515,
826
+ "learning_rate": 6.669288709165276e-05,
827
+ "loss": 0.0122,
828
+ "mean_token_accuracy": 0.9967030560970307,
829
+ "num_tokens": 80576757.0,
830
+ "step": 4100
831
+ },
832
+ {
833
+ "entropy": 0.0161895751953125,
834
+ "epoch": 0.83,
835
+ "grad_norm": 0.00540529889985919,
836
+ "learning_rate": 6.59275000453902e-05,
837
+ "loss": 0.0127,
838
+ "mean_token_accuracy": 0.9966316163539887,
839
+ "num_tokens": 81549255.0,
840
+ "step": 4150
841
+ },
842
+ {
843
+ "entropy": 0.01657470703125,
844
+ "epoch": 0.84,
845
+ "grad_norm": 0.005644636228680611,
846
+ "learning_rate": 6.515793628828447e-05,
847
+ "loss": 0.0131,
848
+ "mean_token_accuracy": 0.9965110903978348,
849
+ "num_tokens": 82499683.0,
850
+ "step": 4200
851
+ },
852
+ {
853
+ "entropy": 0.0160162353515625,
854
+ "epoch": 0.85,
855
+ "grad_norm": 0.004190398845821619,
856
+ "learning_rate": 6.438439762509332e-05,
857
+ "loss": 0.0127,
858
+ "mean_token_accuracy": 0.9966331452131272,
859
+ "num_tokens": 83475810.0,
860
+ "step": 4250
861
+ },
862
+ {
863
+ "entropy": 0.01598388671875,
864
+ "epoch": 0.86,
865
+ "grad_norm": 0.005400759633630514,
866
+ "learning_rate": 6.360708690292479e-05,
867
+ "loss": 0.0129,
868
+ "mean_token_accuracy": 0.9966162234544754,
869
+ "num_tokens": 84432102.0,
870
+ "step": 4300
871
+ },
872
+ {
873
+ "entropy": 0.014876708984375,
874
+ "epoch": 0.87,
875
+ "grad_norm": 0.012628648430109024,
876
+ "learning_rate": 6.2826207958044e-05,
877
+ "loss": 0.0124,
878
+ "mean_token_accuracy": 0.9967326593399047,
879
+ "num_tokens": 85429626.0,
880
+ "step": 4350
881
+ },
882
+ {
883
+ "entropy": 0.014339599609375,
884
+ "epoch": 0.88,
885
+ "grad_norm": 0.009649231098592281,
886
+ "learning_rate": 6.204196556242061e-05,
887
+ "loss": 0.0121,
888
+ "mean_token_accuracy": 0.9967543077468872,
889
+ "num_tokens": 86450572.0,
890
+ "step": 4400
891
+ },
892
+ {
893
+ "entropy": 0.014298095703125,
894
+ "epoch": 0.89,
895
+ "grad_norm": 0.0049432734958827496,
896
+ "learning_rate": 6.125456537003095e-05,
897
+ "loss": 0.0122,
898
+ "mean_token_accuracy": 0.9967639350891113,
899
+ "num_tokens": 87458843.0,
900
+ "step": 4450
901
+ },
902
+ {
903
+ "entropy": 0.015306396484375,
904
+ "epoch": 0.9,
905
+ "grad_norm": 0.004098173696547747,
906
+ "learning_rate": 6.046421386292887e-05,
907
+ "loss": 0.0123,
908
+ "mean_token_accuracy": 0.9966574615240097,
909
+ "num_tokens": 88453316.0,
910
+ "step": 4500
911
+ },
912
+ {
913
+ "entropy": 0.016375732421875,
914
+ "epoch": 0.91,
915
+ "grad_norm": 0.005302398465573788,
916
+ "learning_rate": 5.9671118297099274e-05,
917
+ "loss": 0.0125,
918
+ "mean_token_accuracy": 0.9966228741407395,
919
+ "num_tokens": 89427450.0,
920
+ "step": 4550
921
+ },
922
+ {
923
+ "entropy": 0.015936279296875,
924
+ "epoch": 0.92,
925
+ "grad_norm": 0.004837509244680405,
926
+ "learning_rate": 5.887548664810896e-05,
927
+ "loss": 0.0126,
928
+ "mean_token_accuracy": 0.9966443425416946,
929
+ "num_tokens": 90400676.0,
930
+ "step": 4600
931
+ },
932
+ {
933
+ "entropy": 0.0147259521484375,
934
+ "epoch": 0.93,
935
+ "grad_norm": 0.008831442333757877,
936
+ "learning_rate": 5.8077527556568614e-05,
937
+ "loss": 0.0124,
938
+ "mean_token_accuracy": 0.9967519730329514,
939
+ "num_tokens": 91394757.0,
940
+ "step": 4650
941
+ },
942
+ {
943
+ "entropy": 0.01557373046875,
944
+ "epoch": 0.94,
945
+ "grad_norm": 0.0063737258315086365,
946
+ "learning_rate": 5.727745027342031e-05,
947
+ "loss": 0.0125,
948
+ "mean_token_accuracy": 0.9966205650568009,
949
+ "num_tokens": 92386137.0,
950
+ "step": 4700
951
+ },
952
+ {
953
+ "entropy": 0.0151513671875,
954
+ "epoch": 0.95,
955
+ "grad_norm": 0.005380143877118826,
956
+ "learning_rate": 5.64754646050652e-05,
957
+ "loss": 0.0125,
958
+ "mean_token_accuracy": 0.9965577948093415,
959
+ "num_tokens": 93374756.0,
960
+ "step": 4750
961
+ },
962
+ {
963
+ "entropy": 0.0138897705078125,
964
+ "epoch": 0.96,
965
+ "grad_norm": 0.005837676115334034,
966
+ "learning_rate": 5.567178085834542e-05,
967
+ "loss": 0.0121,
968
+ "mean_token_accuracy": 0.9967484200000762,
969
+ "num_tokens": 94389846.0,
970
+ "step": 4800
971
+ },
972
+ {
973
+ "entropy": 0.0154791259765625,
974
+ "epoch": 0.97,
975
+ "grad_norm": 0.008733139373362064,
976
+ "learning_rate": 5.486660978539468e-05,
977
+ "loss": 0.0125,
978
+ "mean_token_accuracy": 0.9966599184274674,
979
+ "num_tokens": 95378637.0,
980
+ "step": 4850
981
+ },
982
+ {
983
+ "entropy": 0.015618896484375,
984
+ "epoch": 0.98,
985
+ "grad_norm": 0.016466081142425537,
986
+ "learning_rate": 5.406016252837219e-05,
987
+ "loss": 0.0126,
988
+ "mean_token_accuracy": 0.9966346627473831,
989
+ "num_tokens": 96357363.0,
990
+ "step": 4900
991
+ },
992
+ {
993
+ "entropy": 0.0171966552734375,
994
+ "epoch": 0.99,
995
+ "grad_norm": 0.007259083911776543,
996
+ "learning_rate": 5.325265056409442e-05,
997
+ "loss": 0.0129,
998
+ "mean_token_accuracy": 0.9965799397230148,
999
+ "num_tokens": 97295349.0,
1000
+ "step": 4950
1001
+ },
1002
+ {
1003
+ "entropy": 0.01597900390625,
1004
+ "epoch": 1.0,
1005
+ "grad_norm": 0.007410865277051926,
1006
+ "learning_rate": 5.244428564857874e-05,
1007
+ "loss": 0.0127,
1008
+ "mean_token_accuracy": 0.996621105670929,
1009
+ "num_tokens": 98262502.0,
1010
+ "step": 5000
1011
+ },
1012
+ {
1013
+ "entropy": 0.0154595947265625,
1014
+ "epoch": 1.01,
1015
+ "grad_norm": 0.0048329527489840984,
1016
+ "learning_rate": 5.163527976151428e-05,
1017
+ "loss": 0.0125,
1018
+ "mean_token_accuracy": 0.9966327011585235,
1019
+ "num_tokens": 99235771.0,
1020
+ "step": 5050
1021
+ },
1022
+ {
1023
+ "entropy": 0.0155859375,
1024
+ "epoch": 1.02,
1025
+ "grad_norm": 0.004490161780267954,
1026
+ "learning_rate": 5.0825845050673834e-05,
1027
+ "loss": 0.0129,
1028
+ "mean_token_accuracy": 0.9965754437446595,
1029
+ "num_tokens": 100195990.0,
1030
+ "step": 5100
1031
+ },
1032
+ {
1033
+ "entropy": 0.01652099609375,
1034
+ "epoch": 1.03,
1035
+ "grad_norm": 0.00914011336863041,
1036
+ "learning_rate": 5.0016193776281794e-05,
1037
+ "loss": 0.013,
1038
+ "mean_token_accuracy": 0.9964404582977295,
1039
+ "num_tokens": 101140160.0,
1040
+ "step": 5150
1041
+ },
1042
+ {
1043
+ "entropy": 0.015284423828125,
1044
+ "epoch": 1.04,
1045
+ "grad_norm": 0.0057498314417898655,
1046
+ "learning_rate": 4.920653825535258e-05,
1047
+ "loss": 0.0125,
1048
+ "mean_token_accuracy": 0.99665458381176,
1049
+ "num_tokens": 102128346.0,
1050
+ "step": 5200
1051
+ },
1052
+ {
1053
+ "entropy": 0.015015869140625,
1054
+ "epoch": 1.05,
1055
+ "grad_norm": 0.004660275299102068,
1056
+ "learning_rate": 4.839709080601419e-05,
1057
+ "loss": 0.0123,
1058
+ "mean_token_accuracy": 0.9967097079753876,
1059
+ "num_tokens": 103125113.0,
1060
+ "step": 5250
1061
+ },
1062
+ {
1063
+ "entropy": 0.0143218994140625,
1064
+ "epoch": 1.06,
1065
+ "grad_norm": 0.006082482635974884,
1066
+ "learning_rate": 4.758806369183149e-05,
1067
+ "loss": 0.0121,
1068
+ "mean_token_accuracy": 0.9967189061641694,
1069
+ "num_tokens": 104136988.0,
1070
+ "step": 5300
1071
+ },
1072
+ {
1073
+ "entropy": 0.0155865478515625,
1074
+ "epoch": 1.07,
1075
+ "grad_norm": 0.00519182626157999,
1076
+ "learning_rate": 4.6779669066143716e-05,
1077
+ "loss": 0.0124,
1078
+ "mean_token_accuracy": 0.9966564351320266,
1079
+ "num_tokens": 105122070.0,
1080
+ "step": 5350
1081
+ },
1082
+ {
1083
+ "entropy": 0.0145452880859375,
1084
+ "epoch": 1.08,
1085
+ "grad_norm": 0.005050841718912125,
1086
+ "learning_rate": 4.597211891643093e-05,
1087
+ "loss": 0.0122,
1088
+ "mean_token_accuracy": 0.9967609119415283,
1089
+ "num_tokens": 106117109.0,
1090
+ "step": 5400
1091
+ },
1092
+ {
1093
+ "entropy": 0.0144915771484375,
1094
+ "epoch": 1.09,
1095
+ "grad_norm": 0.004848674405366182,
1096
+ "learning_rate": 4.5165625008724035e-05,
1097
+ "loss": 0.0123,
1098
+ "mean_token_accuracy": 0.9967008078098297,
1099
+ "num_tokens": 107110319.0,
1100
+ "step": 5450
1101
+ },
1102
+ {
1103
+ "entropy": 0.0150311279296875,
1104
+ "epoch": 1.1,
1105
+ "grad_norm": 0.0036910909693688154,
1106
+ "learning_rate": 4.4360398832072764e-05,
1107
+ "loss": 0.0122,
1108
+ "mean_token_accuracy": 0.9967303448915481,
1109
+ "num_tokens": 108104820.0,
1110
+ "step": 5500
1111
+ },
1112
+ {
1113
+ "entropy": 0.015078125,
1114
+ "epoch": 1.11,
1115
+ "grad_norm": 0.0053949966095387936,
1116
+ "learning_rate": 4.3556651543086364e-05,
1117
+ "loss": 0.0125,
1118
+ "mean_token_accuracy": 0.9966647911071778,
1119
+ "num_tokens": 109091493.0,
1120
+ "step": 5550
1121
+ },
1122
+ {
1123
+ "entropy": 0.0147705078125,
1124
+ "epoch": 1.12,
1125
+ "grad_norm": 0.0037009185180068016,
1126
+ "learning_rate": 4.275459391056142e-05,
1127
+ "loss": 0.0124,
1128
+ "mean_token_accuracy": 0.9966964650154114,
1129
+ "num_tokens": 110077375.0,
1130
+ "step": 5600
1131
+ },
1132
+ {
1133
+ "entropy": 0.0162713623046875,
1134
+ "epoch": 1.13,
1135
+ "grad_norm": 0.0031313870567828417,
1136
+ "learning_rate": 4.195443626021139e-05,
1137
+ "loss": 0.0126,
1138
+ "mean_token_accuracy": 0.9966827458143235,
1139
+ "num_tokens": 111042295.0,
1140
+ "step": 5650
1141
+ },
1142
+ {
1143
+ "entropy": 0.0145147705078125,
1144
+ "epoch": 1.1400000000000001,
1145
+ "grad_norm": 0.004629215691238642,
1146
+ "learning_rate": 4.1156388419512324e-05,
1147
+ "loss": 0.0124,
1148
+ "mean_token_accuracy": 0.9966849237680435,
1149
+ "num_tokens": 112033836.0,
1150
+ "step": 5700
1151
+ },
1152
+ {
1153
+ "entropy": 0.0152557373046875,
1154
+ "epoch": 1.15,
1155
+ "grad_norm": 0.00527191162109375,
1156
+ "learning_rate": 4.0360659662679265e-05,
1157
+ "loss": 0.0125,
1158
+ "mean_token_accuracy": 0.996674497127533,
1159
+ "num_tokens": 113006848.0,
1160
+ "step": 5750
1161
+ },
1162
+ {
1163
+ "entropy": 0.0149151611328125,
1164
+ "epoch": 1.16,
1165
+ "grad_norm": 0.004986864514648914,
1166
+ "learning_rate": 3.956745865578773e-05,
1167
+ "loss": 0.0124,
1168
+ "mean_token_accuracy": 0.9966980391740798,
1169
+ "num_tokens": 113998253.0,
1170
+ "step": 5800
1171
+ },
1172
+ {
1173
+ "entropy": 0.015072021484375,
1174
+ "epoch": 1.17,
1175
+ "grad_norm": 0.004452258348464966,
1176
+ "learning_rate": 3.877699340205455e-05,
1177
+ "loss": 0.0125,
1178
+ "mean_token_accuracy": 0.9966688090562821,
1179
+ "num_tokens": 114979026.0,
1180
+ "step": 5850
1181
+ },
1182
+ {
1183
+ "entropy": 0.0157171630859375,
1184
+ "epoch": 1.18,
1185
+ "grad_norm": 0.0048246984370052814,
1186
+ "learning_rate": 3.798947118729282e-05,
1187
+ "loss": 0.0126,
1188
+ "mean_token_accuracy": 0.9966557210683823,
1189
+ "num_tokens": 115947279.0,
1190
+ "step": 5900
1191
+ },
1192
+ {
1193
+ "entropy": 0.015772705078125,
1194
+ "epoch": 1.19,
1195
+ "grad_norm": 0.003676283173263073,
1196
+ "learning_rate": 3.720509852555456e-05,
1197
+ "loss": 0.0127,
1198
+ "mean_token_accuracy": 0.9966129893064499,
1199
+ "num_tokens": 116911661.0,
1200
+ "step": 5950
1201
+ },
1202
+ {
1203
+ "entropy": 0.01443603515625,
1204
+ "epoch": 1.2,
1205
+ "grad_norm": 0.004810075741261244,
1206
+ "learning_rate": 3.642408110497616e-05,
1207
+ "loss": 0.0125,
1208
+ "mean_token_accuracy": 0.9966686731576919,
1209
+ "num_tokens": 117890306.0,
1210
+ "step": 6000
1211
+ },
1212
+ {
1213
+ "entropy": 0.0159271240234375,
1214
+ "epoch": 1.21,
1215
+ "grad_norm": 0.003690144745633006,
1216
+ "learning_rate": 3.5646623733840134e-05,
1217
+ "loss": 0.0126,
1218
+ "mean_token_accuracy": 0.9966743797063827,
1219
+ "num_tokens": 118844495.0,
1220
+ "step": 6050
1221
+ },
1222
+ {
1223
+ "entropy": 0.0148345947265625,
1224
+ "epoch": 1.22,
1225
+ "grad_norm": 0.0036596781574189663,
1226
+ "learning_rate": 3.4872930286867763e-05,
1227
+ "loss": 0.0127,
1228
+ "mean_token_accuracy": 0.9966356498003006,
1229
+ "num_tokens": 119807321.0,
1230
+ "step": 6100
1231
+ },
1232
+ {
1233
+ "entropy": 0.0147174072265625,
1234
+ "epoch": 1.23,
1235
+ "grad_norm": 0.0046323081478476524,
1236
+ "learning_rate": 3.4103203651756403e-05,
1237
+ "loss": 0.0123,
1238
+ "mean_token_accuracy": 0.9967032814025879,
1239
+ "num_tokens": 120794059.0,
1240
+ "step": 6150
1241
+ },
1242
+ {
1243
+ "entropy": 0.0155657958984375,
1244
+ "epoch": 1.24,
1245
+ "grad_norm": 0.005745012313127518,
1246
+ "learning_rate": 3.333764567597579e-05,
1247
+ "loss": 0.0125,
1248
+ "mean_token_accuracy": 0.996661651134491,
1249
+ "num_tokens": 121765426.0,
1250
+ "step": 6200
1251
+ },
1252
+ {
1253
+ "entropy": 0.0151751708984375,
1254
+ "epoch": 1.25,
1255
+ "grad_norm": 0.00422937236726284,
1256
+ "learning_rate": 3.257645711383699e-05,
1257
+ "loss": 0.0124,
1258
+ "mean_token_accuracy": 0.9967177057266235,
1259
+ "num_tokens": 122747186.0,
1260
+ "step": 6250
1261
+ },
1262
+ {
1263
+ "entropy": 0.014896240234375,
1264
+ "epoch": 1.26,
1265
+ "grad_norm": 0.0041396524757146835,
1266
+ "learning_rate": 3.1819837573848055e-05,
1267
+ "loss": 0.0123,
1268
+ "mean_token_accuracy": 0.9967236334085464,
1269
+ "num_tokens": 123735045.0,
1270
+ "step": 6300
1271
+ },
1272
+ {
1273
+ "entropy": 0.014747314453125,
1274
+ "epoch": 1.27,
1275
+ "grad_norm": 0.0034245741553604603,
1276
+ "learning_rate": 3.106798546637019e-05,
1277
+ "loss": 0.012,
1278
+ "mean_token_accuracy": 0.9968506091833115,
1279
+ "num_tokens": 124748439.0,
1280
+ "step": 6350
1281
+ },
1282
+ {
1283
+ "entropy": 0.014637451171875,
1284
+ "epoch": 1.28,
1285
+ "grad_norm": 0.0044257547706365585,
1286
+ "learning_rate": 3.0321097951588016e-05,
1287
+ "loss": 0.0125,
1288
+ "mean_token_accuracy": 0.996695158481598,
1289
+ "num_tokens": 125723365.0,
1290
+ "step": 6400
1291
+ },
1292
+ {
1293
+ "entropy": 0.0153790283203125,
1294
+ "epoch": 1.29,
1295
+ "grad_norm": 0.005239363294094801,
1296
+ "learning_rate": 2.957937088780779e-05,
1297
+ "loss": 0.0126,
1298
+ "mean_token_accuracy": 0.9966009676456451,
1299
+ "num_tokens": 126678062.0,
1300
+ "step": 6450
1301
+ },
1302
+ {
1303
+ "entropy": 0.0145068359375,
1304
+ "epoch": 1.3,
1305
+ "grad_norm": 0.005349221173673868,
1306
+ "learning_rate": 2.8842998780096896e-05,
1307
+ "loss": 0.0123,
1308
+ "mean_token_accuracy": 0.9967689532041549,
1309
+ "num_tokens": 127675372.0,
1310
+ "step": 6500
1311
+ },
1312
+ {
1313
+ "entropy": 0.0149395751953125,
1314
+ "epoch": 1.31,
1315
+ "grad_norm": 0.005388084799051285,
1316
+ "learning_rate": 2.811217472927835e-05,
1317
+ "loss": 0.0123,
1318
+ "mean_token_accuracy": 0.9967477923631668,
1319
+ "num_tokens": 128665669.0,
1320
+ "step": 6550
1321
+ },
1322
+ {
1323
+ "entropy": 0.0159820556640625,
1324
+ "epoch": 1.32,
1325
+ "grad_norm": 0.006312755402177572,
1326
+ "learning_rate": 2.7387090381293372e-05,
1327
+ "loss": 0.0127,
1328
+ "mean_token_accuracy": 0.9966998076438904,
1329
+ "num_tokens": 129622542.0,
1330
+ "step": 6600
1331
+ },
1332
+ {
1333
+ "entropy": 0.016163330078125,
1334
+ "epoch": 1.33,
1335
+ "grad_norm": 0.005128033459186554,
1336
+ "learning_rate": 2.6667935876945582e-05,
1337
+ "loss": 0.0124,
1338
+ "mean_token_accuracy": 0.9966605240106583,
1339
+ "num_tokens": 130598823.0,
1340
+ "step": 6650
1341
+ },
1342
+ {
1343
+ "entropy": 0.017476806640625,
1344
+ "epoch": 1.34,
1345
+ "grad_norm": 0.005179600324481726,
1346
+ "learning_rate": 2.59548998020399e-05,
1347
+ "loss": 0.0127,
1348
+ "mean_token_accuracy": 0.996593764424324,
1349
+ "num_tokens": 131552204.0,
1350
+ "step": 6700
1351
+ },
1352
+ {
1353
+ "entropy": 0.0151275634765625,
1354
+ "epoch": 1.35,
1355
+ "grad_norm": 0.005807869601994753,
1356
+ "learning_rate": 2.5248169137929156e-05,
1357
+ "loss": 0.0126,
1358
+ "mean_token_accuracy": 0.9966244864463806,
1359
+ "num_tokens": 132526021.0,
1360
+ "step": 6750
1361
+ },
1362
+ {
1363
+ "entropy": 0.015770263671875,
1364
+ "epoch": 1.3599999999999999,
1365
+ "grad_norm": 0.005202575121074915,
1366
+ "learning_rate": 2.4547929212481435e-05,
1367
+ "loss": 0.0127,
1368
+ "mean_token_accuracy": 0.9966981512308121,
1369
+ "num_tokens": 133483207.0,
1370
+ "step": 6800
1371
+ },
1372
+ {
1373
+ "entropy": 0.014925537109375,
1374
+ "epoch": 1.37,
1375
+ "grad_norm": 0.003400217741727829,
1376
+ "learning_rate": 2.3854363651481194e-05,
1377
+ "loss": 0.0122,
1378
+ "mean_token_accuracy": 0.9967307335138321,
1379
+ "num_tokens": 134482802.0,
1380
+ "step": 6850
1381
+ },
1382
+ {
1383
+ "entropy": 0.0151629638671875,
1384
+ "epoch": 1.38,
1385
+ "grad_norm": 0.004814519081264734,
1386
+ "learning_rate": 2.3167654330476412e-05,
1387
+ "loss": 0.0121,
1388
+ "mean_token_accuracy": 0.996745794415474,
1389
+ "num_tokens": 135484861.0,
1390
+ "step": 6900
1391
+ },
1392
+ {
1393
+ "entropy": 0.0138885498046875,
1394
+ "epoch": 1.3900000000000001,
1395
+ "grad_norm": 0.0049650054425001144,
1396
+ "learning_rate": 2.2487981327084995e-05,
1397
+ "loss": 0.012,
1398
+ "mean_token_accuracy": 0.9967457884550095,
1399
+ "num_tokens": 136499177.0,
1400
+ "step": 6950
1401
+ },
1402
+ {
1403
+ "entropy": 0.014207763671875,
1404
+ "epoch": 1.4,
1405
+ "grad_norm": 0.0068522971123456955,
1406
+ "learning_rate": 2.1815522873772475e-05,
1407
+ "loss": 0.0121,
1408
+ "mean_token_accuracy": 0.9967319291830062,
1409
+ "num_tokens": 137503523.0,
1410
+ "step": 7000
1411
+ }
1412
+ ],
1413
+ "logging_steps": 50,
1414
+ "max_steps": 10000,
1415
+ "num_input_tokens_seen": 0,
1416
+ "num_train_epochs": 2,
1417
+ "save_steps": 500,
1418
+ "stateful_callbacks": {
1419
+ "TrainerControl": {
1420
+ "args": {
1421
+ "should_epoch_stop": false,
1422
+ "should_evaluate": false,
1423
+ "should_log": false,
1424
+ "should_save": true,
1425
+ "should_training_stop": false
1426
+ },
1427
+ "attributes": {}
1428
+ }
1429
+ },
1430
+ "total_flos": 2.388030314757751e+18,
1431
+ "train_batch_size": 1,
1432
+ "trial_name": null,
1433
+ "trial_params": null
1434
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7000/zero_to_fp32.py ADDED
@@ -0,0 +1,760 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
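+ # Once extracted, the weights load with plain PyTorch; a minimal sketch (assuming a
+ # compatible `model` object and the default output name produced below):
+ #   state_dict = torch.load('output_dir/pytorch_model.bin')
+ #   model.load_state_dict(state_dict)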
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import gc
25
+ import json
26
+ import numpy as np
27
+ from tqdm import tqdm
28
+ from collections import OrderedDict
29
+ from dataclasses import dataclass
30
+
31
+ # While this script doesn't use DeepSpeed to recover data, the checkpoints are pickled with
32
+ # DeepSpeed data structures, so DeepSpeed must be importable in the current Python environment.
33
+ from deepspeed.utils import logger
34
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
35
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
36
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
37
+
38
+
39
+ @dataclass
40
+ class zero_model_state:
41
+ buffers: dict
42
+ param_shapes: dict
43
+ shared_params: list
44
+ ds_version: int
45
+ frozen_param_shapes: dict
46
+ frozen_param_fragments: dict
47
+
48
+
49
+ debug = 0
50
+
51
+ # load to cpu
52
+ device = torch.device('cpu')
53
+
54
+
55
+ def atoi(text):
56
+ return int(text) if text.isdigit() else text
57
+
58
+
59
+ def natural_keys(text):
60
+ '''
61
+ alist.sort(key=natural_keys) sorts in human order
62
+ http://nedbatchelder.com/blog/200712/human_sorting.html
63
+ (See Toothy's implementation in the comments)
64
+ '''
65
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
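+ # illustration: sorted(['rank10', 'rank2'], key=natural_keys) -> ['rank2', 'rank10'],
+ # since digit runs compare as integers (2 < 10) rather than as text.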
66
+
67
+
68
+ def get_model_state_file(checkpoint_dir, zero_stage):
69
+ if not os.path.isdir(checkpoint_dir):
70
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
71
+
72
+ # there should be only one file
73
+ if zero_stage <= 2:
74
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
75
+ elif zero_stage == 3:
76
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
77
+
78
+ if not os.path.exists(file):
79
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
80
+
81
+ return file
82
+
83
+
84
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
85
+ # XXX: need to test that this simple glob rule works for multi-node setup too
86
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
87
+
88
+ if len(ckpt_files) == 0:
89
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
90
+
91
+ return ckpt_files
92
+
93
+
94
+ def get_optim_files(checkpoint_dir):
95
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
96
+
97
+
98
+ def get_model_state_files(checkpoint_dir):
99
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
100
+
101
+
102
+ def parse_model_states(files):
103
+ zero_model_states = []
104
+ for file in files:
105
+ state_dict = torch.load(file, map_location=device, weights_only=False)
106
+
107
+ if BUFFER_NAMES not in state_dict:
108
+ raise ValueError(f"{file} is not a model state checkpoint")
109
+ buffer_names = state_dict[BUFFER_NAMES]
110
+ if debug:
111
+ print("Found buffers:", buffer_names)
112
+
113
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
114
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
115
+ param_shapes = state_dict[PARAM_SHAPES]
116
+
117
+ # collect parameters that are included in param_shapes
118
+ param_names = []
119
+ for s in param_shapes:
120
+ for name in s.keys():
121
+ param_names.append(name)
122
+
123
+ # update with frozen parameters
124
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
125
+ if frozen_param_shapes is not None:
126
+ if debug:
127
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
128
+ param_names += list(frozen_param_shapes.keys())
129
+
130
+ # handle shared params
131
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
132
+
133
+ ds_version = state_dict.get(DS_VERSION, None)
134
+
135
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
136
+
137
+ z_model_state = zero_model_state(buffers=buffers,
138
+ param_shapes=param_shapes,
139
+ shared_params=shared_params,
140
+ ds_version=ds_version,
141
+ frozen_param_shapes=frozen_param_shapes,
142
+ frozen_param_fragments=frozen_param_fragments)
143
+ zero_model_states.append(z_model_state)
144
+
145
+ return zero_model_states
146
+
147
+
148
+ def parse_optim_states(files, ds_checkpoint_dir):
149
+ total_files = len(files)
150
+ state_dicts = []
151
+ for f in tqdm(files, desc='Loading checkpoint shards'):
152
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
153
+ # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights,
154
+ # and also handle the case where they were already removed by another helper script
155
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
156
+ state_dicts.append(state_dict)
157
+
158
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
159
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
160
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
161
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
162
+
163
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
164
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
165
+ # use the max of the partition_count to get the dp world_size.
166
+
167
+ if type(world_size) is list:
168
+ world_size = max(world_size)
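+ # illustration: PARTITION_COUNT may arrive as a per-group list such as [4, 2] when
+ # expert params use a smaller dp group; max([4, 2]) == 4 recovers the dp world size.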
169
+
170
+ if world_size != total_files:
171
+ raise ValueError(
172
+ f"Expected {world_size} '*_optim_states.pt' files under '{ds_checkpoint_dir}' but found {total_files}. "
173
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint that didn't get saved by one or more processes."
174
+ )
175
+
176
+ # the groups are named differently in each stage
177
+ if zero_stage <= 2:
178
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
179
+ elif zero_stage == 3:
180
+ fp32_groups_key = FP32_FLAT_GROUPS
181
+ else:
182
+ raise ValueError(f"unknown zero stage {zero_stage}")
183
+
184
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
185
+ return zero_stage, world_size, fp32_flat_groups
186
+
187
+
188
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
189
+ """
190
+ Returns fp32 state_dict reconstructed from ds checkpoint
191
+
192
+ Args:
193
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
194
+ - ``exclude_frozen_parameters``: if True, frozen parameters are left out of the returned state_dict
195
+ """
196
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
197
+
198
+ optim_files = get_optim_files(ds_checkpoint_dir)
199
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
200
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
201
+
202
+ model_files = get_model_state_files(ds_checkpoint_dir)
203
+
204
+ zero_model_states = parse_model_states(model_files)
205
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
206
+
207
+ if zero_stage <= 2:
208
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
209
+ exclude_frozen_parameters)
210
+ elif zero_stage == 3:
211
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
212
+ exclude_frozen_parameters)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _has_callable(obj, fn):
248
+ attr = getattr(obj, fn, None)
249
+ return callable(attr)
250
+
251
+
252
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
253
+ param_shapes = zero_model_states[0].param_shapes
254
+
255
+ # Reconstruction protocol (ZeRO-2): each rank holds one flat fp32 partition per param
256
+ # group; concatenating a group's partitions across ranks restores its full flat
257
+ # vector, which is then sliced back into the individual params by shape.
258
+
259
+ if debug:
260
+ for i in range(world_size):
261
+ for j in range(len(fp32_flat_groups[0])):
262
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
263
+
264
+ # XXX: memory usage doubles here (zero2)
265
+ num_param_groups = len(fp32_flat_groups[0])
266
+ merged_single_partition_of_fp32_groups = []
267
+ for i in range(num_param_groups):
268
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
269
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
270
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
271
+ avail_numel = sum(
272
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
273
+
274
+ if debug:
275
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
276
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
277
+ # not asserting if there is a mismatch due to possible padding
278
+ print(f"Have {avail_numel} numels to process.")
279
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
280
+
281
+ # params
282
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to
283
+ # support an out-of-core computing solution
284
+ total_numel = 0
285
+ total_params = 0
286
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
287
+ offset = 0
288
+ avail_numel = full_single_fp32_vector.numel()
289
+ for name, shape in shapes.items():
290
+
291
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
292
+ total_numel += unpartitioned_numel
293
+ total_params += 1
294
+
295
+ if debug:
296
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
297
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
298
+ offset += unpartitioned_numel
299
+
300
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
301
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
302
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
303
+ # live optimizer object, so we are checking that the numbers are within the right range
304
+ align_to = 2 * world_size
305
+
306
+ def zero2_align(x):
307
+ return align_to * math.ceil(x / align_to)
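+ # worked example: with world_size=4, align_to == 8, so zero2_align(10) == 8 * ceil(10/8) == 16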
308
+
309
+ if debug:
310
+ print(f"original offset={offset}, avail_numel={avail_numel}")
311
+
312
+ offset = zero2_align(offset)
313
+ avail_numel = zero2_align(avail_numel)
314
+
315
+ if debug:
316
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
317
+
318
+ # Sanity check
319
+ if offset != avail_numel:
320
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
321
+
322
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
323
+
324
+
325
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
326
+ exclude_frozen_parameters):
327
+ state_dict = OrderedDict()
328
+
329
+ # buffers
330
+ buffers = zero_model_states[0].buffers
331
+ state_dict.update(buffers)
332
+ if debug:
333
+ print(f"added {len(buffers)} buffers")
334
+
335
+ if not exclude_frozen_parameters:
336
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
337
+
338
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
339
+
340
+ # recover shared parameters
341
+ for pair in zero_model_states[0].shared_params:
342
+ if pair[1] in state_dict:
343
+ state_dict[pair[0]] = state_dict[pair[1]]
344
+
345
+ return state_dict
346
+
347
+
348
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
349
+ remainder = unpartitioned_numel % world_size
350
+ padding_numel = (world_size - remainder) if remainder else 0
351
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
352
+ return partitioned_numel, padding_numel
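+ # worked example: unpartitioned_numel=10, world_size=4 -> remainder=2, padding_numel=2,
+ # partitioned_numel=ceil(10/4)=3; 4 ranks x 3 slots = 12 = 10 real + 2 padding elements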
353
+
354
+
355
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
356
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
357
+ return
358
+
359
+ if debug:
360
+ for i in range(world_size):
361
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
362
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
363
+
364
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
365
+ wanted_params = len(frozen_param_shapes)
366
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
367
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
368
+ print(f'Frozen params: Have {avail_numel} numels to process.')
369
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
370
+
371
+ total_params = 0
372
+ total_numel = 0
373
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
374
+ total_params += 1
375
+ unpartitioned_numel = shape.numel()
376
+ total_numel += unpartitioned_numel
377
+
378
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
379
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
380
+
381
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
382
+
383
+ if debug:
384
+ print(
385
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
386
+ )
387
+
388
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
389
+
390
+
391
+ class GatheredTensor:
392
+ """
393
+ A pseudo tensor that collects partitioned weights.
394
+ It is more memory efficient when there are multiple groups.
395
+ """
396
+
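+ # Each instance just records where one logical parameter lives inside the per-rank flat
+ # buffers; nothing is copied until .contiguous() is called, so a state_dict full of these
+ # objects stays cheap until individual tensors are actually materialized.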
397
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
398
+ self.flat_groups = flat_groups
399
+ self.flat_groups_offset = flat_groups_offset
400
+ self.offset = offset
401
+ self.partitioned_numel = partitioned_numel
402
+ self.shape = shape
403
+ self.dtype = self.flat_groups[0][0].dtype
404
+
405
+ def contiguous(self):
406
+ """
407
+ Merge partitioned weights from flat_groups into a single tensor.
408
+ """
409
+ end_idx = self.offset + self.partitioned_numel
410
+ world_size = len(self.flat_groups)
411
+ pad_flat_param_chunks = []
412
+
413
+ for rank_i in range(world_size):
414
+ # for each rank, we need to collect weights from related group/groups
415
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
416
+ start_group_id = None
417
+ end_group_id = None
418
+ for group_id in range(len(self.flat_groups_offset)):
419
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
420
+ start_group_id = group_id
421
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
422
+ end_group_id = group_id
423
+ break
424
+ # collect weights from related group/groups
425
+ for group_id in range(start_group_id, end_group_id + 1):
426
+ flat_tensor = flat_groups_at_rank_i[group_id]
427
+ start_offset = self.offset - self.flat_groups_offset[group_id]
428
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
429
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
430
+
431
+ # collect weights from all ranks
432
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
433
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
434
+ return param
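+ # design note: chunks are concatenated rank-major, then truncated to shape.numel() to
+ # drop any ZeRO-3 alignment padding before the final view into the param's true shape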
435
+
436
+
437
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
438
+ param_shapes = zero_model_states[0].param_shapes
439
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
440
+
441
+ # Reconstruction protocol: for ZeRO-3 we need to zip the per-rank partitions back together
442
+ # at each param's boundary, re-consolidating the param while stripping any padding
443
+
444
+ # merge list of dicts, preserving order
445
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
446
+
447
+ if debug:
448
+ for i in range(world_size):
449
+ print(f"{FP32_FLAT_GROUPS}[{i}] shapes={[g.shape for g in fp32_flat_groups[i]]}")
450
+
451
+ wanted_params = len(param_shapes)
452
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
453
+ # not asserting if there is a mismatch due to possible padding
454
+ avail_numel = sum(flat_group.numel() for flat_group in fp32_flat_groups[0]) * world_size
455
+ print(f"Trainable params: Have {avail_numel} numels to process.")
456
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
457
+
458
+ # params
459
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to
460
+ # support an out-of-core computing solution
461
+ offset = 0
462
+ total_numel = 0
463
+ total_params = 0
464
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
465
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
466
+ unpartitioned_numel = shape.numel()
467
+ total_numel += unpartitioned_numel
468
+ total_params += 1
469
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
470
+
471
+ if debug:
472
+ print(
473
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
474
+ )
475
+
476
+ # memory efficient tensor
477
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
478
+ state_dict[name] = tensor
479
+ offset += partitioned_numel
480
+
481
+ offset *= world_size
482
+
483
+ # Sanity check
484
+ if offset != avail_numel:
485
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
486
+
487
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
488
+
489
+
490
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
491
+ exclude_frozen_parameters):
492
+ state_dict = OrderedDict()
493
+
494
+ # buffers
495
+ buffers = zero_model_states[0].buffers
496
+ state_dict.update(buffers)
497
+ if debug:
498
+ print(f"added {len(buffers)} buffers")
499
+
500
+ if not exclude_frozen_parameters:
501
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
502
+
503
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
504
+
505
+ # recover shared parameters
506
+ for pair in zero_model_states[0].shared_params:
507
+ if pair[1] in state_dict:
508
+ state_dict[pair[0]] = state_dict[pair[1]]
509
+
510
+ return state_dict
511
+
512
+
513
+ def to_torch_tensor(state_dict, return_empty_tensor=False):
514
+ """
515
+ Convert a state_dict of GatheredTensor objects into real torch tensors
516
+ """
517
+ torch_state_dict = {}
518
+ converted_tensors = {}
519
+ for name, tensor in state_dict.items():
520
+ tensor_id = id(tensor)
521
+ if tensor_id in converted_tensors: # shared tensors
522
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
523
+ torch_state_dict[name] = shared_tensor
524
+ else:
525
+ converted_tensors[tensor_id] = name
526
+ if return_empty_tensor:
527
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
528
+ else:
529
+ torch_state_dict[name] = tensor.contiguous()
530
+ return torch_state_dict
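+ # note: shared parameters (e.g. tied embeddings) show up as the same object under two
+ # names; the id() bookkeeping above materializes them once and aliases the second name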
531
+
532
+
533
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
534
+ tag=None,
535
+ exclude_frozen_parameters=False,
536
+ lazy_mode=False):
537
+ """
538
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
539
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
540
+ via a model hub.
541
+
542
+ Args:
543
+ - ``checkpoint_dir``: path to the desired checkpoint folder
544
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to read the tag from the 'latest' file, e.g., ``global_step14``
545
+ - ``exclude_frozen_parameters``: exclude frozen parameters
546
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
547
+ Convert a pseudo tensor to a torch tensor with ``.contiguous()``
548
+
549
+ Returns:
550
+ - pytorch ``state_dict``
551
+
552
+ A typical usage might be ::
553
+
554
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
555
+ # do the training and checkpoint saving
556
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
557
+ model = model.cpu() # move to cpu
558
+ model.load_state_dict(state_dict)
559
+ # submit to model hub or save the model to share with others
560
+
561
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
562
+ application. i.e. you will need to re-initialize the deepspeed engine, since
563
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
564
+
565
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
566
+
567
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
568
+ You may need to use the offline approach with the ``zero_to_fp32.py`` script that is saved with
569
+ the checkpoint. Or you can load state_dict in lazy mode ::
570
+
571
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
572
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
573
+ for name, lazy_tensor in state_dict.items():
574
+ tensor = lazy_tensor.contiguous() # to cpu
575
+ print(name, tensor)
576
+ # del the tensor to release memory once it is no longer in use
577
+ """
578
+ if tag is None:
579
+ latest_path = os.path.join(checkpoint_dir, 'latest')
580
+ if os.path.isfile(latest_path):
581
+ with open(latest_path, 'r') as fd:
582
+ tag = fd.read().strip()
583
+ else:
584
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
585
+
586
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
587
+
588
+ if not os.path.isdir(ds_checkpoint_dir):
589
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
590
+
591
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
592
+ if lazy_mode:
593
+ return state_dict
594
+ else:
595
+ return to_torch_tensor(state_dict)
596
+
597
+
598
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
599
+ output_dir,
600
+ max_shard_size="5GB",
601
+ safe_serialization=False,
602
+ tag=None,
603
+ exclude_frozen_parameters=False):
604
+ """
605
+ Convert ZeRO 2 or 3 checkpoint into fp32 consolidated ``state_dict`` file(s), sharded by ``max_shard_size``, that can be
606
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
607
+
608
+ Args:
609
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
610
+ - ``output_dir``: directory for the pytorch fp32 state_dict output files
611
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
612
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
613
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
614
+ - ``exclude_frozen_parameters``: exclude frozen parameters
615
+ """
616
+
617
+ # Dependency pre-check
618
+ if safe_serialization:
619
+ try:
620
+ from safetensors.torch import save_file
621
+ except ImportError:
622
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
623
+ raise
624
+ if max_shard_size is not None:
625
+ try:
626
+ from huggingface_hub import split_torch_state_dict_into_shards
627
+ except ImportError:
628
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
629
+ raise
630
+
631
+ # Convert zero checkpoint to state_dict
632
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
633
+ tag,
634
+ exclude_frozen_parameters,
635
+ lazy_mode=True)
636
+
637
+ # Shard the model if it is too big.
638
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
639
+ if max_shard_size is not None:
640
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
641
+ # a memory-efficient approach for sharding
642
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
643
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
644
+ filename_pattern=filename_pattern,
645
+ max_shard_size=max_shard_size)
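+ # Added note: the shard plan only needs tensor shapes and dtypes, so the
+ # uninitialized placeholders above let huggingface_hub size the shards
+ # without reading the actual fp32 data from the checkpoint at this stage.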
646
+ else:
647
+ from collections import namedtuple
648
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
649
+ state_dict_split = StateDictSplit(is_sharded=False,
650
+ filename_to_tensors={weights_name: list(state_dict.keys())})
651
+
652
+ # Save the model by shard
653
+ os.makedirs(output_dir, exist_ok=True)
654
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
655
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
656
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
657
+ shard_state_dict = to_torch_tensor(shard_state_dict)
658
+ output_path = os.path.join(output_dir, shard_file)
659
+ if safe_serialization:
660
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
661
+ else:
662
+ torch.save(shard_state_dict, output_path)
663
+ # release the memory of current shard
664
+ for tensor_name in list(shard_state_dict.keys()):
665
+ del state_dict[tensor_name]
666
+ del shard_state_dict[tensor_name]
667
+ del shard_state_dict
668
+ gc.collect()
669
+
670
+ # Save index if sharded
671
+ if state_dict_split.is_sharded:
672
+ index = {
673
+ "metadata": state_dict_split.metadata,
674
+ "weight_map": state_dict_split.tensor_to_filename,
675
+ }
676
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
677
+ save_index_file = os.path.join(output_dir, save_index_file)
678
+ with open(save_index_file, "w", encoding="utf-8") as f:
679
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
680
+ f.write(content)
681
+
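+ # Illustrative call sketch (added, not part of the original script); the
+ # paths below are hypothetical:
+ #
+ #   convert_zero_checkpoint_to_fp32_state_dict(
+ #       "checkpoint-7500",         # folder containing the 'latest' tag file
+ #       "checkpoint-7500-fp32",    # output folder for the consolidated weights
+ #       safe_serialization=True,   # requires `pip install safetensors`
+ #   )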
682
+
683
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
684
+ """
685
+ 1. Put the provided model on cpu
686
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
687
+ 3. Load it into the provided model
688
+
689
+ Args:
690
+ - ``model``: the model object to update
691
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
692
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
693
+
694
+ Returns:
695
+ - ``model``: modified model
696
+
697
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
698
+ have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
699
+ conveniently placed for you in the checkpoint folder.
700
+
701
+ A typical usage might be ::
702
+
703
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
704
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
705
+ # submit to model hub or save the model to share with others
706
+
707
+ Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
709
+ of the same application, i.e. you will need to re-initialize the deepspeed engine, since
709
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
710
+
711
+ """
712
+ logger.info("Extracting fp32 weights")
713
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
714
+
715
+ logger.info("Overwriting model with fp32 weights")
716
+ model = model.cpu()
717
+ model.load_state_dict(state_dict, strict=False)
718
+
719
+ return model
720
+
721
+
722
+ if __name__ == "__main__":
723
+ parser = argparse.ArgumentParser()
724
+ parser.add_argument("checkpoint_dir",
725
+ type=str,
726
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
727
+ parser.add_argument("output_dir",
728
+ type=str,
729
+ help="directory to the pytorch fp32 state_dict output files"
730
+ "(e.g. path/checkpoint-12-output/)")
731
+ parser.add_argument(
732
+ "--max_shard_size",
733
+ type=str,
734
+ default="5GB",
735
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
736
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
737
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
738
+ "without CPU OOM issues.")
739
+ parser.add_argument(
740
+ "--safe_serialization",
741
+ default=False,
742
+ action='store_true',
743
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
744
+ parser.add_argument("-t",
745
+ "--tag",
746
+ type=str,
747
+ default=None,
748
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
749
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
750
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
751
+ args = parser.parse_args()
752
+
753
+ debug = args.debug
754
+
755
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
756
+ args.output_dir,
757
+ max_shard_size=args.max_shard_size,
758
+ safe_serialization=args.safe_serialization,
759
+ tag=args.tag,
760
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
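+
+ # Example invocation (illustrative; paths are hypothetical):
+ #   python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12-output --safe_serialization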
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/adapter_config.json ADDED
@@ -0,0 +1,46 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "Qwen/Qwen2.5-Coder-3B",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 128,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
+ "r": 64,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "up_proj",
33
+ "q_proj",
34
+ "o_proj",
35
+ "down_proj",
36
+ "gate_proj",
37
+ "v_proj",
38
+ "k_proj"
39
+ ],
40
+ "target_parameters": null,
41
+ "task_type": "CAUSAL_LM",
42
+ "trainable_token_indices": null,
43
+ "use_dora": false,
44
+ "use_qalora": false,
45
+ "use_rslora": false
46
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/added_tokens.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<tool_call>": 151657,
4
+ "<|box_end|>": 151649,
5
+ "<|box_start|>": 151648,
6
+ "<|endoftext|>": 151643,
7
+ "<|file_sep|>": 151664,
8
+ "<|fim_middle|>": 151660,
9
+ "<|fim_pad|>": 151662,
10
+ "<|fim_prefix|>": 151659,
11
+ "<|fim_suffix|>": 151661,
12
+ "<|im_end|>": 151645,
13
+ "<|im_start|>": 151644,
14
+ "<|image_pad|>": 151655,
15
+ "<|object_ref_end|>": 151647,
16
+ "<|object_ref_start|>": 151646,
17
+ "<|quad_end|>": 151651,
18
+ "<|quad_start|>": 151650,
19
+ "<|repo_name|>": 151663,
20
+ "<|video_pad|>": 151656,
21
+ "<|vision_end|>": 151653,
22
+ "<|vision_pad|>": 151654,
23
+ "<|vision_start|>": 151652
24
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "clean_up_tokenization_spaces": false,
199
+ "eos_token": "<|endoftext|>",
200
+ "errors": "replace",
201
+ "extra_special_tokens": {},
202
+ "model_max_length": 32768,
203
+ "pad_token": "<|endoftext|>",
204
+ "split_special_tokens": false,
205
+ "tokenizer_class": "Qwen2Tokenizer",
206
+ "unk_token": null
207
+ }
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/trainer_state.json ADDED
@@ -0,0 +1,1534 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 1.5,
6
+ "eval_steps": 500,
7
+ "global_step": 7500,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "entropy": 0.166884765625,
14
+ "epoch": 0.01,
15
+ "grad_norm": 0.09714163839817047,
16
+ "learning_rate": 1.6333333333333335e-05,
17
+ "loss": 0.1512,
18
+ "mean_token_accuracy": 0.966504562497139,
19
+ "num_tokens": 973322.0,
20
+ "step": 50
21
+ },
22
+ {
23
+ "entropy": 0.048140869140625,
24
+ "epoch": 0.02,
25
+ "grad_norm": 0.057216521352529526,
26
+ "learning_rate": 3.3e-05,
27
+ "loss": 0.0424,
28
+ "mean_token_accuracy": 0.9896974349021912,
29
+ "num_tokens": 1965908.0,
30
+ "step": 100
31
+ },
32
+ {
33
+ "entropy": 0.029632568359375,
34
+ "epoch": 0.03,
35
+ "grad_norm": 0.054412033408880234,
36
+ "learning_rate": 4.966666666666667e-05,
37
+ "loss": 0.0248,
38
+ "mean_token_accuracy": 0.9936319047212601,
39
+ "num_tokens": 2931943.0,
40
+ "step": 150
41
+ },
42
+ {
43
+ "entropy": 0.02478515625,
44
+ "epoch": 0.04,
45
+ "grad_norm": 0.08800562471151352,
46
+ "learning_rate": 6.633333333333334e-05,
47
+ "loss": 0.0214,
48
+ "mean_token_accuracy": 0.9943305045366287,
49
+ "num_tokens": 3906979.0,
50
+ "step": 200
51
+ },
52
+ {
53
+ "entropy": 0.0224652099609375,
54
+ "epoch": 0.05,
55
+ "grad_norm": 0.041793134063482285,
56
+ "learning_rate": 8.3e-05,
57
+ "loss": 0.0191,
58
+ "mean_token_accuracy": 0.9948792272806167,
59
+ "num_tokens": 4891791.0,
60
+ "step": 250
61
+ },
62
+ {
63
+ "entropy": 0.01969482421875,
64
+ "epoch": 0.06,
65
+ "grad_norm": 0.04181526601314545,
66
+ "learning_rate": 9.966666666666667e-05,
67
+ "loss": 0.0173,
68
+ "mean_token_accuracy": 0.995435175895691,
69
+ "num_tokens": 5923811.0,
70
+ "step": 300
71
+ },
72
+ {
73
+ "entropy": 0.019368896484375,
74
+ "epoch": 0.07,
75
+ "grad_norm": 0.03995412588119507,
76
+ "learning_rate": 9.999370378817499e-05,
77
+ "loss": 0.0164,
78
+ "mean_token_accuracy": 0.9956372672319412,
79
+ "num_tokens": 6923169.0,
80
+ "step": 350
81
+ },
82
+ {
83
+ "entropy": 0.0204400634765625,
84
+ "epoch": 0.08,
85
+ "grad_norm": 0.06872096657752991,
86
+ "learning_rate": 9.997430021636957e-05,
87
+ "loss": 0.0163,
88
+ "mean_token_accuracy": 0.9956023478507996,
89
+ "num_tokens": 7901360.0,
90
+ "step": 400
91
+ },
92
+ {
93
+ "entropy": 0.0186175537109375,
94
+ "epoch": 0.09,
95
+ "grad_norm": 0.037068989127874374,
96
+ "learning_rate": 9.994179175045023e-05,
97
+ "loss": 0.0157,
98
+ "mean_token_accuracy": 0.9958296239376068,
99
+ "num_tokens": 8899074.0,
100
+ "step": 450
101
+ },
102
+ {
103
+ "entropy": 0.0173492431640625,
104
+ "epoch": 0.1,
105
+ "grad_norm": 0.014651700854301453,
106
+ "learning_rate": 9.989618691519873e-05,
107
+ "loss": 0.015,
108
+ "mean_token_accuracy": 0.9959587389230729,
109
+ "num_tokens": 9909340.0,
110
+ "step": 500
111
+ },
112
+ {
113
+ "entropy": 0.0183203125,
114
+ "epoch": 0.11,
115
+ "grad_norm": 0.016151444986462593,
116
+ "learning_rate": 9.983749766969271e-05,
117
+ "loss": 0.0151,
118
+ "mean_token_accuracy": 0.9959770923852921,
119
+ "num_tokens": 10894801.0,
120
+ "step": 550
121
+ },
122
+ {
123
+ "entropy": 0.02047607421875,
124
+ "epoch": 0.12,
125
+ "grad_norm": 0.021302781999111176,
126
+ "learning_rate": 9.976573940416966e-05,
127
+ "loss": 0.0157,
128
+ "mean_token_accuracy": 0.9958055526018142,
129
+ "num_tokens": 11862148.0,
130
+ "step": 600
131
+ },
132
+ {
133
+ "entropy": 0.017862548828125,
134
+ "epoch": 0.13,
135
+ "grad_norm": 0.013205700553953648,
136
+ "learning_rate": 9.968093093599106e-05,
137
+ "loss": 0.0152,
138
+ "mean_token_accuracy": 0.9958789277076722,
139
+ "num_tokens": 12851463.0,
140
+ "step": 650
141
+ },
142
+ {
143
+ "entropy": 0.0182177734375,
144
+ "epoch": 0.14,
145
+ "grad_norm": 0.013120726682245731,
146
+ "learning_rate": 9.958309450470784e-05,
147
+ "loss": 0.0144,
148
+ "mean_token_accuracy": 0.9962116169929505,
149
+ "num_tokens": 13849228.0,
150
+ "step": 700
151
+ },
152
+ {
153
+ "entropy": 0.0179010009765625,
154
+ "epoch": 0.15,
155
+ "grad_norm": 0.014701537787914276,
156
+ "learning_rate": 9.947225576622847e-05,
157
+ "loss": 0.0146,
158
+ "mean_token_accuracy": 0.9961142331361771,
159
+ "num_tokens": 14812166.0,
160
+ "step": 750
161
+ },
162
+ {
163
+ "entropy": 0.0173638916015625,
164
+ "epoch": 0.16,
165
+ "grad_norm": 0.033504419028759,
166
+ "learning_rate": 9.934844378609117e-05,
167
+ "loss": 0.0145,
168
+ "mean_token_accuracy": 0.9961155968904495,
169
+ "num_tokens": 15789120.0,
170
+ "step": 800
171
+ },
172
+ {
173
+ "entropy": 0.0183349609375,
174
+ "epoch": 0.17,
175
+ "grad_norm": 0.010470729321241379,
176
+ "learning_rate": 9.921169103184187e-05,
177
+ "loss": 0.0144,
178
+ "mean_token_accuracy": 0.9961740493774414,
179
+ "num_tokens": 16764645.0,
180
+ "step": 850
181
+ },
182
+ {
183
+ "entropy": 0.016279296875,
184
+ "epoch": 0.18,
185
+ "grad_norm": 0.02781980112195015,
186
+ "learning_rate": 9.906203336452029e-05,
187
+ "loss": 0.0142,
188
+ "mean_token_accuracy": 0.9962105673551559,
189
+ "num_tokens": 17730003.0,
190
+ "step": 900
191
+ },
192
+ {
193
+ "entropy": 0.0166461181640625,
194
+ "epoch": 0.19,
195
+ "grad_norm": 0.020350394770503044,
196
+ "learning_rate": 9.889951002925593e-05,
197
+ "loss": 0.0141,
198
+ "mean_token_accuracy": 0.9962483876943589,
199
+ "num_tokens": 18723861.0,
200
+ "step": 950
201
+ },
202
+ {
203
+ "entropy": 0.0165386962890625,
204
+ "epoch": 0.2,
205
+ "grad_norm": 0.013212243095040321,
206
+ "learning_rate": 9.872416364497675e-05,
207
+ "loss": 0.0141,
208
+ "mean_token_accuracy": 0.9961817175149917,
209
+ "num_tokens": 19695091.0,
210
+ "step": 1000
211
+ },
212
+ {
213
+ "entropy": 0.017391357421875,
214
+ "epoch": 0.21,
215
+ "grad_norm": 0.014041919261217117,
216
+ "learning_rate": 9.853604019323301e-05,
217
+ "loss": 0.0141,
218
+ "mean_token_accuracy": 0.9962570595741272,
219
+ "num_tokens": 20662255.0,
220
+ "step": 1050
221
+ },
222
+ {
223
+ "entropy": 0.016884765625,
224
+ "epoch": 0.22,
225
+ "grad_norm": 0.017174789682030678,
226
+ "learning_rate": 9.833518900613956e-05,
227
+ "loss": 0.0138,
228
+ "mean_token_accuracy": 0.9962636953592301,
229
+ "num_tokens": 21642092.0,
230
+ "step": 1100
231
+ },
232
+ {
233
+ "entropy": 0.019366455078125,
234
+ "epoch": 0.23,
235
+ "grad_norm": 0.017337976023554802,
236
+ "learning_rate": 9.812166275343917e-05,
237
+ "loss": 0.0143,
238
+ "mean_token_accuracy": 0.996159919500351,
239
+ "num_tokens": 22607505.0,
240
+ "step": 1150
241
+ },
242
+ {
243
+ "entropy": 0.0164886474609375,
244
+ "epoch": 0.24,
245
+ "grad_norm": 0.018486863002181053,
246
+ "learning_rate": 9.789551742869096e-05,
247
+ "loss": 0.0137,
248
+ "mean_token_accuracy": 0.9963399863243103,
249
+ "num_tokens": 23584579.0,
250
+ "step": 1200
251
+ },
252
+ {
253
+ "entropy": 0.016282958984375,
254
+ "epoch": 0.25,
255
+ "grad_norm": 0.009195365011692047,
256
+ "learning_rate": 9.765681233458693e-05,
257
+ "loss": 0.0136,
258
+ "mean_token_accuracy": 0.9963367432355881,
259
+ "num_tokens": 24580285.0,
260
+ "step": 1250
261
+ },
262
+ {
263
+ "entropy": 0.0197235107421875,
264
+ "epoch": 0.26,
265
+ "grad_norm": 0.01205432415008545,
266
+ "learning_rate": 9.740561006740098e-05,
267
+ "loss": 0.0144,
268
+ "mean_token_accuracy": 0.996049216389656,
269
+ "num_tokens": 25525513.0,
270
+ "step": 1300
271
+ },
272
+ {
273
+ "entropy": 0.0153009033203125,
274
+ "epoch": 0.27,
275
+ "grad_norm": 0.01174497976899147,
276
+ "learning_rate": 9.7141976500574e-05,
277
+ "loss": 0.0135,
278
+ "mean_token_accuracy": 0.9964245760440826,
279
+ "num_tokens": 26523426.0,
280
+ "step": 1350
281
+ },
282
+ {
283
+ "entropy": 0.0169378662109375,
284
+ "epoch": 0.28,
285
+ "grad_norm": 0.014242298901081085,
286
+ "learning_rate": 9.686598076743981e-05,
287
+ "loss": 0.0139,
288
+ "mean_token_accuracy": 0.9962529343366623,
289
+ "num_tokens": 27493091.0,
290
+ "step": 1400
291
+ },
292
+ {
293
+ "entropy": 0.017811279296875,
294
+ "epoch": 0.29,
295
+ "grad_norm": 0.0160844549536705,
296
+ "learning_rate": 9.657769524309605e-05,
297
+ "loss": 0.0145,
298
+ "mean_token_accuracy": 0.996181645989418,
299
+ "num_tokens": 28458553.0,
300
+ "step": 1450
301
+ },
302
+ {
303
+ "entropy": 0.0169232177734375,
304
+ "epoch": 0.3,
305
+ "grad_norm": 0.03241236135363579,
306
+ "learning_rate": 9.627719552542516e-05,
307
+ "loss": 0.014,
308
+ "mean_token_accuracy": 0.9962622857093811,
309
+ "num_tokens": 29430497.0,
310
+ "step": 1500
311
+ },
312
+ {
313
+ "entropy": 0.0163720703125,
314
+ "epoch": 0.31,
315
+ "grad_norm": 0.01750756986439228,
316
+ "learning_rate": 9.596456041527001e-05,
317
+ "loss": 0.0136,
318
+ "mean_token_accuracy": 0.9964328509569168,
319
+ "num_tokens": 30414152.0,
320
+ "step": 1550
321
+ },
322
+ {
323
+ "entropy": 0.017760009765625,
324
+ "epoch": 0.32,
325
+ "grad_norm": 0.02182549238204956,
326
+ "learning_rate": 9.563987189576991e-05,
327
+ "loss": 0.0147,
328
+ "mean_token_accuracy": 0.9961263346672058,
329
+ "num_tokens": 31396417.0,
330
+ "step": 1600
331
+ },
332
+ {
333
+ "entropy": 0.0171954345703125,
334
+ "epoch": 0.33,
335
+ "grad_norm": 0.011301269754767418,
336
+ "learning_rate": 9.530321511086183e-05,
337
+ "loss": 0.0139,
338
+ "mean_token_accuracy": 0.9962880289554596,
339
+ "num_tokens": 32364443.0,
340
+ "step": 1650
341
+ },
342
+ {
343
+ "entropy": 0.015126953125,
344
+ "epoch": 0.34,
345
+ "grad_norm": 0.009061276912689209,
346
+ "learning_rate": 9.495467834295291e-05,
347
+ "loss": 0.0134,
348
+ "mean_token_accuracy": 0.9964066076278687,
349
+ "num_tokens": 33374821.0,
350
+ "step": 1700
351
+ },
352
+ {
353
+ "entropy": 0.018043212890625,
354
+ "epoch": 0.35,
355
+ "grad_norm": 0.014127642847597599,
356
+ "learning_rate": 9.459435298976998e-05,
357
+ "loss": 0.0137,
358
+ "mean_token_accuracy": 0.9963885354995727,
359
+ "num_tokens": 34338570.0,
360
+ "step": 1750
361
+ },
362
+ {
363
+ "entropy": 0.0155584716796875,
364
+ "epoch": 0.36,
365
+ "grad_norm": 0.00913177989423275,
366
+ "learning_rate": 9.422233354039198e-05,
367
+ "loss": 0.0132,
368
+ "mean_token_accuracy": 0.9965262812376022,
369
+ "num_tokens": 35333253.0,
370
+ "step": 1800
371
+ },
372
+ {
373
+ "entropy": 0.0177911376953125,
374
+ "epoch": 0.37,
375
+ "grad_norm": 0.009629561565816402,
376
+ "learning_rate": 9.383871755047198e-05,
377
+ "loss": 0.0131,
378
+ "mean_token_accuracy": 0.9964758467674255,
379
+ "num_tokens": 36316255.0,
380
+ "step": 1850
381
+ },
382
+ {
383
+ "entropy": 0.0159539794921875,
384
+ "epoch": 0.38,
385
+ "grad_norm": 0.01395715307444334,
386
+ "learning_rate": 9.344360561665476e-05,
387
+ "loss": 0.0132,
388
+ "mean_token_accuracy": 0.9964872688055039,
389
+ "num_tokens": 37303247.0,
390
+ "step": 1900
391
+ },
392
+ {
393
+ "entropy": 0.0152783203125,
394
+ "epoch": 0.39,
395
+ "grad_norm": 0.007368483114987612,
396
+ "learning_rate": 9.30371013501972e-05,
397
+ "loss": 0.013,
398
+ "mean_token_accuracy": 0.9965988719463348,
399
+ "num_tokens": 38287463.0,
400
+ "step": 1950
401
+ },
402
+ {
403
+ "entropy": 0.0158270263671875,
404
+ "epoch": 0.4,
405
+ "grad_norm": 0.01508031040430069,
406
+ "learning_rate": 9.261931134979791e-05,
407
+ "loss": 0.0133,
408
+ "mean_token_accuracy": 0.9965020543336869,
409
+ "num_tokens": 39273383.0,
410
+ "step": 2000
411
+ },
412
+ {
413
+ "entropy": 0.0167919921875,
414
+ "epoch": 0.41,
415
+ "grad_norm": 0.009929426945745945,
416
+ "learning_rate": 9.219034517364369e-05,
417
+ "loss": 0.0131,
418
+ "mean_token_accuracy": 0.9965337771177292,
419
+ "num_tokens": 40251665.0,
420
+ "step": 2050
421
+ },
422
+ {
423
+ "entropy": 0.0153082275390625,
424
+ "epoch": 0.42,
425
+ "grad_norm": 0.007909618318080902,
426
+ "learning_rate": 9.17503153106797e-05,
427
+ "loss": 0.0129,
428
+ "mean_token_accuracy": 0.9965605139732361,
429
+ "num_tokens": 41243731.0,
430
+ "step": 2100
431
+ },
432
+ {
433
+ "entropy": 0.0154461669921875,
434
+ "epoch": 0.43,
435
+ "grad_norm": 0.015921294689178467,
436
+ "learning_rate": 9.129933715111125e-05,
437
+ "loss": 0.0129,
438
+ "mean_token_accuracy": 0.9964988273382187,
439
+ "num_tokens": 42247346.0,
440
+ "step": 2150
441
+ },
442
+ {
443
+ "entropy": 0.01652587890625,
444
+ "epoch": 0.44,
445
+ "grad_norm": 0.006036434322595596,
446
+ "learning_rate": 9.083752895614464e-05,
447
+ "loss": 0.0133,
448
+ "mean_token_accuracy": 0.9964230692386628,
449
+ "num_tokens": 43220001.0,
450
+ "step": 2200
451
+ },
452
+ {
453
+ "entropy": 0.015128173828125,
454
+ "epoch": 0.45,
455
+ "grad_norm": 0.009778963401913643,
456
+ "learning_rate": 9.03650118269753e-05,
457
+ "loss": 0.0131,
458
+ "mean_token_accuracy": 0.9965469115972518,
459
+ "num_tokens": 44205197.0,
460
+ "step": 2250
461
+ },
462
+ {
463
+ "entropy": 0.015570068359375,
464
+ "epoch": 0.46,
465
+ "grad_norm": 0.014646470546722412,
466
+ "learning_rate": 8.988190967303101e-05,
467
+ "loss": 0.0131,
468
+ "mean_token_accuracy": 0.9965356832742691,
469
+ "num_tokens": 45185181.0,
470
+ "step": 2300
471
+ },
472
+ {
473
+ "entropy": 0.01632568359375,
474
+ "epoch": 0.47,
475
+ "grad_norm": 0.006433142349123955,
476
+ "learning_rate": 8.938834917947889e-05,
477
+ "loss": 0.0132,
478
+ "mean_token_accuracy": 0.9965211725234986,
479
+ "num_tokens": 46170545.0,
480
+ "step": 2350
481
+ },
482
+ {
483
+ "entropy": 0.01555419921875,
484
+ "epoch": 0.48,
485
+ "grad_norm": 0.007329673506319523,
486
+ "learning_rate": 8.888445977400435e-05,
487
+ "loss": 0.0131,
488
+ "mean_token_accuracy": 0.9965089631080627,
489
+ "num_tokens": 47152165.0,
490
+ "step": 2400
491
+ },
492
+ {
493
+ "entropy": 0.0156890869140625,
494
+ "epoch": 0.49,
495
+ "grad_norm": 0.010034145787358284,
496
+ "learning_rate": 8.837037359287092e-05,
497
+ "loss": 0.0131,
498
+ "mean_token_accuracy": 0.9965383523702621,
499
+ "num_tokens": 48122023.0,
500
+ "step": 2450
501
+ },
502
+ {
503
+ "entropy": 0.0163201904296875,
504
+ "epoch": 0.5,
505
+ "grad_norm": 0.008338450454175472,
506
+ "learning_rate": 8.784622544626985e-05,
507
+ "loss": 0.0131,
508
+ "mean_token_accuracy": 0.9965311539173126,
509
+ "num_tokens": 49094218.0,
510
+ "step": 2500
511
+ },
512
+ {
513
+ "entropy": 0.0152789306640625,
514
+ "epoch": 0.51,
515
+ "grad_norm": 0.007771766744554043,
516
+ "learning_rate": 8.731215278296843e-05,
517
+ "loss": 0.0128,
518
+ "mean_token_accuracy": 0.9965461915731431,
519
+ "num_tokens": 50090248.0,
520
+ "step": 2550
521
+ },
522
+ {
523
+ "entropy": 0.0171893310546875,
524
+ "epoch": 0.52,
525
+ "grad_norm": 0.010534558445215225,
526
+ "learning_rate": 8.676829565426646e-05,
527
+ "loss": 0.0132,
528
+ "mean_token_accuracy": 0.9964153295755387,
529
+ "num_tokens": 51055771.0,
530
+ "step": 2600
531
+ },
532
+ {
533
+ "entropy": 0.0148272705078125,
534
+ "epoch": 0.53,
535
+ "grad_norm": 0.007072142791002989,
536
+ "learning_rate": 8.62147966772702e-05,
537
+ "loss": 0.0129,
538
+ "mean_token_accuracy": 0.9966214698553085,
539
+ "num_tokens": 52051029.0,
540
+ "step": 2650
541
+ },
542
+ {
543
+ "entropy": 0.0146075439453125,
544
+ "epoch": 0.54,
545
+ "grad_norm": 0.007414950989186764,
546
+ "learning_rate": 8.565180099749355e-05,
547
+ "loss": 0.0125,
548
+ "mean_token_accuracy": 0.9966325616836548,
549
+ "num_tokens": 53064519.0,
550
+ "step": 2700
551
+ },
552
+ {
553
+ "entropy": 0.0161309814453125,
554
+ "epoch": 0.55,
555
+ "grad_norm": 0.0070669627748429775,
556
+ "learning_rate": 8.50794562507961e-05,
557
+ "loss": 0.013,
558
+ "mean_token_accuracy": 0.9964843678474427,
559
+ "num_tokens": 54043844.0,
560
+ "step": 2750
561
+ },
562
+ {
563
+ "entropy": 0.015706787109375,
564
+ "epoch": 0.56,
565
+ "grad_norm": 0.005744470749050379,
566
+ "learning_rate": 8.449791252466819e-05,
567
+ "loss": 0.013,
568
+ "mean_token_accuracy": 0.9965711253881454,
569
+ "num_tokens": 55026422.0,
570
+ "step": 2800
571
+ },
572
+ {
573
+ "entropy": 0.01641357421875,
574
+ "epoch": 0.57,
575
+ "grad_norm": 0.0069727362133562565,
576
+ "learning_rate": 8.390732231887314e-05,
577
+ "loss": 0.0129,
578
+ "mean_token_accuracy": 0.996541822552681,
579
+ "num_tokens": 56009908.0,
580
+ "step": 2850
581
+ },
582
+ {
583
+ "entropy": 0.016571044921875,
584
+ "epoch": 0.58,
585
+ "grad_norm": 0.007201408036053181,
586
+ "learning_rate": 8.330784050545672e-05,
587
+ "loss": 0.0133,
588
+ "mean_token_accuracy": 0.9964863443374634,
589
+ "num_tokens": 56948904.0,
590
+ "step": 2900
591
+ },
592
+ {
593
+ "entropy": 0.0156451416015625,
594
+ "epoch": 0.59,
595
+ "grad_norm": 0.005370237864553928,
596
+ "learning_rate": 8.269962428813474e-05,
597
+ "loss": 0.0128,
598
+ "mean_token_accuracy": 0.9966201782226562,
599
+ "num_tokens": 57925745.0,
600
+ "step": 2950
601
+ },
602
+ {
603
+ "entropy": 0.01558837890625,
604
+ "epoch": 0.6,
605
+ "grad_norm": 0.005006297491490841,
606
+ "learning_rate": 8.208283316106902e-05,
607
+ "loss": 0.0127,
608
+ "mean_token_accuracy": 0.9966268092393875,
609
+ "num_tokens": 58902798.0,
610
+ "step": 3000
611
+ },
612
+ {
613
+ "entropy": 0.015733642578125,
614
+ "epoch": 0.61,
615
+ "grad_norm": 0.06520090997219086,
616
+ "learning_rate": 8.145762886704286e-05,
617
+ "loss": 0.013,
618
+ "mean_token_accuracy": 0.9965688252449035,
619
+ "num_tokens": 59860890.0,
620
+ "step": 3050
621
+ },
622
+ {
623
+ "entropy": 0.015631103515625,
624
+ "epoch": 0.62,
625
+ "grad_norm": 0.017850181087851524,
626
+ "learning_rate": 8.082417535504683e-05,
627
+ "loss": 0.0134,
628
+ "mean_token_accuracy": 0.9963969469070435,
629
+ "num_tokens": 60845473.0,
630
+ "step": 3100
631
+ },
632
+ {
633
+ "entropy": 0.0175927734375,
634
+ "epoch": 0.63,
635
+ "grad_norm": 0.017514687031507492,
636
+ "learning_rate": 8.018263873728585e-05,
637
+ "loss": 0.014,
638
+ "mean_token_accuracy": 0.9963253819942475,
639
+ "num_tokens": 61784192.0,
640
+ "step": 3150
641
+ },
642
+ {
643
+ "entropy": 0.0167742919921875,
644
+ "epoch": 0.64,
645
+ "grad_norm": 0.008665528148412704,
646
+ "learning_rate": 7.953318724561932e-05,
647
+ "loss": 0.0132,
648
+ "mean_token_accuracy": 0.9964195656776428,
649
+ "num_tokens": 62766207.0,
650
+ "step": 3200
651
+ },
652
+ {
653
+ "entropy": 0.0157501220703125,
654
+ "epoch": 0.65,
655
+ "grad_norm": 0.019448991864919662,
656
+ "learning_rate": 7.887599118744509e-05,
657
+ "loss": 0.0128,
658
+ "mean_token_accuracy": 0.9966711962223053,
659
+ "num_tokens": 63762073.0,
660
+ "step": 3250
661
+ },
662
+ {
663
+ "entropy": 0.0180609130859375,
664
+ "epoch": 0.66,
665
+ "grad_norm": 0.004935233388096094,
666
+ "learning_rate": 7.821122290103938e-05,
667
+ "loss": 0.0133,
668
+ "mean_token_accuracy": 0.9964338165521621,
669
+ "num_tokens": 64710323.0,
670
+ "step": 3300
671
+ },
672
+ {
673
+ "entropy": 0.015982666015625,
674
+ "epoch": 0.67,
675
+ "grad_norm": 0.004395989701151848,
676
+ "learning_rate": 7.753905671036403e-05,
677
+ "loss": 0.0128,
678
+ "mean_token_accuracy": 0.9966453295946122,
679
+ "num_tokens": 65703355.0,
680
+ "step": 3350
681
+ },
682
+ {
683
+ "entropy": 0.0160992431640625,
684
+ "epoch": 0.68,
685
+ "grad_norm": 0.01108239684253931,
686
+ "learning_rate": 7.685966887935309e-05,
687
+ "loss": 0.0128,
688
+ "mean_token_accuracy": 0.996572095155716,
689
+ "num_tokens": 66693270.0,
690
+ "step": 3400
691
+ },
692
+ {
693
+ "entropy": 0.0156439208984375,
694
+ "epoch": 0.69,
695
+ "grad_norm": 0.007867238484323025,
696
+ "learning_rate": 7.617323756569053e-05,
697
+ "loss": 0.013,
698
+ "mean_token_accuracy": 0.9965278363227844,
699
+ "num_tokens": 67662491.0,
700
+ "step": 3450
701
+ },
702
+ {
703
+ "entropy": 0.0155462646484375,
704
+ "epoch": 0.7,
705
+ "grad_norm": 0.006034135352820158,
706
+ "learning_rate": 7.547994277409168e-05,
707
+ "loss": 0.0128,
708
+ "mean_token_accuracy": 0.9966409939527512,
709
+ "num_tokens": 68643197.0,
710
+ "step": 3500
711
+ },
712
+ {
713
+ "entropy": 0.015638427734375,
714
+ "epoch": 0.71,
715
+ "grad_norm": 0.007907884195446968,
716
+ "learning_rate": 7.477996630909994e-05,
717
+ "loss": 0.0127,
718
+ "mean_token_accuracy": 0.9966798382997513,
719
+ "num_tokens": 69638359.0,
720
+ "step": 3550
721
+ },
722
+ {
723
+ "entropy": 0.0161322021484375,
724
+ "epoch": 0.72,
725
+ "grad_norm": 0.007897689007222652,
726
+ "learning_rate": 7.40734917274118e-05,
727
+ "loss": 0.0127,
728
+ "mean_token_accuracy": 0.9966677987575531,
729
+ "num_tokens": 70621331.0,
730
+ "step": 3600
731
+ },
732
+ {
733
+ "entropy": 0.015335693359375,
734
+ "epoch": 0.73,
735
+ "grad_norm": 0.006248582154512405,
736
+ "learning_rate": 7.336070428974218e-05,
737
+ "loss": 0.0127,
738
+ "mean_token_accuracy": 0.9966181468963623,
739
+ "num_tokens": 71609172.0,
740
+ "step": 3650
741
+ },
742
+ {
743
+ "entropy": 0.0166754150390625,
744
+ "epoch": 0.74,
745
+ "grad_norm": 0.009811230935156345,
746
+ "learning_rate": 7.26417909122431e-05,
747
+ "loss": 0.0127,
748
+ "mean_token_accuracy": 0.9965909200906754,
749
+ "num_tokens": 72589978.0,
750
+ "step": 3700
751
+ },
752
+ {
753
+ "entropy": 0.0143695068359375,
754
+ "epoch": 0.75,
755
+ "grad_norm": 0.012172735296189785,
756
+ "learning_rate": 7.191694011748818e-05,
757
+ "loss": 0.0131,
758
+ "mean_token_accuracy": 0.9965182906389236,
759
+ "num_tokens": 73606622.0,
760
+ "step": 3750
761
+ },
762
+ {
763
+ "entropy": 0.0156829833984375,
764
+ "epoch": 0.76,
765
+ "grad_norm": 0.004879123531281948,
766
+ "learning_rate": 7.118634198503571e-05,
767
+ "loss": 0.0129,
768
+ "mean_token_accuracy": 0.9965479630231857,
769
+ "num_tokens": 74626982.0,
770
+ "step": 3800
771
+ },
772
+ {
773
+ "entropy": 0.016346435546875,
774
+ "epoch": 0.77,
775
+ "grad_norm": 0.005593061912804842,
776
+ "learning_rate": 7.045018810158375e-05,
777
+ "loss": 0.0127,
778
+ "mean_token_accuracy": 0.9966219502687454,
779
+ "num_tokens": 75622857.0,
780
+ "step": 3850
781
+ },
782
+ {
783
+ "entropy": 0.015889892578125,
784
+ "epoch": 0.78,
785
+ "grad_norm": 0.006423663813620806,
786
+ "learning_rate": 6.97086715107298e-05,
787
+ "loss": 0.0128,
788
+ "mean_token_accuracy": 0.9966537064313888,
789
+ "num_tokens": 76601614.0,
790
+ "step": 3900
791
+ },
792
+ {
793
+ "entropy": 0.015166015625,
794
+ "epoch": 0.79,
795
+ "grad_norm": 0.009724115021526814,
796
+ "learning_rate": 6.896198666234833e-05,
797
+ "loss": 0.0125,
798
+ "mean_token_accuracy": 0.9966633880138397,
799
+ "num_tokens": 77598059.0,
800
+ "step": 3950
801
+ },
802
+ {
803
+ "entropy": 0.0157177734375,
804
+ "epoch": 0.8,
805
+ "grad_norm": 0.014047837816178799,
806
+ "learning_rate": 6.821032936159986e-05,
807
+ "loss": 0.0128,
808
+ "mean_token_accuracy": 0.996613291501999,
809
+ "num_tokens": 78570180.0,
810
+ "step": 4000
811
+ },
812
+ {
813
+ "entropy": 0.0155902099609375,
814
+ "epoch": 0.81,
815
+ "grad_norm": 0.008200579322874546,
816
+ "learning_rate": 6.745389671758435e-05,
817
+ "loss": 0.0127,
818
+ "mean_token_accuracy": 0.9965583562850953,
819
+ "num_tokens": 79561943.0,
820
+ "step": 4050
821
+ },
822
+ {
823
+ "entropy": 0.014603271484375,
824
+ "epoch": 0.82,
825
+ "grad_norm": 0.0051344577223062515,
826
+ "learning_rate": 6.669288709165276e-05,
827
+ "loss": 0.0122,
828
+ "mean_token_accuracy": 0.9967030560970307,
829
+ "num_tokens": 80576757.0,
830
+ "step": 4100
831
+ },
832
+ {
833
+ "entropy": 0.0161895751953125,
834
+ "epoch": 0.83,
835
+ "grad_norm": 0.00540529889985919,
836
+ "learning_rate": 6.59275000453902e-05,
837
+ "loss": 0.0127,
838
+ "mean_token_accuracy": 0.9966316163539887,
839
+ "num_tokens": 81549255.0,
840
+ "step": 4150
841
+ },
842
+ {
843
+ "entropy": 0.01657470703125,
844
+ "epoch": 0.84,
845
+ "grad_norm": 0.005644636228680611,
846
+ "learning_rate": 6.515793628828447e-05,
847
+ "loss": 0.0131,
848
+ "mean_token_accuracy": 0.9965110903978348,
849
+ "num_tokens": 82499683.0,
850
+ "step": 4200
851
+ },
852
+ {
853
+ "entropy": 0.0160162353515625,
854
+ "epoch": 0.85,
855
+ "grad_norm": 0.004190398845821619,
856
+ "learning_rate": 6.438439762509332e-05,
857
+ "loss": 0.0127,
858
+ "mean_token_accuracy": 0.9966331452131272,
859
+ "num_tokens": 83475810.0,
860
+ "step": 4250
861
+ },
862
+ {
863
+ "entropy": 0.01598388671875,
864
+ "epoch": 0.86,
865
+ "grad_norm": 0.005400759633630514,
866
+ "learning_rate": 6.360708690292479e-05,
867
+ "loss": 0.0129,
868
+ "mean_token_accuracy": 0.9966162234544754,
869
+ "num_tokens": 84432102.0,
870
+ "step": 4300
871
+ },
872
+ {
873
+ "entropy": 0.014876708984375,
874
+ "epoch": 0.87,
875
+ "grad_norm": 0.012628648430109024,
876
+ "learning_rate": 6.2826207958044e-05,
877
+ "loss": 0.0124,
878
+ "mean_token_accuracy": 0.9967326593399047,
879
+ "num_tokens": 85429626.0,
880
+ "step": 4350
881
+ },
882
+ {
883
+ "entropy": 0.014339599609375,
884
+ "epoch": 0.88,
885
+ "grad_norm": 0.009649231098592281,
886
+ "learning_rate": 6.204196556242061e-05,
887
+ "loss": 0.0121,
888
+ "mean_token_accuracy": 0.9967543077468872,
889
+ "num_tokens": 86450572.0,
890
+ "step": 4400
891
+ },
892
+ {
893
+ "entropy": 0.014298095703125,
894
+ "epoch": 0.89,
895
+ "grad_norm": 0.0049432734958827496,
896
+ "learning_rate": 6.125456537003095e-05,
897
+ "loss": 0.0122,
898
+ "mean_token_accuracy": 0.9967639350891113,
899
+ "num_tokens": 87458843.0,
900
+ "step": 4450
901
+ },
902
+ {
903
+ "entropy": 0.015306396484375,
904
+ "epoch": 0.9,
905
+ "grad_norm": 0.004098173696547747,
906
+ "learning_rate": 6.046421386292887e-05,
907
+ "loss": 0.0123,
908
+ "mean_token_accuracy": 0.9966574615240097,
909
+ "num_tokens": 88453316.0,
910
+ "step": 4500
911
+ },
912
+ {
913
+ "entropy": 0.016375732421875,
914
+ "epoch": 0.91,
915
+ "grad_norm": 0.005302398465573788,
916
+ "learning_rate": 5.9671118297099274e-05,
917
+ "loss": 0.0125,
918
+ "mean_token_accuracy": 0.9966228741407395,
919
+ "num_tokens": 89427450.0,
920
+ "step": 4550
921
+ },
922
+ {
923
+ "entropy": 0.015936279296875,
924
+ "epoch": 0.92,
925
+ "grad_norm": 0.004837509244680405,
926
+ "learning_rate": 5.887548664810896e-05,
927
+ "loss": 0.0126,
928
+ "mean_token_accuracy": 0.9966443425416946,
929
+ "num_tokens": 90400676.0,
930
+ "step": 4600
931
+ },
932
+ {
933
+ "entropy": 0.0147259521484375,
934
+ "epoch": 0.93,
935
+ "grad_norm": 0.008831442333757877,
936
+ "learning_rate": 5.8077527556568614e-05,
937
+ "loss": 0.0124,
938
+ "mean_token_accuracy": 0.9967519730329514,
939
+ "num_tokens": 91394757.0,
940
+ "step": 4650
941
+ },
942
+ {
943
+ "entropy": 0.01557373046875,
944
+ "epoch": 0.94,
945
+ "grad_norm": 0.0063737258315086365,
946
+ "learning_rate": 5.727745027342031e-05,
947
+ "loss": 0.0125,
948
+ "mean_token_accuracy": 0.9966205650568009,
949
+ "num_tokens": 92386137.0,
950
+ "step": 4700
951
+ },
952
+ {
953
+ "entropy": 0.0151513671875,
954
+ "epoch": 0.95,
955
+ "grad_norm": 0.005380143877118826,
956
+ "learning_rate": 5.64754646050652e-05,
957
+ "loss": 0.0125,
958
+ "mean_token_accuracy": 0.9965577948093415,
959
+ "num_tokens": 93374756.0,
960
+ "step": 4750
961
+ },
962
+ {
963
+ "entropy": 0.0138897705078125,
964
+ "epoch": 0.96,
965
+ "grad_norm": 0.005837676115334034,
966
+ "learning_rate": 5.567178085834542e-05,
967
+ "loss": 0.0121,
968
+ "mean_token_accuracy": 0.9967484200000762,
969
+ "num_tokens": 94389846.0,
970
+ "step": 4800
971
+ },
972
+ {
973
+ "entropy": 0.0154791259765625,
974
+ "epoch": 0.97,
975
+ "grad_norm": 0.008733139373362064,
976
+ "learning_rate": 5.486660978539468e-05,
977
+ "loss": 0.0125,
978
+ "mean_token_accuracy": 0.9966599184274674,
979
+ "num_tokens": 95378637.0,
980
+ "step": 4850
981
+ },
982
+ {
983
+ "entropy": 0.015618896484375,
984
+ "epoch": 0.98,
985
+ "grad_norm": 0.016466081142425537,
986
+ "learning_rate": 5.406016252837219e-05,
987
+ "loss": 0.0126,
988
+ "mean_token_accuracy": 0.9966346627473831,
989
+ "num_tokens": 96357363.0,
990
+ "step": 4900
991
+ },
992
+ {
993
+ "entropy": 0.0171966552734375,
994
+ "epoch": 0.99,
995
+ "grad_norm": 0.007259083911776543,
996
+ "learning_rate": 5.325265056409442e-05,
997
+ "loss": 0.0129,
998
+ "mean_token_accuracy": 0.9965799397230148,
999
+ "num_tokens": 97295349.0,
1000
+ "step": 4950
1001
+ },
1002
+ {
1003
+ "entropy": 0.01597900390625,
1004
+ "epoch": 1.0,
1005
+ "grad_norm": 0.007410865277051926,
1006
+ "learning_rate": 5.244428564857874e-05,
1007
+ "loss": 0.0127,
1008
+ "mean_token_accuracy": 0.996621105670929,
1009
+ "num_tokens": 98262502.0,
1010
+ "step": 5000
1011
+ },
1012
+ {
1013
+ "entropy": 0.0154595947265625,
1014
+ "epoch": 1.01,
1015
+ "grad_norm": 0.0048329527489840984,
1016
+ "learning_rate": 5.163527976151428e-05,
1017
+ "loss": 0.0125,
1018
+ "mean_token_accuracy": 0.9966327011585235,
1019
+ "num_tokens": 99235771.0,
1020
+ "step": 5050
1021
+ },
1022
+ {
1023
+ "entropy": 0.0155859375,
1024
+ "epoch": 1.02,
1025
+ "grad_norm": 0.004490161780267954,
1026
+ "learning_rate": 5.0825845050673834e-05,
1027
+ "loss": 0.0129,
1028
+ "mean_token_accuracy": 0.9965754437446595,
1029
+ "num_tokens": 100195990.0,
1030
+ "step": 5100
1031
+ },
1032
+ {
1033
+ "entropy": 0.01652099609375,
1034
+ "epoch": 1.03,
1035
+ "grad_norm": 0.00914011336863041,
1036
+ "learning_rate": 5.0016193776281794e-05,
1037
+ "loss": 0.013,
1038
+ "mean_token_accuracy": 0.9964404582977295,
1039
+ "num_tokens": 101140160.0,
1040
+ "step": 5150
1041
+ },
1042
+ {
1043
+ "entropy": 0.015284423828125,
1044
+ "epoch": 1.04,
1045
+ "grad_norm": 0.0057498314417898655,
1046
+ "learning_rate": 4.920653825535258e-05,
1047
+ "loss": 0.0125,
1048
+ "mean_token_accuracy": 0.99665458381176,
1049
+ "num_tokens": 102128346.0,
1050
+ "step": 5200
1051
+ },
1052
+ {
1053
+ "entropy": 0.015015869140625,
1054
+ "epoch": 1.05,
1055
+ "grad_norm": 0.004660275299102068,
1056
+ "learning_rate": 4.839709080601419e-05,
1057
+ "loss": 0.0123,
1058
+ "mean_token_accuracy": 0.9967097079753876,
1059
+ "num_tokens": 103125113.0,
1060
+ "step": 5250
1061
+ },
1062
+ {
1063
+ "entropy": 0.0143218994140625,
1064
+ "epoch": 1.06,
1065
+ "grad_norm": 0.006082482635974884,
1066
+ "learning_rate": 4.758806369183149e-05,
1067
+ "loss": 0.0121,
1068
+ "mean_token_accuracy": 0.9967189061641694,
1069
+ "num_tokens": 104136988.0,
1070
+ "step": 5300
1071
+ },
1072
+ {
1073
+ "entropy": 0.0155865478515625,
1074
+ "epoch": 1.07,
1075
+ "grad_norm": 0.00519182626157999,
1076
+ "learning_rate": 4.6779669066143716e-05,
1077
+ "loss": 0.0124,
1078
+ "mean_token_accuracy": 0.9966564351320266,
1079
+ "num_tokens": 105122070.0,
1080
+ "step": 5350
1081
+ },
1082
+ {
1083
+ "entropy": 0.0145452880859375,
1084
+ "epoch": 1.08,
1085
+ "grad_norm": 0.005050841718912125,
1086
+ "learning_rate": 4.597211891643093e-05,
1087
+ "loss": 0.0122,
1088
+ "mean_token_accuracy": 0.9967609119415283,
1089
+ "num_tokens": 106117109.0,
1090
+ "step": 5400
1091
+ },
1092
+ {
1093
+ "entropy": 0.0144915771484375,
1094
+ "epoch": 1.09,
1095
+ "grad_norm": 0.004848674405366182,
1096
+ "learning_rate": 4.5165625008724035e-05,
1097
+ "loss": 0.0123,
1098
+ "mean_token_accuracy": 0.9967008078098297,
1099
+ "num_tokens": 107110319.0,
1100
+ "step": 5450
1101
+ },
1102
+ {
1103
+ "entropy": 0.0150311279296875,
1104
+ "epoch": 1.1,
1105
+ "grad_norm": 0.0036910909693688154,
1106
+ "learning_rate": 4.4360398832072764e-05,
1107
+ "loss": 0.0122,
1108
+ "mean_token_accuracy": 0.9967303448915481,
1109
+ "num_tokens": 108104820.0,
1110
+ "step": 5500
1111
+ },
1112
+ {
1113
+ "entropy": 0.015078125,
1114
+ "epoch": 1.11,
1115
+ "grad_norm": 0.0053949966095387936,
1116
+ "learning_rate": 4.3556651543086364e-05,
1117
+ "loss": 0.0125,
1118
+ "mean_token_accuracy": 0.9966647911071778,
1119
+ "num_tokens": 109091493.0,
1120
+ "step": 5550
1121
+ },
1122
+ {
1123
+ "entropy": 0.0147705078125,
1124
+ "epoch": 1.12,
1125
+ "grad_norm": 0.0037009185180068016,
1126
+ "learning_rate": 4.275459391056142e-05,
1127
+ "loss": 0.0124,
1128
+ "mean_token_accuracy": 0.9966964650154114,
1129
+ "num_tokens": 110077375.0,
1130
+ "step": 5600
1131
+ },
1132
+ {
1133
+ "entropy": 0.0162713623046875,
1134
+ "epoch": 1.13,
1135
+ "grad_norm": 0.0031313870567828417,
1136
+ "learning_rate": 4.195443626021139e-05,
1137
+ "loss": 0.0126,
1138
+ "mean_token_accuracy": 0.9966827458143235,
1139
+ "num_tokens": 111042295.0,
1140
+ "step": 5650
1141
+ },
1142
+ {
1143
+ "entropy": 0.0145147705078125,
1144
+ "epoch": 1.1400000000000001,
1145
+ "grad_norm": 0.004629215691238642,
1146
+ "learning_rate": 4.1156388419512324e-05,
1147
+ "loss": 0.0124,
1148
+ "mean_token_accuracy": 0.9966849237680435,
1149
+ "num_tokens": 112033836.0,
1150
+ "step": 5700
1151
+ },
1152
+ {
1153
+ "entropy": 0.0152557373046875,
1154
+ "epoch": 1.15,
1155
+ "grad_norm": 0.00527191162109375,
1156
+ "learning_rate": 4.0360659662679265e-05,
1157
+ "loss": 0.0125,
1158
+ "mean_token_accuracy": 0.996674497127533,
1159
+ "num_tokens": 113006848.0,
1160
+ "step": 5750
1161
+ },
1162
+ {
1163
+ "entropy": 0.0149151611328125,
1164
+ "epoch": 1.16,
1165
+ "grad_norm": 0.004986864514648914,
1166
+ "learning_rate": 3.956745865578773e-05,
1167
+ "loss": 0.0124,
1168
+ "mean_token_accuracy": 0.9966980391740798,
1169
+ "num_tokens": 113998253.0,
1170
+ "step": 5800
1171
+ },
1172
+ {
1173
+ "entropy": 0.015072021484375,
1174
+ "epoch": 1.17,
1175
+ "grad_norm": 0.004452258348464966,
1176
+ "learning_rate": 3.877699340205455e-05,
1177
+ "loss": 0.0125,
1178
+ "mean_token_accuracy": 0.9966688090562821,
1179
+ "num_tokens": 114979026.0,
1180
+ "step": 5850
1181
+ },
1182
+ {
1183
+ "entropy": 0.0157171630859375,
1184
+ "epoch": 1.18,
1185
+ "grad_norm": 0.0048246984370052814,
1186
+ "learning_rate": 3.798947118729282e-05,
1187
+ "loss": 0.0126,
1188
+ "mean_token_accuracy": 0.9966557210683823,
1189
+ "num_tokens": 115947279.0,
1190
+ "step": 5900
1191
+ },
1192
+ {
1193
+ "entropy": 0.015772705078125,
1194
+ "epoch": 1.19,
1195
+ "grad_norm": 0.003676283173263073,
1196
+ "learning_rate": 3.720509852555456e-05,
1197
+ "loss": 0.0127,
1198
+ "mean_token_accuracy": 0.9966129893064499,
1199
+ "num_tokens": 116911661.0,
1200
+ "step": 5950
1201
+ },
1202
+ {
1203
+ "entropy": 0.01443603515625,
1204
+ "epoch": 1.2,
1205
+ "grad_norm": 0.004810075741261244,
1206
+ "learning_rate": 3.642408110497616e-05,
1207
+ "loss": 0.0125,
1208
+ "mean_token_accuracy": 0.9966686731576919,
1209
+ "num_tokens": 117890306.0,
1210
+ "step": 6000
1211
+ },
1212
+ {
1213
+ "entropy": 0.0159271240234375,
1214
+ "epoch": 1.21,
1215
+ "grad_norm": 0.003690144745633006,
1216
+ "learning_rate": 3.5646623733840134e-05,
1217
+ "loss": 0.0126,
1218
+ "mean_token_accuracy": 0.9966743797063827,
1219
+ "num_tokens": 118844495.0,
1220
+ "step": 6050
1221
+ },
1222
+ {
1223
+ "entropy": 0.0148345947265625,
1224
+ "epoch": 1.22,
1225
+ "grad_norm": 0.0036596781574189663,
1226
+ "learning_rate": 3.4872930286867763e-05,
1227
+ "loss": 0.0127,
1228
+ "mean_token_accuracy": 0.9966356498003006,
1229
+ "num_tokens": 119807321.0,
1230
+ "step": 6100
1231
+ },
1232
+ {
1233
+ "entropy": 0.0147174072265625,
1234
+ "epoch": 1.23,
1235
+ "grad_norm": 0.0046323081478476524,
1236
+ "learning_rate": 3.4103203651756403e-05,
1237
+ "loss": 0.0123,
1238
+ "mean_token_accuracy": 0.9967032814025879,
1239
+ "num_tokens": 120794059.0,
1240
+ "step": 6150
1241
+ },
1242
+ {
1243
+ "entropy": 0.0155657958984375,
1244
+ "epoch": 1.24,
1245
+ "grad_norm": 0.005745012313127518,
1246
+ "learning_rate": 3.333764567597579e-05,
1247
+ "loss": 0.0125,
1248
+ "mean_token_accuracy": 0.996661651134491,
1249
+ "num_tokens": 121765426.0,
1250
+ "step": 6200
1251
+ },
1252
+ {
1253
+ "entropy": 0.0151751708984375,
1254
+ "epoch": 1.25,
1255
+ "grad_norm": 0.00422937236726284,
1256
+ "learning_rate": 3.257645711383699e-05,
1257
+ "loss": 0.0124,
1258
+ "mean_token_accuracy": 0.9967177057266235,
1259
+ "num_tokens": 122747186.0,
1260
+ "step": 6250
1261
+ },
1262
+ {
1263
+ "entropy": 0.014896240234375,
1264
+ "epoch": 1.26,
1265
+ "grad_norm": 0.0041396524757146835,
1266
+ "learning_rate": 3.1819837573848055e-05,
1267
+ "loss": 0.0123,
1268
+ "mean_token_accuracy": 0.9967236334085464,
1269
+ "num_tokens": 123735045.0,
1270
+ "step": 6300
1271
+ },
1272
+ {
1273
+ "entropy": 0.014747314453125,
1274
+ "epoch": 1.27,
1275
+ "grad_norm": 0.0034245741553604603,
1276
+ "learning_rate": 3.106798546637019e-05,
1277
+ "loss": 0.012,
1278
+ "mean_token_accuracy": 0.9968506091833115,
1279
+ "num_tokens": 124748439.0,
1280
+ "step": 6350
1281
+ },
1282
+ {
1283
+ "entropy": 0.014637451171875,
1284
+ "epoch": 1.28,
1285
+ "grad_norm": 0.0044257547706365585,
1286
+ "learning_rate": 3.0321097951588016e-05,
1287
+ "loss": 0.0125,
1288
+ "mean_token_accuracy": 0.996695158481598,
1289
+ "num_tokens": 125723365.0,
1290
+ "step": 6400
1291
+ },
1292
+ {
1293
+ "entropy": 0.0153790283203125,
1294
+ "epoch": 1.29,
1295
+ "grad_norm": 0.005239363294094801,
1296
+ "learning_rate": 2.957937088780779e-05,
1297
+ "loss": 0.0126,
1298
+ "mean_token_accuracy": 0.9966009676456451,
1299
+ "num_tokens": 126678062.0,
1300
+ "step": 6450
1301
+ },
1302
+ {
1303
+ "entropy": 0.0145068359375,
1304
+ "epoch": 1.3,
1305
+ "grad_norm": 0.005349221173673868,
1306
+ "learning_rate": 2.8842998780096896e-05,
1307
+ "loss": 0.0123,
1308
+ "mean_token_accuracy": 0.9967689532041549,
1309
+ "num_tokens": 127675372.0,
1310
+ "step": 6500
1311
+ },
1312
+ {
1313
+ "entropy": 0.0149395751953125,
1314
+ "epoch": 1.31,
1315
+ "grad_norm": 0.005388084799051285,
1316
+ "learning_rate": 2.811217472927835e-05,
1317
+ "loss": 0.0123,
1318
+ "mean_token_accuracy": 0.9967477923631668,
1319
+ "num_tokens": 128665669.0,
1320
+ "step": 6550
1321
+ },
1322
+ {
1323
+ "entropy": 0.0159820556640625,
1324
+ "epoch": 1.32,
1325
+ "grad_norm": 0.006312755402177572,
1326
+ "learning_rate": 2.7387090381293372e-05,
1327
+ "loss": 0.0127,
1328
+ "mean_token_accuracy": 0.9966998076438904,
1329
+ "num_tokens": 129622542.0,
1330
+ "step": 6600
1331
+ },
1332
+ {
1333
+ "entropy": 0.016163330078125,
1334
+ "epoch": 1.33,
1335
+ "grad_norm": 0.005128033459186554,
1336
+ "learning_rate": 2.6667935876945582e-05,
1337
+ "loss": 0.0124,
1338
+ "mean_token_accuracy": 0.9966605240106583,
1339
+ "num_tokens": 130598823.0,
1340
+ "step": 6650
1341
+ },
1342
+ {
1343
+ "entropy": 0.017476806640625,
1344
+ "epoch": 1.34,
1345
+ "grad_norm": 0.005179600324481726,
1346
+ "learning_rate": 2.59548998020399e-05,
1347
+ "loss": 0.0127,
1348
+ "mean_token_accuracy": 0.996593764424324,
1349
+ "num_tokens": 131552204.0,
1350
+ "step": 6700
1351
+ },
1352
+ {
1353
+ "entropy": 0.0151275634765625,
1354
+ "epoch": 1.35,
1355
+ "grad_norm": 0.005807869601994753,
1356
+ "learning_rate": 2.5248169137929156e-05,
1357
+ "loss": 0.0126,
1358
+ "mean_token_accuracy": 0.9966244864463806,
1359
+ "num_tokens": 132526021.0,
1360
+ "step": 6750
1361
+ },
1362
+ {
1363
+ "entropy": 0.015770263671875,
1364
+ "epoch": 1.3599999999999999,
1365
+ "grad_norm": 0.005202575121074915,
1366
+ "learning_rate": 2.4547929212481435e-05,
1367
+ "loss": 0.0127,
1368
+ "mean_token_accuracy": 0.9966981512308121,
1369
+ "num_tokens": 133483207.0,
1370
+ "step": 6800
1371
+ },
1372
+ {
1373
+ "entropy": 0.014925537109375,
1374
+ "epoch": 1.37,
1375
+ "grad_norm": 0.003400217741727829,
1376
+ "learning_rate": 2.3854363651481194e-05,
1377
+ "loss": 0.0122,
1378
+ "mean_token_accuracy": 0.9967307335138321,
1379
+ "num_tokens": 134482802.0,
1380
+ "step": 6850
1381
+ },
1382
+ {
1383
+ "entropy": 0.0151629638671875,
1384
+ "epoch": 1.38,
1385
+ "grad_norm": 0.004814519081264734,
1386
+ "learning_rate": 2.3167654330476412e-05,
1387
+ "loss": 0.0121,
1388
+ "mean_token_accuracy": 0.996745794415474,
1389
+ "num_tokens": 135484861.0,
1390
+ "step": 6900
1391
+ },
1392
+ {
1393
+ "entropy": 0.0138885498046875,
1394
+ "epoch": 1.3900000000000001,
1395
+ "grad_norm": 0.0049650054425001144,
1396
+ "learning_rate": 2.2487981327084995e-05,
1397
+ "loss": 0.012,
1398
+ "mean_token_accuracy": 0.9967457884550095,
1399
+ "num_tokens": 136499177.0,
1400
+ "step": 6950
1401
+ },
1402
+ {
1403
+ "entropy": 0.014207763671875,
1404
+ "epoch": 1.4,
1405
+ "grad_norm": 0.0068522971123456955,
1406
+ "learning_rate": 2.1815522873772475e-05,
1407
+ "loss": 0.0121,
1408
+ "mean_token_accuracy": 0.9967319291830062,
1409
+ "num_tokens": 137503523.0,
1410
+ "step": 7000
1411
+ },
1412
+ {
1413
+ "entropy": 0.0145257568359375,
1414
+ "epoch": 1.41,
1415
+ "grad_norm": 0.00624112319201231,
1416
+ "learning_rate": 2.1150455311113708e-05,
1417
+ "loss": 0.0122,
1418
+ "mean_token_accuracy": 0.9966984778642655,
1419
+ "num_tokens": 138497821.0,
1420
+ "step": 7050
1421
+ },
1422
+ {
1423
+ "entropy": 0.0151031494140625,
1424
+ "epoch": 1.42,
1425
+ "grad_norm": 0.00566850695759058,
1426
+ "learning_rate": 2.0492953041550483e-05,
1427
+ "loss": 0.0123,
1428
+ "mean_token_accuracy": 0.9967049795389176,
1429
+ "num_tokens": 139480692.0,
1430
+ "step": 7100
1431
+ },
1432
+ {
1433
+ "entropy": 0.014796142578125,
1434
+ "epoch": 1.43,
1435
+ "grad_norm": 0.00434489268809557,
1436
+ "learning_rate": 1.9843188483657697e-05,
1437
+ "loss": 0.0121,
1438
+ "mean_token_accuracy": 0.9967473483085633,
1439
+ "num_tokens": 140475746.0,
1440
+ "step": 7150
1441
+ },
1442
+ {
1443
+ "entropy": 0.0156658935546875,
1444
+ "epoch": 1.44,
1445
+ "grad_norm": 0.004734216723591089,
1446
+ "learning_rate": 1.9201332026929396e-05,
1447
+ "loss": 0.0125,
1448
+ "mean_token_accuracy": 0.9966971826553345,
1449
+ "num_tokens": 141443323.0,
1450
+ "step": 7200
1451
+ },
1452
+ {
1453
+ "entropy": 0.0151654052734375,
1454
+ "epoch": 1.45,
1455
+ "grad_norm": 0.004659540485590696,
1456
+ "learning_rate": 1.8567551987097188e-05,
1457
+ "loss": 0.0121,
1458
+ "mean_token_accuracy": 0.9967643207311631,
1459
+ "num_tokens": 142442496.0,
1460
+ "step": 7250
1461
+ },
1462
+ {
1463
+ "entropy": 0.0146234130859375,
1464
+ "epoch": 1.46,
1465
+ "grad_norm": 0.0037653304170817137,
1466
+ "learning_rate": 1.794201456199231e-05,
1467
+ "loss": 0.0122,
1468
+ "mean_token_accuracy": 0.9967849618196487,
1469
+ "num_tokens": 143436493.0,
1470
+ "step": 7300
1471
+ },
1472
+ {
1473
+ "entropy": 0.016064453125,
1474
+ "epoch": 1.47,
1475
+ "grad_norm": 0.004325070418417454,
1476
+ "learning_rate": 1.732488378796311e-05,
1477
+ "loss": 0.0125,
1478
+ "mean_token_accuracy": 0.9967092323303223,
1479
+ "num_tokens": 144400422.0,
1480
+ "step": 7350
1481
+ },
1482
+ {
1483
+ "entropy": 0.0147503662109375,
1484
+ "epoch": 1.48,
1485
+ "grad_norm": 0.0051532830111682415,
1486
+ "learning_rate": 1.671632149685943e-05,
1487
+ "loss": 0.012,
1488
+ "mean_token_accuracy": 0.9967712318897247,
1489
+ "num_tokens": 145397669.0,
1490
+ "step": 7400
1491
+ },
1492
+ {
1493
+ "entropy": 0.0142218017578125,
1494
+ "epoch": 1.49,
1495
+ "grad_norm": 0.01885133981704712,
1496
+ "learning_rate": 1.611648727359498e-05,
1497
+ "loss": 0.0122,
1498
+ "mean_token_accuracy": 0.9967398834228516,
1499
+ "num_tokens": 146393328.0,
1500
+ "step": 7450
1501
+ },
1502
+ {
1503
+ "entropy": 0.015579833984375,
1504
+ "epoch": 1.5,
1505
+ "grad_norm": 0.00416885782033205,
1506
+ "learning_rate": 1.5525538414298925e-05,
1507
+ "loss": 0.0125,
1508
+ "mean_token_accuracy": 0.9966780996322632,
1509
+ "num_tokens": 147355946.0,
1510
+ "step": 7500
1511
+ }
1512
+ ],
1513
+ "logging_steps": 50,
1514
+ "max_steps": 10000,
1515
+ "num_input_tokens_seen": 0,
1516
+ "num_train_epochs": 2,
1517
+ "save_steps": 500,
1518
+ "stateful_callbacks": {
1519
+ "TrainerControl": {
1520
+ "args": {
1521
+ "should_epoch_stop": false,
1522
+ "should_evaluate": false,
1523
+ "should_log": false,
1524
+ "should_save": true,
1525
+ "should_training_stop": false
1526
+ },
1527
+ "attributes": {}
1528
+ }
1529
+ },
1530
+ "total_flos": 2.559137821805773e+18,
1531
+ "train_batch_size": 1,
1532
+ "trial_name": null,
1533
+ "trial_params": null
1534
+ }
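The log_history above is plain JSON, so training curves can be pulled out of any of these checkpoints without loading the Trainer. A minimal sketch in Python, assuming the file path from this diff (run from the repo root):

import json

with open("qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/trainer_state.json") as f:
    state = json.load(f)

# one entry per 50 optimizer steps ("logging_steps": 50); print the last few
for entry in state["log_history"][-3:]:
    print(entry["step"], entry["loss"], entry["learning_rate"], entry["mean_token_accuracy"])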
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/zero_to_fp32.py ADDED
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+#   python zero_to_fp32.py . output_dir/
+# or
+#   python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+    buffers: dict()
+    param_shapes: dict()
+    shared_params: list
+    ds_version: int
+    frozen_param_shapes: dict()
+    frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+    return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+    '''
+    alist.sort(key=natural_keys) sorts in human order
+    http://nedbatchelder.com/blog/200712/human_sorting.html
+    (See Toothy's implementation in the comments)
+    '''
+    return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+    if not os.path.isdir(checkpoint_dir):
+        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+    # there should be only one file
+    if zero_stage <= 2:
+        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+    elif zero_stage == 3:
+        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+    if not os.path.exists(file):
+        raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+    return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+    # XXX: need to test that this simple glob rule works for multi-node setup too
+    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+    if len(ckpt_files) == 0:
+        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+    return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+    zero_model_states = []
+    for file in files:
+        state_dict = torch.load(file, map_location=device, weights_only=False)
+
+        if BUFFER_NAMES not in state_dict:
+            raise ValueError(f"{file} is not a model state checkpoint")
+        buffer_names = state_dict[BUFFER_NAMES]
+        if debug:
+            print("Found buffers:", buffer_names)
+
+        # recover just the buffers while restoring them to fp32 if they were saved in fp16
+        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+        param_shapes = state_dict[PARAM_SHAPES]
+
+        # collect parameters that are included in param_shapes
+        param_names = []
+        for s in param_shapes:
+            for name in s.keys():
+                param_names.append(name)
+
+        # update with frozen parameters
+        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+        if frozen_param_shapes is not None:
+            if debug:
+                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+            param_names += list(frozen_param_shapes.keys())
+
+        # handle shared params
+        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+        ds_version = state_dict.get(DS_VERSION, None)
+
+        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+        z_model_state = zero_model_state(buffers=buffers,
+                                         param_shapes=param_shapes,
+                                         shared_params=shared_params,
+                                         ds_version=ds_version,
+                                         frozen_param_shapes=frozen_param_shapes,
+                                         frozen_param_fragments=frozen_param_fragments)
+        zero_model_states.append(z_model_state)
+
+    return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+    total_files = len(files)
+    state_dicts = []
+    for f in tqdm(files, desc='Loading checkpoint shards'):
+        state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
+        # and also handle the case where it was already removed by another helper script
+        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+        state_dicts.append(state_dict)
+
+    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+        raise ValueError(f"{files[0]} is not a zero checkpoint")
+    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+    # parameters can be different from data parallelism for non-expert parameters. So we can just
+    # use the max of the partition_count to get the dp world_size.
+
+    if type(world_size) is list:
+        world_size = max(world_size)
+
+    if world_size != total_files:
+        raise ValueError(
+            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+        )
+
+    # the groups are named differently in each stage
+    if zero_stage <= 2:
+        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+    elif zero_stage == 3:
+        fp32_groups_key = FP32_FLAT_GROUPS
+    else:
+        raise ValueError(f"unknown zero stage {zero_stage}")
+
+    fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+    return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+    """
+    Returns fp32 state_dict reconstructed from ds checkpoint
+
+    Args:
+        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+    """
+    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+    optim_files = get_optim_files(ds_checkpoint_dir)
+    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+    model_files = get_model_state_files(ds_checkpoint_dir)
+
+    zero_model_states = parse_model_states(model_files)
+    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+    if zero_stage <= 2:
+        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                          exclude_frozen_parameters)
+    elif zero_stage == 3:
+        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                          exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+        return
+
+    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+    frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+    if debug:
+        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+    wanted_params = len(frozen_param_shapes)
+    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+    print(f'Frozen params: Have {avail_numel} numels to process.')
+    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+    total_params = 0
+    total_numel = 0
+    for name, shape in frozen_param_shapes.items():
+        total_params += 1
+        unpartitioned_numel = shape.numel()
+        total_numel += unpartitioned_numel
+
+        state_dict[name] = frozen_param_fragments[name]
+
+        if debug:
+            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+    attr = getattr(obj, fn, None)
+    return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+    param_shapes = zero_model_states[0].param_shapes
+
+    # Reconstruction protocol:
+    #
+    # XXX: document this
+
+    if debug:
+        for i in range(world_size):
+            for j in range(len(fp32_flat_groups[0])):
+                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+    # XXX: memory usage doubles here (zero2)
+    num_param_groups = len(fp32_flat_groups[0])
+    merged_single_partition_of_fp32_groups = []
+    for i in range(num_param_groups):
+        merged_partitions = [sd[i] for sd in fp32_flat_groups]
+        full_single_fp32_vector = torch.cat(merged_partitions, 0)
+        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+    avail_numel = sum(
+        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+    if debug:
+        wanted_params = sum([len(shapes) for shapes in param_shapes])
+        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+        # not asserting if there is a mismatch due to possible padding
+        print(f"Have {avail_numel} numels to process.")
+        print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+    # params
+    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+    # out-of-core computing solution
+    total_numel = 0
+    total_params = 0
+    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+        offset = 0
+        avail_numel = full_single_fp32_vector.numel()
+        for name, shape in shapes.items():
+
+            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+            total_numel += unpartitioned_numel
+            total_params += 1
+
+            if debug:
+                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+            offset += unpartitioned_numel
+
+        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+        # live optimizer object, so we are checking that the numbers are within the right range
+        align_to = 2 * world_size
+
+        def zero2_align(x):
+            return align_to * math.ceil(x / align_to)
+
+        if debug:
+            print(f"original offset={offset}, avail_numel={avail_numel}")
+
+        offset = zero2_align(offset)
+        avail_numel = zero2_align(avail_numel)
+
+        if debug:
+            print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+        # Sanity check
+        if offset != avail_numel:
+            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                               exclude_frozen_parameters):
+    state_dict = OrderedDict()
+
+    # buffers
+    buffers = zero_model_states[0].buffers
+    state_dict.update(buffers)
+    if debug:
+        print(f"added {len(buffers)} buffers")
+
+    if not exclude_frozen_parameters:
+        _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+    # recover shared parameters
+    for pair in zero_model_states[0].shared_params:
+        if pair[1] in state_dict:
+            state_dict[pair[0]] = state_dict[pair[1]]
+
+    return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+    remainder = unpartitioned_numel % world_size
+    padding_numel = (world_size - remainder) if remainder else 0
+    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+    return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+        return
+
+    if debug:
+        for i in range(world_size):
+            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+    wanted_params = len(frozen_param_shapes)
+    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+    print(f'Frozen params: Have {avail_numel} numels to process.')
+    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+    total_params = 0
+    total_numel = 0
+    for name, shape in zero_model_states[0].frozen_param_shapes.items():
+        total_params += 1
+        unpartitioned_numel = shape.numel()
+        total_numel += unpartitioned_numel
+
+        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+        if debug:
+            print(
+                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+            )
+
+    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+class GatheredTensor:
+    """
+    A pseudo tensor that collects partitioned weights.
+    It is more memory efficient when there are multiple groups.
+    """
+
+    def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+        self.flat_groups = flat_groups
+        self.flat_groups_offset = flat_groups_offset
+        self.offset = offset
+        self.partitioned_numel = partitioned_numel
+        self.shape = shape
+        self.dtype = self.flat_groups[0][0].dtype
+
+    def contiguous(self):
+        """
+        Merge partitioned weights from flat_groups into a single tensor.
+        """
+        end_idx = self.offset + self.partitioned_numel
+        world_size = len(self.flat_groups)
+        pad_flat_param_chunks = []
+
+        for rank_i in range(world_size):
+            # for each rank, we need to collect weights from related group/groups
+            flat_groups_at_rank_i = self.flat_groups[rank_i]
+            start_group_id = None
+            end_group_id = None
+            for group_id in range(len(self.flat_groups_offset)):
+                if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+                    start_group_id = group_id
+                if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+                    end_group_id = group_id
+                    break
+            # collect weights from related group/groups
+            for group_id in range(start_group_id, end_group_id + 1):
+                flat_tensor = flat_groups_at_rank_i[group_id]
+                start_offset = self.offset - self.flat_groups_offset[group_id]
+                end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+                pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+        # collect weights from all ranks
+        pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+        param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+        return param
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+    param_shapes = zero_model_states[0].param_shapes
+    avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+    # param, re-consolidating each param, while dealing with padding if any
+
+    # merge list of dicts, preserving order
+    param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+    if debug:
+        for i in range(world_size):
+            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+    wanted_params = len(param_shapes)
+    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+    # not asserting if there is a mismatch due to possible padding
+    avail_numel = fp32_flat_groups[0].numel() * world_size
+    print(f"Trainable params: Have {avail_numel} numels to process.")
+    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+    # params
+    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+    # out-of-core computing solution
+    offset = 0
+    total_numel = 0
+    total_params = 0
+    flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+    for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+        unpartitioned_numel = shape.numel()
+        total_numel += unpartitioned_numel
+        total_params += 1
+        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+        if debug:
+            print(
+                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+            )
+
+        # memory efficient tensor
+        tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+        state_dict[name] = tensor
+        offset += partitioned_numel
+
+    offset *= world_size
+
+    # Sanity check
+    if offset != avail_numel:
+        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                               exclude_frozen_parameters):
+    state_dict = OrderedDict()
+
+    # buffers
+    buffers = zero_model_states[0].buffers
+    state_dict.update(buffers)
+    if debug:
+        print(f"added {len(buffers)} buffers")
+
+    if not exclude_frozen_parameters:
+        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+    # recover shared parameters
+    for pair in zero_model_states[0].shared_params:
+        if pair[1] in state_dict:
+            state_dict[pair[0]] = state_dict[pair[1]]
+
+    return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+    """
+    Convert state_dict of GatheredTensor to torch tensor
+    """
+    torch_state_dict = {}
+    converted_tensors = {}
+    for name, tensor in state_dict.items():
+        tensor_id = id(tensor)
+        if tensor_id in converted_tensors:  # shared tensors
+            shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+            torch_state_dict[name] = shared_tensor
+        else:
+            converted_tensors[tensor_id] = name
+            if return_empty_tensor:
+                torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+            else:
+                torch_state_dict[name] = tensor.contiguous()
+    return torch_state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                             tag=None,
+                                             exclude_frozen_parameters=False,
+                                             lazy_mode=False):
+    """
+    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+    via a model hub.
+
+    Args:
+        - ``checkpoint_dir``: path to the desired checkpoint folder
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+        - ``exclude_frozen_parameters``: exclude frozen parameters
+        - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
+          Convert a pseudo tensor to a torch tensor by calling ``.contiguous()``
+
+    Returns:
+        - pytorch ``state_dict``
+
+    A typical usage might be ::
+
+        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+        # do the training and checkpoint saving
+        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+        model = model.cpu() # move to cpu
+        model.load_state_dict(state_dict)
+        # submit to model hub or save the model to share with others
+
+    In this example the ``model`` will no longer be usable in the deepspeed context of the same
+    application. i.e. you will need to re-initialize the deepspeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+    Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+    You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+    the checkpoint. Or you can load state_dict in lazy mode ::
+
+        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+        for name, lazy_tensor in state_dict.items():
+            tensor = lazy_tensor.contiguous() # to cpu
+            print(name, tensor)
+            # del tensor to release memory if it is no longer in use
+    """
+    if tag is None:
+        latest_path = os.path.join(checkpoint_dir, 'latest')
+        if os.path.isfile(latest_path):
+            with open(latest_path, 'r') as fd:
+                tag = fd.read().strip()
+        else:
+            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+    if not os.path.isdir(ds_checkpoint_dir):
+        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+    state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+    if lazy_mode:
+        return state_dict
+    else:
+        return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+                                               output_dir,
+                                               max_shard_size="5GB",
+                                               safe_serialization=False,
+                                               tag=None,
+                                               exclude_frozen_parameters=False):
+    """
+    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+    Args:
+        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+        - ``output_dir``: directory to the pytorch fp32 state_dict output files
+        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+        - ``exclude_frozen_parameters``: exclude frozen parameters
+    """
+
+    # Dependency pre-check
+    if safe_serialization:
+        try:
+            from safetensors.torch import save_file
+        except ImportError:
+            print('If you want to use `safe_serialization`, please `pip install safetensors`')
+            raise
+    if max_shard_size is not None:
+        try:
+            from huggingface_hub import split_torch_state_dict_into_shards
+        except ImportError:
+            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+            raise
+
+    # Convert zero checkpoint to state_dict
+    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                                          tag,
+                                                          exclude_frozen_parameters,
+                                                          lazy_mode=True)
+
+    # Shard the model if it is too big.
+    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+    if max_shard_size is not None:
+        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+        # a memory-efficient approach for sharding
+        empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+        state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+                                                              filename_pattern=filename_pattern,
+                                                              max_shard_size=max_shard_size)
+    else:
+        from collections import namedtuple
+        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+        state_dict_split = StateDictSplit(is_sharded=False,
+                                          filename_to_tensors={weights_name: list(state_dict.keys())})
+
+    # Save the model by shard
+    os.makedirs(output_dir, exist_ok=True)
+    filename_to_tensors = state_dict_split.filename_to_tensors.items()
+    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+        shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+        shard_state_dict = to_torch_tensor(shard_state_dict)
+        output_path = os.path.join(output_dir, shard_file)
+        if safe_serialization:
+            save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+        else:
+            torch.save(shard_state_dict, output_path)
+        # release the memory of current shard
+        for tensor_name in list(shard_state_dict.keys()):
+            del state_dict[tensor_name]
+            del shard_state_dict[tensor_name]
+        del shard_state_dict
+        gc.collect()
+
+    # Save index if sharded
+    if state_dict_split.is_sharded:
+        index = {
+            "metadata": state_dict_split.metadata,
+            "weight_map": state_dict_split.tensor_to_filename,
+        }
+        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+        save_index_file = os.path.join(output_dir, save_index_file)
+        with open(save_index_file, "w", encoding="utf-8") as f:
+            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+            f.write(content)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+    """
+    1. Put the provided model to cpu
+    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+    3. Load it into the provided model
+
+    Args:
+        - ``model``: the model object to update
+        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+    Returns:
+        - ``model``: modified model
+
+    Make sure you have plenty of CPU memory available before you call this function. If you don't
+    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+    conveniently placed for you in the checkpoint folder.
+
+    A typical usage might be ::
+
+        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+        # submit to model hub or save the model to share with others
+
+    Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
+    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+    """
+    logger.info("Extracting fp32 weights")
+    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+    logger.info("Overwriting model with fp32 weights")
+    model = model.cpu()
+    model.load_state_dict(state_dict, strict=False)
+
+    return model
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("checkpoint_dir",
+                        type=str,
+                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+    parser.add_argument("output_dir",
+                        type=str,
+                        help="directory for the pytorch fp32 state_dict output files "
+                        "(e.g. path/checkpoint-12-output/)")
+    parser.add_argument(
+        "--max_shard_size",
+        type=str,
+        default="5GB",
+        help="The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of a size "
+        "lower than this. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
+        "We default to 5GB so that models can run easily on free-tier Google Colab instances "
+        "without CPU OOM issues.")
+    parser.add_argument(
+        "--safe_serialization",
+        default=False,
+        action='store_true',
+        help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+    parser.add_argument("-t",
+                        "--tag",
+                        type=str,
+                        default=None,
+                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+    args = parser.parse_args()
+
+    debug = args.debug
+
+    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+                                               args.output_dir,
+                                               max_shard_size=args.max_shard_size,
+                                               safe_serialization=args.safe_serialization,
+                                               tag=args.tag,
+                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
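Because a copy of this script lands inside each checkpoint folder, converting one of these checkpoints mirrors the usage example in the script header. An illustrative invocation from the repo root, assuming the DeepSpeed `global_step*` folder referenced by the `latest` tag file was uploaded alongside the adapter files (the output directory name is arbitrary):

python qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500/zero_to_fp32.py \
    qwen2.5-coder-3B_r64_lr1e-4/checkpoint-7500 \
    checkpoint-7500-fp32/ --safe_serialization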
qwen2.5-coder-3B_r64_lr1e-4/checkpoint-8000/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
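As a quick sanity check, the mapping above can be confirmed by loading the checkpoint's tokenizer. A minimal sketch, assuming checkpoint-8000 also carries the tokenizer_config.json, vocab.json and merges.txt files that the earlier checkpoints in this commit include:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("qwen2.5-coder-3B_r64_lr1e-4/checkpoint-8000")
# both eos and pad map to <|endoftext|> per special_tokens_map.json
print(tok.eos_token, tok.pad_token)
print(tok.additional_special_tokens[:2])  # ['<|im_start|>', '<|im_end|>']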