starwarindia committed on
Commit e1ac6ff · verified · 1 Parent(s): 35a552c

Batch upload: checkpoint-1000 checkpoint-10000 checkpoint-100000 checkpoint-101000 checkpoint-102000 checkpoint-103000 checkpoint-104000 checkpoint-105000 checkpoint-106000 checkpoint-107000 checkpoint-108000 checkpoint-108168 checkpoint-11000 checkpoint-12000 checkpoint-13000 checkpoint-14000 checkpoint-15000 checkpoint-16000 checkpoint-17000 checkpoint-18000 checkpoint-19000 checkpoint-2000 checkpoint-20000 checkpoint-21000 checkpoint-22000 checkpoint-23000 checkpoint-24000 checkpoint-25000 checkpoint-26000 checkpoint-27000 checkpoint-28000 checkpoint-29000 checkpoint-3000 checkpoint-30000 checkpoint-31000 checkpoint-32000 checkpoint-33000 checkpoint-34000 checkpoint-35000 checkpoint-36000 checkpoint-37000 checkpoint-38000 checkpoint-39000 checkpoint-4000 checkpoint-40000 checkpoint-41000 checkpoint-42000 checkpoint-43000 checkpoint-44000 checkpoint-45000

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. checkpoint-1000/README.md +202 -0
  2. checkpoint-1000/adapter_config.json +34 -0
  3. checkpoint-1000/adapter_model.safetensors +3 -0
  4. checkpoint-1000/added_tokens.json +24 -0
  5. checkpoint-1000/merges.txt +0 -0
  6. checkpoint-1000/optimizer.pt +3 -0
  7. checkpoint-1000/rng_state.pth +3 -0
  8. checkpoint-1000/scheduler.pt +3 -0
  9. checkpoint-1000/special_tokens_map.json +31 -0
  10. checkpoint-1000/tokenizer.json +0 -0
  11. checkpoint-1000/tokenizer_config.json +207 -0
  12. checkpoint-1000/trainer_state.json +181 -0
  13. checkpoint-1000/training_args.bin +3 -0
  14. checkpoint-1000/vocab.json +0 -0
  15. checkpoint-10000/README.md +202 -0
  16. checkpoint-10000/adapter_config.json +34 -0
  17. checkpoint-10000/adapter_model.safetensors +3 -0
  18. checkpoint-10000/added_tokens.json +24 -0
  19. checkpoint-10000/merges.txt +0 -0
  20. checkpoint-10000/optimizer.pt +3 -0
  21. checkpoint-10000/rng_state.pth +3 -0
  22. checkpoint-10000/scheduler.pt +3 -0
  23. checkpoint-10000/special_tokens_map.json +31 -0
  24. checkpoint-10000/tokenizer.json +0 -0
  25. checkpoint-10000/tokenizer_config.json +207 -0
  26. checkpoint-10000/trainer_state.json +1513 -0
  27. checkpoint-10000/training_args.bin +3 -0
  28. checkpoint-10000/vocab.json +0 -0
  29. checkpoint-100000/README.md +202 -0
  30. checkpoint-100000/adapter_config.json +34 -0
  31. checkpoint-100000/adapter_model.safetensors +3 -0
  32. checkpoint-100000/added_tokens.json +24 -0
  33. checkpoint-100000/merges.txt +0 -0
  34. checkpoint-100000/optimizer.pt +3 -0
  35. checkpoint-100000/rng_state.pth +3 -0
  36. checkpoint-100000/scheduler.pt +3 -0
  37. checkpoint-100000/special_tokens_map.json +31 -0
  38. checkpoint-100000/tokenizer.json +0 -0
  39. checkpoint-100000/tokenizer_config.json +207 -0
  40. checkpoint-100000/trainer_state.json +0 -0
  41. checkpoint-100000/training_args.bin +3 -0
  42. checkpoint-100000/vocab.json +0 -0
  43. checkpoint-101000/README.md +202 -0
  44. checkpoint-101000/adapter_config.json +34 -0
  45. checkpoint-101000/adapter_model.safetensors +3 -0
  46. checkpoint-101000/added_tokens.json +24 -0
  47. checkpoint-101000/merges.txt +0 -0
  48. checkpoint-101000/optimizer.pt +3 -0
  49. checkpoint-101000/rng_state.pth +3 -0
  50. checkpoint-101000/scheduler.pt +3 -0
checkpoint-1000/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: Qwen/Qwen2.5-0.5B-Instruct
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.12.0
checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "down_proj",
+ "o_proj",
+ "k_proj",
+ "v_proj",
+ "up_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
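The adapter_config.json above fully specifies the LoRA setup: rank-16 adapters (r=16, lora_alpha=32, dropout 0.05) on every attention and MLP projection of Qwen2.5-0.5B-Instruct. For reference, a checkpoint saved with this config can be loaded back onto the base model with PEFT. A minimal sketch, assuming transformers and peft (0.12.0 per the README) are installed and a checkpoint directory has been downloaded; the local path is illustrative:

from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Base model named in "base_model_name_or_path" above.
base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")

# Illustrative local path to one of the uploaded checkpoint directories.
model = PeftModel.from_pretrained(base, "checkpoint-1000")

# Optionally fold the LoRA deltas into the base weights for plain inference.
model = model.merge_and_unload()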
checkpoint-1000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d0f65c41d08fa4d4863948bc82f64633161640733c4b1b468893122b579f1f2
+ size 35237104
checkpoint-1000/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
checkpoint-1000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ff6ad3cd0439c1d789571f2cd1c6bf55e7f1cc9a82ca2945e3922244b7c30b6
+ size 70667778
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67c15fc23e0ef69d5e4826db880ea884dae1fba42ab4cc76a66b49ba792d4953
+ size 14244
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:430c7b0d8be62cd36ca0d3ffcd4b3e719e4c633121d2f3915be116aeffd29e7c
+ size 1064
checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-1000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<|object_ref_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|object_ref_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151648": {
+ "content": "<|box_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151649": {
+ "content": "<|box_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
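The chat_template field above is the Jinja template that apply_chat_template renders at inference time (ChatML-style <|im_start|>/<|im_end|> turns, with optional tool-calling blocks). A minimal sketch of exercising it, assuming a checkpoint directory is available locally (the path is illustrative):

from transformers import AutoTokenizer

# Any of the uploaded checkpoints carries the same tokenizer files.
tokenizer = AutoTokenizer.from_pretrained("checkpoint-1000")

messages = [{"role": "user", "content": "Hello!"}]

# Renders the prompt defined by chat_template, ending with the
# "<|im_start|>assistant" generation prompt.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)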
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,181 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.02773453886128917,
+ "eval_steps": 1000,
+ "global_step": 1000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0013867269430644586,
+ "grad_norm": 1.8933687210083008,
+ "learning_rate": 2.957486136783734e-06,
+ "loss": 1.2241,
+ "step": 50
+ },
+ {
+ "epoch": 0.002773453886128917,
+ "grad_norm": 0.7502820491790771,
+ "learning_rate": 6.038200862600124e-06,
+ "loss": 1.0267,
+ "step": 100
+ },
+ {
+ "epoch": 0.004160180829193376,
+ "grad_norm": 0.5821689963340759,
+ "learning_rate": 9.118915588416513e-06,
+ "loss": 0.8167,
+ "step": 150
+ },
+ {
+ "epoch": 0.005546907772257834,
+ "grad_norm": 0.5138927698135376,
+ "learning_rate": 1.2199630314232902e-05,
+ "loss": 0.6408,
+ "step": 200
+ },
+ {
+ "epoch": 0.006933634715322293,
+ "grad_norm": 0.619263768196106,
+ "learning_rate": 1.5280345040049293e-05,
+ "loss": 0.5468,
+ "step": 250
+ },
+ {
+ "epoch": 0.008320361658386751,
+ "grad_norm": 0.5078439712524414,
+ "learning_rate": 1.836105976586568e-05,
+ "loss": 0.4952,
+ "step": 300
+ },
+ {
+ "epoch": 0.00970708860145121,
+ "grad_norm": 0.5653749108314514,
+ "learning_rate": 2.144177449168207e-05,
+ "loss": 0.4388,
+ "step": 350
+ },
+ {
+ "epoch": 0.011093815544515669,
+ "grad_norm": 0.6189213991165161,
+ "learning_rate": 2.452248921749846e-05,
+ "loss": 0.4232,
+ "step": 400
+ },
+ {
+ "epoch": 0.012480542487580126,
+ "grad_norm": 0.6082913875579834,
+ "learning_rate": 2.760320394331485e-05,
+ "loss": 0.401,
+ "step": 450
+ },
+ {
+ "epoch": 0.013867269430644586,
+ "grad_norm": 0.6956301331520081,
+ "learning_rate": 3.068391866913124e-05,
+ "loss": 0.3895,
+ "step": 500
+ },
+ {
+ "epoch": 0.015253996373709043,
+ "grad_norm": 0.7030412554740906,
+ "learning_rate": 3.3764633394947633e-05,
+ "loss": 0.3676,
+ "step": 550
+ },
+ {
+ "epoch": 0.016640723316773503,
+ "grad_norm": 0.6779190897941589,
+ "learning_rate": 3.684534812076402e-05,
+ "loss": 0.3653,
+ "step": 600
+ },
+ {
+ "epoch": 0.01802745025983796,
+ "grad_norm": 0.8930213451385498,
+ "learning_rate": 3.992606284658041e-05,
+ "loss": 0.3645,
+ "step": 650
+ },
+ {
+ "epoch": 0.01941417720290242,
+ "grad_norm": 0.6423994302749634,
+ "learning_rate": 4.30067775723968e-05,
+ "loss": 0.3514,
+ "step": 700
+ },
+ {
+ "epoch": 0.02080090414596688,
+ "grad_norm": 0.7728660106658936,
+ "learning_rate": 4.608749229821319e-05,
+ "loss": 0.3468,
+ "step": 750
+ },
+ {
+ "epoch": 0.022187631089031337,
+ "grad_norm": 0.7561061978340149,
+ "learning_rate": 4.916820702402958e-05,
+ "loss": 0.3499,
+ "step": 800
+ },
+ {
+ "epoch": 0.023574358032095795,
+ "grad_norm": 0.6163890957832336,
+ "learning_rate": 5.224892174984597e-05,
+ "loss": 0.3417,
+ "step": 850
+ },
+ {
+ "epoch": 0.024961084975160253,
+ "grad_norm": 0.7334563732147217,
+ "learning_rate": 5.532963647566236e-05,
+ "loss": 0.3299,
+ "step": 900
+ },
+ {
+ "epoch": 0.026347811918224714,
+ "grad_norm": 0.655237078666687,
+ "learning_rate": 5.841035120147874e-05,
+ "loss": 0.3306,
+ "step": 950
+ },
+ {
+ "epoch": 0.02773453886128917,
+ "grad_norm": 0.8147113919258118,
+ "learning_rate": 6.149106592729513e-05,
+ "loss": 0.3281,
+ "step": 1000
+ },
+ {
+ "epoch": 0.02773453886128917,
+ "eval_loss": 0.32194069027900696,
+ "eval_runtime": 501.2457,
+ "eval_samples_per_second": 5.7,
+ "eval_steps_per_second": 5.7,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 108168,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 1000,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.802385752064e+16,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
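trainer_state.json is plain JSON, so the loss trajectory recorded above can be pulled straight out of log_history: training entries carry "loss", evaluation entries carry "eval_loss". A small sketch, assuming the file has been downloaded locally (path illustrative):

import json

with open("checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

# Separate training-loss entries from evaluation entries.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(state["global_step"], "of", state["max_steps"], "steps")
print("last train loss:", train[-1])  # (1000, 0.3281)
print("eval loss:", evals)            # [(1000, 0.32194...)]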
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e593d4080c231741ba3acb7aa362bf85842c0c2beaca29489d434eca5cc7fa0
+ size 5432
checkpoint-1000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-10000/README.md ADDED
checkpoint-10000/adapter_config.json ADDED
checkpoint-10000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8388c74c627a1d4b1598c16d860bc46e2549ecae8baa506fdeb87d3453ed311d
+ size 35237104
checkpoint-10000/added_tokens.json ADDED
checkpoint-10000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-10000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7e049b89765e9a37930bebf3a5be684170c214ca657ed3dae2c97ceee36ee0e
+ size 70667778
checkpoint-10000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edc8f9131d4d1c2a3fb07a9d3ef9b3ece7220bb4f3a2a5762f9ad2f9635dbc3c
+ size 14244
checkpoint-10000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97fefaec88ab34fa963e63591c6f53ba9a4a528c387c8d03a9a416316b7d69fe
+ size 1064
checkpoint-10000/special_tokens_map.json ADDED
checkpoint-10000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-10000/tokenizer_config.json ADDED
checkpoint-10000/trainer_state.json ADDED
@@ -0,0 +1,1513 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.2773453886128917,
+ "eval_steps": 1000,
+ "global_step": 10000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0013867269430644586,
+ "grad_norm": 1.8933687210083008,
+ "learning_rate": 2.957486136783734e-06,
+ "loss": 1.2241,
+ "step": 50
+ },
+ {
+ "epoch": 0.002773453886128917,
+ "grad_norm": 0.7502820491790771,
+ "learning_rate": 6.038200862600124e-06,
+ "loss": 1.0267,
+ "step": 100
+ },
+ {
+ "epoch": 0.004160180829193376,
+ "grad_norm": 0.5821689963340759,
+ "learning_rate": 9.118915588416513e-06,
+ "loss": 0.8167,
+ "step": 150
+ },
+ {
+ "epoch": 0.005546907772257834,
+ "grad_norm": 0.5138927698135376,
+ "learning_rate": 1.2199630314232902e-05,
+ "loss": 0.6408,
+ "step": 200
+ },
+ {
+ "epoch": 0.006933634715322293,
+ "grad_norm": 0.619263768196106,
+ "learning_rate": 1.5280345040049293e-05,
+ "loss": 0.5468,
+ "step": 250
+ },
+ {
+ "epoch": 0.008320361658386751,
+ "grad_norm": 0.5078439712524414,
+ "learning_rate": 1.836105976586568e-05,
+ "loss": 0.4952,
+ "step": 300
+ },
+ {
+ "epoch": 0.00970708860145121,
+ "grad_norm": 0.5653749108314514,
+ "learning_rate": 2.144177449168207e-05,
+ "loss": 0.4388,
+ "step": 350
+ },
+ {
+ "epoch": 0.011093815544515669,
+ "grad_norm": 0.6189213991165161,
+ "learning_rate": 2.452248921749846e-05,
+ "loss": 0.4232,
+ "step": 400
+ },
+ {
+ "epoch": 0.012480542487580126,
+ "grad_norm": 0.6082913875579834,
+ "learning_rate": 2.760320394331485e-05,
+ "loss": 0.401,
+ "step": 450
+ },
+ {
+ "epoch": 0.013867269430644586,
+ "grad_norm": 0.6956301331520081,
+ "learning_rate": 3.068391866913124e-05,
+ "loss": 0.3895,
+ "step": 500
+ },
+ {
+ "epoch": 0.015253996373709043,
+ "grad_norm": 0.7030412554740906,
+ "learning_rate": 3.3764633394947633e-05,
+ "loss": 0.3676,
+ "step": 550
+ },
+ {
+ "epoch": 0.016640723316773503,
+ "grad_norm": 0.6779190897941589,
+ "learning_rate": 3.684534812076402e-05,
+ "loss": 0.3653,
+ "step": 600
+ },
+ {
+ "epoch": 0.01802745025983796,
+ "grad_norm": 0.8930213451385498,
+ "learning_rate": 3.992606284658041e-05,
+ "loss": 0.3645,
+ "step": 650
+ },
+ {
+ "epoch": 0.01941417720290242,
+ "grad_norm": 0.6423994302749634,
+ "learning_rate": 4.30067775723968e-05,
+ "loss": 0.3514,
+ "step": 700
+ },
+ {
+ "epoch": 0.02080090414596688,
+ "grad_norm": 0.7728660106658936,
+ "learning_rate": 4.608749229821319e-05,
+ "loss": 0.3468,
+ "step": 750
+ },
+ {
+ "epoch": 0.022187631089031337,
+ "grad_norm": 0.7561061978340149,
+ "learning_rate": 4.916820702402958e-05,
+ "loss": 0.3499,
+ "step": 800
+ },
+ {
+ "epoch": 0.023574358032095795,
+ "grad_norm": 0.6163890957832336,
+ "learning_rate": 5.224892174984597e-05,
+ "loss": 0.3417,
+ "step": 850
+ },
+ {
+ "epoch": 0.024961084975160253,
+ "grad_norm": 0.7334563732147217,
+ "learning_rate": 5.532963647566236e-05,
+ "loss": 0.3299,
+ "step": 900
+ },
+ {
+ "epoch": 0.026347811918224714,
+ "grad_norm": 0.655237078666687,
+ "learning_rate": 5.841035120147874e-05,
+ "loss": 0.3306,
+ "step": 950
+ },
+ {
+ "epoch": 0.02773453886128917,
+ "grad_norm": 0.8147113919258118,
+ "learning_rate": 6.149106592729513e-05,
+ "loss": 0.3281,
+ "step": 1000
+ },
+ {
+ "epoch": 0.02773453886128917,
+ "eval_loss": 0.32194069027900696,
+ "eval_runtime": 501.2457,
+ "eval_samples_per_second": 5.7,
+ "eval_steps_per_second": 5.7,
+ "step": 1000
+ },
+ {
+ "epoch": 0.02912126580435363,
+ "grad_norm": 0.6397083401679993,
+ "learning_rate": 6.457178065311152e-05,
+ "loss": 0.3204,
+ "step": 1050
+ },
+ {
+ "epoch": 0.030507992747418087,
+ "grad_norm": 0.5808627009391785,
+ "learning_rate": 6.765249537892791e-05,
+ "loss": 0.3229,
+ "step": 1100
+ },
+ {
+ "epoch": 0.03189471969048255,
+ "grad_norm": 0.6929567456245422,
+ "learning_rate": 7.073321010474431e-05,
+ "loss": 0.3148,
+ "step": 1150
+ },
+ {
+ "epoch": 0.033281446633547006,
+ "grad_norm": 0.620298445224762,
+ "learning_rate": 7.38139248305607e-05,
+ "loss": 0.32,
+ "step": 1200
+ },
+ {
+ "epoch": 0.034668173576611463,
+ "grad_norm": 0.5947968363761902,
+ "learning_rate": 7.689463955637708e-05,
+ "loss": 0.306,
+ "step": 1250
+ },
+ {
+ "epoch": 0.03605490051967592,
+ "grad_norm": 0.6097683906555176,
+ "learning_rate": 7.997535428219347e-05,
+ "loss": 0.3179,
+ "step": 1300
+ },
+ {
+ "epoch": 0.03744162746274038,
+ "grad_norm": 0.6339348554611206,
+ "learning_rate": 8.305606900800986e-05,
+ "loss": 0.3161,
+ "step": 1350
+ },
+ {
+ "epoch": 0.03882835440580484,
+ "grad_norm": 0.5278933644294739,
+ "learning_rate": 8.613678373382625e-05,
+ "loss": 0.3153,
+ "step": 1400
+ },
+ {
+ "epoch": 0.040215081348869294,
+ "grad_norm": 0.4927423894405365,
+ "learning_rate": 8.921749845964264e-05,
+ "loss": 0.3111,
+ "step": 1450
+ },
+ {
+ "epoch": 0.04160180829193376,
+ "grad_norm": 0.4745596945285797,
+ "learning_rate": 9.229821318545902e-05,
+ "loss": 0.304,
+ "step": 1500
+ },
+ {
+ "epoch": 0.04298853523499822,
+ "grad_norm": 0.6532231569290161,
+ "learning_rate": 9.537892791127541e-05,
+ "loss": 0.3084,
+ "step": 1550
+ },
+ {
+ "epoch": 0.044375262178062674,
+ "grad_norm": 0.5528659820556641,
+ "learning_rate": 9.84596426370918e-05,
+ "loss": 0.3084,
+ "step": 1600
+ },
+ {
+ "epoch": 0.04576198912112713,
+ "grad_norm": 0.45793089270591736,
+ "learning_rate": 0.0001015403573629082,
+ "loss": 0.2964,
+ "step": 1650
+ },
+ {
+ "epoch": 0.04714871606419159,
+ "grad_norm": 0.5063529014587402,
+ "learning_rate": 0.00010462107208872458,
+ "loss": 0.2924,
+ "step": 1700
+ },
+ {
+ "epoch": 0.04853544300725605,
+ "grad_norm": 0.48600247502326965,
+ "learning_rate": 0.00010770178681454097,
+ "loss": 0.2947,
+ "step": 1750
+ },
+ {
+ "epoch": 0.049922169950320505,
+ "grad_norm": 0.4872143268585205,
+ "learning_rate": 0.00011078250154035737,
+ "loss": 0.297,
+ "step": 1800
+ },
+ {
+ "epoch": 0.05130889689338496,
+ "grad_norm": 0.5091805458068848,
+ "learning_rate": 0.00011386321626617376,
+ "loss": 0.2888,
+ "step": 1850
+ },
+ {
+ "epoch": 0.05269562383644943,
+ "grad_norm": 0.41649994254112244,
+ "learning_rate": 0.00011694393099199015,
+ "loss": 0.2871,
+ "step": 1900
+ },
+ {
+ "epoch": 0.054082350779513885,
+ "grad_norm": 0.5174862146377563,
+ "learning_rate": 0.00012002464571780654,
+ "loss": 0.2922,
+ "step": 1950
+ },
+ {
+ "epoch": 0.05546907772257834,
+ "grad_norm": 0.45786553621292114,
+ "learning_rate": 0.00012310536044362293,
+ "loss": 0.2883,
+ "step": 2000
+ },
+ {
+ "epoch": 0.05546907772257834,
+ "eval_loss": 0.28488224744796753,
+ "eval_runtime": 500.9558,
+ "eval_samples_per_second": 5.703,
+ "eval_steps_per_second": 5.703,
+ "step": 2000
+ },
+ {
+ "epoch": 0.0568558046656428,
+ "grad_norm": 0.4992533326148987,
+ "learning_rate": 0.00012606284658040666,
+ "loss": 0.3033,
+ "step": 2050
+ },
+ {
+ "epoch": 0.05824253160870726,
+ "grad_norm": 0.4205988049507141,
+ "learning_rate": 0.00012914356130622304,
+ "loss": 0.2867,
+ "step": 2100
+ },
+ {
+ "epoch": 0.059629258551771716,
+ "grad_norm": 0.4288152754306793,
+ "learning_rate": 0.00013222427603203944,
+ "loss": 0.2795,
+ "step": 2150
+ },
+ {
+ "epoch": 0.061015985494836174,
+ "grad_norm": 0.4856145977973938,
+ "learning_rate": 0.00013530499075785582,
+ "loss": 0.2833,
+ "step": 2200
+ },
+ {
+ "epoch": 0.06240271243790063,
+ "grad_norm": 0.4891654849052429,
+ "learning_rate": 0.00013838570548367222,
+ "loss": 0.2797,
+ "step": 2250
+ },
+ {
+ "epoch": 0.0637894393809651,
+ "grad_norm": 0.39899352192878723,
+ "learning_rate": 0.00014146642020948863,
+ "loss": 0.2785,
+ "step": 2300
+ },
+ {
+ "epoch": 0.06517616632402955,
+ "grad_norm": 0.3616255819797516,
+ "learning_rate": 0.000144547134935305,
+ "loss": 0.2798,
+ "step": 2350
+ },
+ {
+ "epoch": 0.06656289326709401,
+ "grad_norm": 0.3556617498397827,
+ "learning_rate": 0.0001476278496611214,
+ "loss": 0.2811,
+ "step": 2400
+ },
+ {
+ "epoch": 0.06794962021015846,
+ "grad_norm": 0.39639297127723694,
+ "learning_rate": 0.00015070856438693776,
+ "loss": 0.2813,
+ "step": 2450
+ },
+ {
+ "epoch": 0.06933634715322293,
+ "grad_norm": 0.35177573561668396,
+ "learning_rate": 0.00015378927911275416,
+ "loss": 0.2797,
+ "step": 2500
+ },
+ {
+ "epoch": 0.07072307409628739,
+ "grad_norm": 0.38610222935676575,
+ "learning_rate": 0.00015686999383857054,
+ "loss": 0.2747,
+ "step": 2550
+ },
+ {
+ "epoch": 0.07210980103935184,
+ "grad_norm": 0.36727309226989746,
+ "learning_rate": 0.00015995070856438694,
+ "loss": 0.2776,
+ "step": 2600
+ },
+ {
+ "epoch": 0.07349652798241631,
+ "grad_norm": 0.3905107378959656,
+ "learning_rate": 0.00016303142329020332,
+ "loss": 0.2772,
+ "step": 2650
+ },
+ {
+ "epoch": 0.07488325492548076,
+ "grad_norm": 0.3958912193775177,
+ "learning_rate": 0.00016611213801601973,
+ "loss": 0.2707,
+ "step": 2700
+ },
+ {
+ "epoch": 0.07626998186854522,
+ "grad_norm": 0.4029497504234314,
+ "learning_rate": 0.0001691928527418361,
+ "loss": 0.2692,
+ "step": 2750
+ },
+ {
+ "epoch": 0.07765670881160967,
+ "grad_norm": 0.3514055907726288,
+ "learning_rate": 0.0001722735674676525,
+ "loss": 0.2759,
+ "step": 2800
+ },
+ {
+ "epoch": 0.07904343575467414,
+ "grad_norm": 0.34912553429603577,
+ "learning_rate": 0.00017529266789895255,
+ "loss": 0.2793,
+ "step": 2850
+ },
+ {
+ "epoch": 0.08043016269773859,
+ "grad_norm": 0.3493233621120453,
+ "learning_rate": 0.00017831176833025262,
+ "loss": 0.2845,
+ "step": 2900
+ },
+ {
+ "epoch": 0.08181688964080305,
+ "grad_norm": 0.30080145597457886,
+ "learning_rate": 0.00018139248305606902,
+ "loss": 0.2686,
+ "step": 2950
+ },
+ {
+ "epoch": 0.08320361658386752,
+ "grad_norm": 0.3265998959541321,
+ "learning_rate": 0.0001844731977818854,
+ "loss": 0.2695,
+ "step": 3000
+ },
+ {
+ "epoch": 0.08320361658386752,
+ "eval_loss": 0.26523345708847046,
+ "eval_runtime": 500.4565,
+ "eval_samples_per_second": 5.709,
+ "eval_steps_per_second": 5.709,
+ "step": 3000
+ },
+ {
+ "epoch": 0.08459034352693197,
+ "grad_norm": 0.29866209626197815,
+ "learning_rate": 0.0001875539125077018,
+ "loss": 0.2679,
+ "step": 3050
+ },
+ {
+ "epoch": 0.08597707046999643,
+ "grad_norm": 0.3191625475883484,
+ "learning_rate": 0.00019063462723351818,
+ "loss": 0.267,
+ "step": 3100
+ },
+ {
+ "epoch": 0.08736379741306088,
+ "grad_norm": 0.3110339939594269,
+ "learning_rate": 0.00019371534195933459,
+ "loss": 0.2658,
+ "step": 3150
+ },
+ {
+ "epoch": 0.08875052435612535,
+ "grad_norm": 0.32120850682258606,
+ "learning_rate": 0.00019679605668515096,
+ "loss": 0.2724,
+ "step": 3200
+ },
+ {
+ "epoch": 0.0901372512991898,
+ "grad_norm": 0.28446418046951294,
+ "learning_rate": 0.00019987677141096734,
+ "loss": 0.268,
+ "step": 3250
+ },
+ {
+ "epoch": 0.09152397824225426,
+ "grad_norm": 0.2722443640232086,
+ "learning_rate": 0.00019999989671933422,
+ "loss": 0.2716,
+ "step": 3300
+ },
+ {
+ "epoch": 0.09291070518531871,
+ "grad_norm": 0.31304416060447693,
+ "learning_rate": 0.00019999956948482068,
+ "loss": 0.2631,
+ "step": 3350
+ },
+ {
+ "epoch": 0.09429743212838318,
+ "grad_norm": 0.2516928017139435,
+ "learning_rate": 0.00019999901811788604,
+ "loss": 0.2647,
+ "step": 3400
+ },
+ {
+ "epoch": 0.09568415907144764,
+ "grad_norm": 0.288006067276001,
+ "learning_rate": 0.00019999824261976613,
+ "loss": 0.263,
+ "step": 3450
+ },
+ {
+ "epoch": 0.0970708860145121,
+ "grad_norm": 0.2745107114315033,
+ "learning_rate": 0.00019999724299219913,
+ "loss": 0.2642,
+ "step": 3500
+ },
+ {
+ "epoch": 0.09845761295757656,
527
+ "grad_norm": 2.800987720489502,
528
+ "learning_rate": 0.00019999601923742548,
529
+ "loss": 0.7176,
530
+ "step": 3550
531
+ },
532
+ {
533
+ "epoch": 0.09984433990064101,
534
+ "grad_norm": 0.3590925931930542,
535
+ "learning_rate": 0.00019999457135818805,
536
+ "loss": 0.3146,
537
+ "step": 3600
538
+ },
539
+ {
540
+ "epoch": 0.10123106684370548,
541
+ "grad_norm": 0.32617494463920593,
542
+ "learning_rate": 0.00019999289935773202,
543
+ "loss": 0.2786,
544
+ "step": 3650
545
+ },
546
+ {
547
+ "epoch": 0.10261779378676993,
548
+ "grad_norm": 0.3239264488220215,
549
+ "learning_rate": 0.0001999910032398049,
550
+ "loss": 0.2807,
551
+ "step": 3700
552
+ },
553
+ {
554
+ "epoch": 0.10400452072983439,
555
+ "grad_norm": 0.3022274076938629,
556
+ "learning_rate": 0.00019998888300865652,
557
+ "loss": 0.2758,
558
+ "step": 3750
559
+ },
560
+ {
561
+ "epoch": 0.10539124767289886,
562
+ "grad_norm": 0.33024862408638,
563
+ "learning_rate": 0.000199986538669039,
564
+ "loss": 0.2687,
565
+ "step": 3800
566
+ },
567
+ {
568
+ "epoch": 0.1067779746159633,
569
+ "grad_norm": 0.6899451017379761,
570
+ "learning_rate": 0.00019998397022620687,
571
+ "loss": 0.2699,
572
+ "step": 3850
573
+ },
574
+ {
575
+ "epoch": 0.10816470155902777,
576
+ "grad_norm": 0.2794604003429413,
577
+ "learning_rate": 0.0001999811776859168,
578
+ "loss": 0.2667,
579
+ "step": 3900
580
+ },
581
+ {
582
+ "epoch": 0.10955142850209222,
583
+ "grad_norm": 0.2764255106449127,
584
+ "learning_rate": 0.00019997816105442778,
585
+ "loss": 0.2658,
586
+ "step": 3950
587
+ },
588
+ {
589
+ "epoch": 0.11093815544515669,
590
+ "grad_norm": 0.43574222922325134,
591
+ "learning_rate": 0.0001999749203385012,
592
+ "loss": 0.2664,
593
+ "step": 4000
594
+ },
595
+ {
596
+ "epoch": 0.11093815544515669,
597
+ "eval_loss": 0.26065966486930847,
598
+ "eval_runtime": 500.842,
599
+ "eval_samples_per_second": 5.704,
600
+ "eval_steps_per_second": 5.704,
601
+ "step": 4000
602
+ },
603
+ {
604
+ "epoch": 0.11232488238822114,
605
+ "grad_norm": 0.5340762734413147,
606
+ "learning_rate": 0.00019997145554540046,
607
+ "loss": 0.272,
608
+ "step": 4050
609
+ },
610
+ {
611
+ "epoch": 0.1137116093312856,
612
+ "grad_norm": 0.32403895258903503,
613
+ "learning_rate": 0.00019996776668289136,
614
+ "loss": 0.2679,
615
+ "step": 4100
616
+ },
617
+ {
618
+ "epoch": 0.11509833627435005,
619
+ "grad_norm": 0.2928290367126465,
620
+ "learning_rate": 0.0001999638537592419,
621
+ "loss": 0.2624,
622
+ "step": 4150
623
+ },
624
+ {
625
+ "epoch": 0.11648506321741452,
626
+ "grad_norm": 0.23226021230220795,
627
+ "learning_rate": 0.00019995971678322228,
628
+ "loss": 0.2557,
629
+ "step": 4200
630
+ },
631
+ {
632
+ "epoch": 0.11787179016047898,
633
+ "grad_norm": 0.2748055160045624,
634
+ "learning_rate": 0.00019995535576410476,
635
+ "loss": 0.2625,
636
+ "step": 4250
637
+ },
638
+ {
639
+ "epoch": 0.11925851710354343,
640
+ "grad_norm": 0.2713299095630646,
641
+ "learning_rate": 0.00019995077071166385,
642
+ "loss": 0.2611,
643
+ "step": 4300
644
+ },
645
+ {
646
+ "epoch": 0.1206452440466079,
647
+ "grad_norm": 0.24674977362155914,
648
+ "learning_rate": 0.00019994596163617624,
649
+ "loss": 0.2647,
650
+ "step": 4350
651
+ },
652
+ {
653
+ "epoch": 0.12203197098967235,
654
+ "grad_norm": 0.359017014503479,
655
+ "learning_rate": 0.00019994092854842065,
656
+ "loss": 0.2601,
657
+ "step": 4400
658
+ },
659
+ {
660
+ "epoch": 0.12341869793273681,
661
+ "grad_norm": 0.38051414489746094,
662
+ "learning_rate": 0.00019993567145967791,
663
+ "loss": 0.253,
664
+ "step": 4450
665
+ },
666
+ {
667
+ "epoch": 0.12480542487580126,
668
+ "grad_norm": 0.26227161288261414,
669
+ "learning_rate": 0.0001999301903817309,
670
+ "loss": 0.2584,
671
+ "step": 4500
672
+ },
673
+ {
674
+ "epoch": 0.12619215181886573,
675
+ "grad_norm": 0.21259668469429016,
676
+ "learning_rate": 0.00019992448532686453,
677
+ "loss": 0.2618,
678
+ "step": 4550
679
+ },
680
+ {
681
+ "epoch": 0.1275788787619302,
682
+ "grad_norm": 0.23226451873779297,
683
+ "learning_rate": 0.0001999185563078658,
684
+ "loss": 0.2526,
685
+ "step": 4600
686
+ },
687
+ {
688
+ "epoch": 0.12896560570499466,
689
+ "grad_norm": 0.24459871649742126,
690
+ "learning_rate": 0.00019991240333802352,
691
+ "loss": 0.2523,
692
+ "step": 4650
693
+ },
694
+ {
695
+ "epoch": 0.1303523326480591,
696
+ "grad_norm": 0.29185208678245544,
697
+ "learning_rate": 0.00019990602643112863,
698
+ "loss": 0.2546,
699
+ "step": 4700
700
+ },
701
+ {
702
+ "epoch": 0.13173905959112356,
703
+ "grad_norm": 0.23443324863910675,
704
+ "learning_rate": 0.00019989942560147387,
705
+ "loss": 0.2557,
706
+ "step": 4750
707
+ },
708
+ {
709
+ "epoch": 0.13312578653418802,
710
+ "grad_norm": 0.22915039956569672,
711
+ "learning_rate": 0.00019989260086385394,
712
+ "loss": 0.2546,
713
+ "step": 4800
714
+ },
715
+ {
716
+ "epoch": 0.1345125134772525,
717
+ "grad_norm": 0.2710748016834259,
718
+ "learning_rate": 0.00019988555223356531,
719
+ "loss": 0.2619,
720
+ "step": 4850
721
+ },
722
+ {
723
+ "epoch": 0.13589924042031692,
724
+ "grad_norm": 0.24671098589897156,
725
+ "learning_rate": 0.00019987827972640633,
726
+ "loss": 0.2594,
727
+ "step": 4900
728
+ },
729
+ {
730
+ "epoch": 0.1372859673633814,
731
+ "grad_norm": 0.2359282672405243,
732
+ "learning_rate": 0.00019987078335867713,
733
+ "loss": 0.2616,
734
+ "step": 4950
735
+ },
736
+ {
737
+ "epoch": 0.13867269430644585,
738
+ "grad_norm": 0.2197064608335495,
739
+ "learning_rate": 0.00019986306314717956,
740
+ "loss": 0.2507,
741
+ "step": 5000
742
+ },
743
+ {
744
+ "epoch": 0.13867269430644585,
745
+ "eval_loss": 0.25083017349243164,
746
+ "eval_runtime": 500.7995,
747
+ "eval_samples_per_second": 5.705,
748
+ "eval_steps_per_second": 5.705,
749
+ "step": 5000
750
+ },
751
+ {
752
+ "epoch": 0.14005942124951032,
753
+ "grad_norm": 0.2249370515346527,
754
+ "learning_rate": 0.0001998551191092172,
755
+ "loss": 0.2574,
756
+ "step": 5050
757
+ },
758
+ {
759
+ "epoch": 0.14144614819257478,
760
+ "grad_norm": 0.36345556378364563,
761
+ "learning_rate": 0.0001998469512625953,
762
+ "loss": 0.2493,
763
+ "step": 5100
764
+ },
765
+ {
766
+ "epoch": 0.14283287513563922,
767
+ "grad_norm": 0.24807791411876678,
768
+ "learning_rate": 0.00019983855962562067,
769
+ "loss": 0.2542,
770
+ "step": 5150
771
+ },
772
+ {
773
+ "epoch": 0.14421960207870368,
774
+ "grad_norm": 3.6125738620758057,
775
+ "learning_rate": 0.00019982994421710186,
776
+ "loss": 0.2595,
777
+ "step": 5200
778
+ },
779
+ {
780
+ "epoch": 0.14560632902176815,
781
+ "grad_norm": 0.4985048472881317,
782
+ "learning_rate": 0.0001998211050563488,
783
+ "loss": 0.2558,
784
+ "step": 5250
785
+ },
786
+ {
787
+ "epoch": 0.14699305596483261,
788
+ "grad_norm": 0.3320443332195282,
789
+ "learning_rate": 0.00019981204216317308,
790
+ "loss": 0.2545,
791
+ "step": 5300
792
+ },
793
+ {
794
+ "epoch": 0.14837978290789705,
795
+ "grad_norm": 0.2081877887248993,
796
+ "learning_rate": 0.00019980275555788759,
797
+ "loss": 0.2536,
798
+ "step": 5350
799
+ },
800
+ {
801
+ "epoch": 0.14976650985096152,
802
+ "grad_norm": 0.27258801460266113,
803
+ "learning_rate": 0.00019979324526130676,
804
+ "loss": 0.2505,
805
+ "step": 5400
806
+ },
807
+ {
808
+ "epoch": 0.15115323679402598,
809
+ "grad_norm": 0.23199999332427979,
810
+ "learning_rate": 0.00019978351129474632,
811
+ "loss": 0.2556,
812
+ "step": 5450
813
+ },
814
+ {
815
+ "epoch": 0.15253996373709044,
816
+ "grad_norm": 0.20929445326328278,
817
+ "learning_rate": 0.00019977355368002334,
818
+ "loss": 0.2486,
819
+ "step": 5500
820
+ },
821
+ {
822
+ "epoch": 0.1539266906801549,
823
+ "grad_norm": 0.23551955819129944,
824
+ "learning_rate": 0.00019976337243945617,
825
+ "loss": 0.2517,
826
+ "step": 5550
827
+ },
828
+ {
829
+ "epoch": 0.15531341762321935,
830
+ "grad_norm": 0.30231812596321106,
831
+ "learning_rate": 0.0001997529675958644,
832
+ "loss": 0.2498,
833
+ "step": 5600
834
+ },
835
+ {
836
+ "epoch": 0.1567001445662838,
837
+ "grad_norm": 0.24430635571479797,
838
+ "learning_rate": 0.00019974233917256865,
839
+ "loss": 0.2523,
840
+ "step": 5650
841
+ },
842
+ {
843
+ "epoch": 0.15808687150934828,
844
+ "grad_norm": 6.362756252288818,
845
+ "learning_rate": 0.0001997314871933909,
846
+ "loss": 0.2529,
847
+ "step": 5700
848
+ },
849
+ {
850
+ "epoch": 0.15947359845241274,
851
+ "grad_norm": 0.2339017242193222,
852
+ "learning_rate": 0.00019972041168265397,
853
+ "loss": 0.2524,
854
+ "step": 5750
855
+ },
856
+ {
857
+ "epoch": 0.16086032539547718,
858
+ "grad_norm": 0.22503100335597992,
859
+ "learning_rate": 0.0001997091126651818,
860
+ "loss": 0.251,
861
+ "step": 5800
862
+ },
863
+ {
864
+ "epoch": 0.16224705233854164,
865
+ "grad_norm": 0.26495125889778137,
866
+ "learning_rate": 0.00019969759016629928,
867
+ "loss": 0.2517,
868
+ "step": 5850
869
+ },
870
+ {
871
+ "epoch": 0.1636337792816061,
872
+ "grad_norm": 0.25339657068252563,
873
+ "learning_rate": 0.00019968584421183212,
874
+ "loss": 0.2505,
875
+ "step": 5900
876
+ },
877
+ {
878
+ "epoch": 0.16502050622467057,
879
+ "grad_norm": 0.20266841351985931,
880
+ "learning_rate": 0.000199673874828107,
881
+ "loss": 0.2501,
882
+ "step": 5950
883
+ },
884
+ {
885
+ "epoch": 0.16640723316773504,
886
+ "grad_norm": 0.19285647571086884,
887
+ "learning_rate": 0.00019966168204195125,
888
+ "loss": 0.2445,
889
+ "step": 6000
890
+ },
891
+ {
892
+ "epoch": 0.16640723316773504,
893
+ "eval_loss": 0.24731825292110443,
894
+ "eval_runtime": 500.9495,
895
+ "eval_samples_per_second": 5.703,
896
+ "eval_steps_per_second": 5.703,
897
+ "step": 6000
898
+ },
899
+ {
900
+ "epoch": 0.16779396011079947,
901
+ "grad_norm": 0.2121065855026245,
902
+ "learning_rate": 0.000199649265880693,
903
+ "loss": 0.2466,
904
+ "step": 6050
905
+ },
906
+ {
907
+ "epoch": 0.16918068705386394,
908
+ "grad_norm": 0.2560518980026245,
909
+ "learning_rate": 0.000199636626372161,
910
+ "loss": 0.2572,
911
+ "step": 6100
912
+ },
913
+ {
914
+ "epoch": 0.1705674139969284,
915
+ "grad_norm": 0.22927352786064148,
916
+ "learning_rate": 0.00019962376354468466,
917
+ "loss": 0.2509,
918
+ "step": 6150
919
+ },
920
+ {
921
+ "epoch": 0.17195414093999287,
922
+ "grad_norm": 0.2201690673828125,
923
+ "learning_rate": 0.00019961067742709377,
924
+ "loss": 0.2501,
925
+ "step": 6200
926
+ },
927
+ {
928
+ "epoch": 0.1733408678830573,
929
+ "grad_norm": 0.23233374953269958,
930
+ "learning_rate": 0.0001995973680487188,
931
+ "loss": 0.2525,
932
+ "step": 6250
933
+ },
934
+ {
935
+ "epoch": 0.17472759482612177,
936
+ "grad_norm": 0.254256933927536,
937
+ "learning_rate": 0.00019958383543939041,
938
+ "loss": 0.2499,
939
+ "step": 6300
940
+ },
941
+ {
942
+ "epoch": 0.17611432176918623,
943
+ "grad_norm": 0.1754632294178009,
944
+ "learning_rate": 0.00019957007962943975,
945
+ "loss": 0.251,
946
+ "step": 6350
947
+ },
948
+ {
949
+ "epoch": 0.1775010487122507,
950
+ "grad_norm": 0.23628771305084229,
951
+ "learning_rate": 0.00019955610064969817,
952
+ "loss": 0.256,
953
+ "step": 6400
954
+ },
955
+ {
956
+ "epoch": 0.17888777565531516,
957
+ "grad_norm": 0.23698653280735016,
958
+ "learning_rate": 0.00019954189853149725,
959
+ "loss": 0.2474,
960
+ "step": 6450
961
+ },
962
+ {
963
+ "epoch": 0.1802745025983796,
964
+ "grad_norm": 0.27713823318481445,
965
+ "learning_rate": 0.00019952747330666867,
966
+ "loss": 0.2481,
967
+ "step": 6500
968
+ },
969
+ {
970
+ "epoch": 0.18166122954144406,
971
+ "grad_norm": 0.1710810512304306,
972
+ "learning_rate": 0.00019951282500754413,
973
+ "loss": 0.2564,
974
+ "step": 6550
975
+ },
976
+ {
977
+ "epoch": 0.18304795648450853,
978
+ "grad_norm": 0.21406157314777374,
979
+ "learning_rate": 0.00019949795366695544,
980
+ "loss": 0.2517,
981
+ "step": 6600
982
+ },
983
+ {
984
+ "epoch": 0.184434683427573,
985
+ "grad_norm": 0.20108449459075928,
986
+ "learning_rate": 0.00019948285931823415,
987
+ "loss": 0.2518,
988
+ "step": 6650
989
+ },
990
+ {
991
+ "epoch": 0.18582141037063743,
992
+ "grad_norm": 5.1352715492248535,
993
+ "learning_rate": 0.0001994675419952118,
994
+ "loss": 0.2546,
995
+ "step": 6700
996
+ },
997
+ {
998
+ "epoch": 0.1872081373137019,
999
+ "grad_norm": 0.22743810713291168,
1000
+ "learning_rate": 0.00019945200173221962,
1001
+ "loss": 0.2457,
1002
+ "step": 6750
1003
+ },
1004
+ {
1005
+ "epoch": 0.18859486425676636,
1006
+ "grad_norm": 0.20475907623767853,
1007
+ "learning_rate": 0.0001994362385640885,
1008
+ "loss": 0.2529,
1009
+ "step": 6800
1010
+ },
1011
+ {
1012
+ "epoch": 0.18998159119983082,
1013
+ "grad_norm": 0.22172316908836365,
1014
+ "learning_rate": 0.000199420252526149,
1015
+ "loss": 0.2554,
1016
+ "step": 6850
1017
+ },
1018
+ {
1019
+ "epoch": 0.1913683181428953,
1020
+ "grad_norm": 2.967470407485962,
1021
+ "learning_rate": 0.0001994040436542311,
1022
+ "loss": 0.2555,
1023
+ "step": 6900
1024
+ },
1025
+ {
1026
+ "epoch": 0.19275504508595973,
1027
+ "grad_norm": 0.23698735237121582,
1028
+ "learning_rate": 0.00019938761198466437,
1029
+ "loss": 0.2619,
1030
+ "step": 6950
1031
+ },
1032
+ {
1033
+ "epoch": 0.1941417720290242,
1034
+ "grad_norm": 0.17891797423362732,
1035
+ "learning_rate": 0.0001993709575542776,
1036
+ "loss": 0.2464,
1037
+ "step": 7000
1038
+ },
1039
+ {
1040
+ "epoch": 0.1941417720290242,
1041
+ "eval_loss": 0.24410127103328705,
1042
+ "eval_runtime": 500.8833,
1043
+ "eval_samples_per_second": 5.704,
1044
+ "eval_steps_per_second": 5.704,
1045
+ "step": 7000
1046
+ },
1047
+ {
1048
+ "epoch": 0.19552849897208865,
1049
+ "grad_norm": 0.21030811965465546,
1050
+ "learning_rate": 0.00019935408040039901,
1051
+ "loss": 0.2517,
1052
+ "step": 7050
1053
+ },
1054
+ {
1055
+ "epoch": 0.19691522591515312,
1056
+ "grad_norm": 0.1913098245859146,
1057
+ "learning_rate": 0.00019933698056085586,
1058
+ "loss": 0.249,
1059
+ "step": 7100
1060
+ },
1061
+ {
1062
+ "epoch": 0.19830195285821758,
1063
+ "grad_norm": 0.2044433057308197,
1064
+ "learning_rate": 0.00019931965807397465,
1065
+ "loss": 0.2496,
1066
+ "step": 7150
1067
+ },
1068
+ {
1069
+ "epoch": 0.19968867980128202,
1070
+ "grad_norm": 0.18698015809059143,
1071
+ "learning_rate": 0.00019930211297858078,
1072
+ "loss": 0.2537,
1073
+ "step": 7200
1074
+ },
1075
+ {
1076
+ "epoch": 0.20107540674434649,
1077
+ "grad_norm": 0.22580522298812866,
1078
+ "learning_rate": 0.00019928434531399876,
1079
+ "loss": 0.2456,
1080
+ "step": 7250
1081
+ },
1082
+ {
1083
+ "epoch": 0.20246213368741095,
1084
+ "grad_norm": 0.1749202162027359,
1085
+ "learning_rate": 0.00019926635512005183,
1086
+ "loss": 0.2504,
1087
+ "step": 7300
1088
+ },
1089
+ {
1090
+ "epoch": 0.20384886063047541,
1091
+ "grad_norm": 0.2123364359140396,
1092
+ "learning_rate": 0.00019924814243706197,
1093
+ "loss": 0.2477,
1094
+ "step": 7350
1095
+ },
1096
+ {
1097
+ "epoch": 0.20523558757353985,
1098
+ "grad_norm": 0.2234705090522766,
1099
+ "learning_rate": 0.00019922970730584997,
1100
+ "loss": 0.2457,
1101
+ "step": 7400
1102
+ },
1103
+ {
1104
+ "epoch": 0.20662231451660432,
1105
+ "grad_norm": 0.20742256939411163,
1106
+ "learning_rate": 0.00019921104976773505,
1107
+ "loss": 0.249,
1108
+ "step": 7450
1109
+ },
1110
+ {
1111
+ "epoch": 0.20800904145966878,
1112
+ "grad_norm": 0.18315458297729492,
1113
+ "learning_rate": 0.000199192169864535,
1114
+ "loss": 0.2459,
1115
+ "step": 7500
1116
+ },
1117
+ {
1118
+ "epoch": 0.20939576840273325,
1119
+ "grad_norm": 0.19357183575630188,
1120
+ "learning_rate": 0.000199173067638566,
1121
+ "loss": 0.2439,
1122
+ "step": 7550
1123
+ },
1124
+ {
1125
+ "epoch": 0.2107824953457977,
1126
+ "grad_norm": 0.2398926168680191,
1127
+ "learning_rate": 0.00019915374313264248,
1128
+ "loss": 0.2497,
1129
+ "step": 7600
1130
+ },
1131
+ {
1132
+ "epoch": 0.21216922228886215,
1133
+ "grad_norm": 0.20313721895217896,
1134
+ "learning_rate": 0.00019913419639007714,
1135
+ "loss": 0.2447,
1136
+ "step": 7650
1137
+ },
1138
+ {
1139
+ "epoch": 0.2135559492319266,
1140
+ "grad_norm": 0.17255066335201263,
1141
+ "learning_rate": 0.00019911442745468075,
1142
+ "loss": 0.2447,
1143
+ "step": 7700
1144
+ },
1145
+ {
1146
+ "epoch": 0.21494267617499108,
1147
+ "grad_norm": 0.19140756130218506,
1148
+ "learning_rate": 0.0001990944363707621,
1149
+ "loss": 0.2383,
1150
+ "step": 7750
1151
+ },
1152
+ {
1153
+ "epoch": 0.21632940311805554,
1154
+ "grad_norm": 0.15212053060531616,
1155
+ "learning_rate": 0.00019907422318312783,
1156
+ "loss": 0.2485,
1157
+ "step": 7800
1158
+ },
1159
+ {
1160
+ "epoch": 0.21771613006111998,
1161
+ "grad_norm": 0.1841588169336319,
1162
+ "learning_rate": 0.0001990537879370825,
1163
+ "loss": 0.2432,
1164
+ "step": 7850
1165
+ },
1166
+ {
1167
+ "epoch": 0.21910285700418444,
1168
+ "grad_norm": 0.2013355791568756,
1169
+ "learning_rate": 0.00019903313067842833,
1170
+ "loss": 0.2431,
1171
+ "step": 7900
1172
+ },
1173
+ {
1174
+ "epoch": 0.2204895839472489,
1175
+ "grad_norm": 0.17149454355239868,
1176
+ "learning_rate": 0.0001990122514534651,
1177
+ "loss": 0.247,
1178
+ "step": 7950
1179
+ },
1180
+ {
1181
+ "epoch": 0.22187631089031337,
1182
+ "grad_norm": 0.24272453784942627,
1183
+ "learning_rate": 0.00019899115030899014,
1184
+ "loss": 0.2468,
1185
+ "step": 8000
1186
+ },
1187
+ {
1188
+ "epoch": 0.22187631089031337,
1189
+ "eval_loss": 0.24099861085414886,
1190
+ "eval_runtime": 501.2129,
1191
+ "eval_samples_per_second": 5.7,
1192
+ "eval_steps_per_second": 5.7,
1193
+ "step": 8000
1194
+ },
1195
+ {
1196
+ "epoch": 0.22326303783337784,
1197
+ "grad_norm": 0.2419915497303009,
1198
+ "learning_rate": 0.00019896982729229813,
1199
+ "loss": 0.2454,
1200
+ "step": 8050
1201
+ },
1202
+ {
1203
+ "epoch": 0.22464976477644227,
1204
+ "grad_norm": 0.16482336819171906,
1205
+ "learning_rate": 0.0001989482824511811,
1206
+ "loss": 0.2423,
1207
+ "step": 8100
1208
+ },
1209
+ {
1210
+ "epoch": 0.22603649171950674,
1211
+ "grad_norm": 0.22351431846618652,
1212
+ "learning_rate": 0.00019892651583392824,
1213
+ "loss": 0.2501,
1214
+ "step": 8150
1215
+ },
1216
+ {
1217
+ "epoch": 0.2274232186625712,
1218
+ "grad_norm": 0.19319549202919006,
1219
+ "learning_rate": 0.0001989045274893258,
1220
+ "loss": 0.2452,
1221
+ "step": 8200
1222
+ },
1223
+ {
1224
+ "epoch": 0.22880994560563567,
1225
+ "grad_norm": 0.15613292157649994,
1226
+ "learning_rate": 0.00019888231746665696,
1227
+ "loss": 0.2428,
1228
+ "step": 8250
1229
+ },
1230
+ {
1231
+ "epoch": 0.2301966725487001,
1232
+ "grad_norm": 0.18092665076255798,
1233
+ "learning_rate": 0.00019885988581570184,
1234
+ "loss": 0.2448,
1235
+ "step": 8300
1236
+ },
1237
+ {
1238
+ "epoch": 0.23158339949176457,
1239
+ "grad_norm": 0.18928927183151245,
1240
+ "learning_rate": 0.00019883723258673724,
1241
+ "loss": 0.2493,
1242
+ "step": 8350
1243
+ },
1244
+ {
1245
+ "epoch": 0.23297012643482903,
1246
+ "grad_norm": 0.19816988706588745,
1247
+ "learning_rate": 0.0001988143578305366,
1248
+ "loss": 0.2465,
1249
+ "step": 8400
1250
+ },
1251
+ {
1252
+ "epoch": 0.2343568533778935,
1253
+ "grad_norm": 0.19853706657886505,
1254
+ "learning_rate": 0.00019879126159836992,
1255
+ "loss": 0.2443,
1256
+ "step": 8450
1257
+ },
1258
+ {
1259
+ "epoch": 0.23574358032095796,
1260
+ "grad_norm": 0.17544203996658325,
1261
+ "learning_rate": 0.00019876794394200353,
1262
+ "loss": 0.2429,
1263
+ "step": 8500
1264
+ },
1265
+ {
1266
+ "epoch": 0.2371303072640224,
1267
+ "grad_norm": 0.16583149135112762,
1268
+ "learning_rate": 0.0001987444049137001,
1269
+ "loss": 0.244,
1270
+ "step": 8550
1271
+ },
1272
+ {
1273
+ "epoch": 0.23851703420708686,
1274
+ "grad_norm": 0.18239592015743256,
1275
+ "learning_rate": 0.00019872064456621848,
1276
+ "loss": 0.2447,
1277
+ "step": 8600
1278
+ },
1279
+ {
1280
+ "epoch": 0.23990376115015133,
1281
+ "grad_norm": 0.15820704400539398,
1282
+ "learning_rate": 0.0001986966629528135,
1283
+ "loss": 0.2469,
1284
+ "step": 8650
1285
+ },
1286
+ {
1287
+ "epoch": 0.2412904880932158,
1288
+ "grad_norm": 0.18477188050746918,
1289
+ "learning_rate": 0.00019867246012723598,
1290
+ "loss": 0.2407,
1291
+ "step": 8700
1292
+ },
1293
+ {
1294
+ "epoch": 0.24267721503628023,
1295
+ "grad_norm": 0.1676979809999466,
1296
+ "learning_rate": 0.0001986480361437325,
1297
+ "loss": 0.2448,
1298
+ "step": 8750
1299
+ },
1300
+ {
1301
+ "epoch": 0.2440639419793447,
1302
+ "grad_norm": 0.2173600196838379,
1303
+ "learning_rate": 0.00019862339105704543,
1304
+ "loss": 0.2409,
1305
+ "step": 8800
1306
+ },
1307
+ {
1308
+ "epoch": 0.24545066892240916,
1309
+ "grad_norm": 0.17326687276363373,
1310
+ "learning_rate": 0.00019859852492241256,
1311
+ "loss": 0.2387,
1312
+ "step": 8850
1313
+ },
1314
+ {
1315
+ "epoch": 0.24683739586547362,
1316
+ "grad_norm": 0.16229301691055298,
1317
+ "learning_rate": 0.00019857343779556725,
1318
+ "loss": 0.2467,
1319
+ "step": 8900
1320
+ },
1321
+ {
1322
+ "epoch": 0.2482241228085381,
1323
+ "grad_norm": 0.21166543662548065,
1324
+ "learning_rate": 0.0001985481297327381,
1325
+ "loss": 0.2507,
1326
+ "step": 8950
1327
+ },
1328
+ {
1329
+ "epoch": 0.24961084975160253,
1330
+ "grad_norm": 0.17892777919769287,
1331
+ "learning_rate": 0.00019852260079064894,
1332
+ "loss": 0.2416,
1333
+ "step": 9000
1334
+ },
1335
+ {
1336
+ "epoch": 0.24961084975160253,
1337
+ "eval_loss": 0.23973840475082397,
1338
+ "eval_runtime": 500.5349,
1339
+ "eval_samples_per_second": 5.708,
1340
+ "eval_steps_per_second": 5.708,
1341
+ "step": 9000
1342
+ },
1343
+ {
1344
+ "epoch": 0.250997576694667,
1345
+ "grad_norm": 0.20435132086277008,
1346
+ "learning_rate": 0.00019849685102651867,
1347
+ "loss": 0.2385,
1348
+ "step": 9050
1349
+ },
1350
+ {
1351
+ "epoch": 0.25238430363773146,
1352
+ "grad_norm": 0.1890842318534851,
1353
+ "learning_rate": 0.0001984708804980611,
1354
+ "loss": 0.2416,
1355
+ "step": 9100
1356
+ },
1357
+ {
1358
+ "epoch": 0.2537710305807959,
1359
+ "grad_norm": 0.18390174210071564,
1360
+ "learning_rate": 0.00019844468926348482,
1361
+ "loss": 0.2469,
1362
+ "step": 9150
1363
+ },
1364
+ {
1365
+ "epoch": 0.2551577575238604,
1366
+ "grad_norm": 0.23599492013454437,
1367
+ "learning_rate": 0.00019841827738149314,
1368
+ "loss": 0.2417,
1369
+ "step": 9200
1370
+ },
1371
+ {
1372
+ "epoch": 0.25654448446692485,
1373
+ "grad_norm": 0.1522965133190155,
1374
+ "learning_rate": 0.00019839164491128398,
1375
+ "loss": 0.2427,
1376
+ "step": 9250
1377
+ },
1378
+ {
1379
+ "epoch": 0.2579312114099893,
1380
+ "grad_norm": 0.206534281373024,
1381
+ "learning_rate": 0.00019836479191254948,
1382
+ "loss": 0.2452,
1383
+ "step": 9300
1384
+ },
1385
+ {
1386
+ "epoch": 0.2593179383530537,
1387
+ "grad_norm": 0.18928374350070953,
1388
+ "learning_rate": 0.00019833771844547627,
1389
+ "loss": 0.244,
1390
+ "step": 9350
1391
+ },
1392
+ {
1393
+ "epoch": 0.2607046652961182,
1394
+ "grad_norm": 0.17130087316036224,
1395
+ "learning_rate": 0.00019831042457074498,
1396
+ "loss": 0.2488,
1397
+ "step": 9400
1398
+ },
1399
+ {
1400
+ "epoch": 0.26209139223918265,
1401
+ "grad_norm": 0.17631781101226807,
1402
+ "learning_rate": 0.00019828291034953033,
1403
+ "loss": 0.2441,
1404
+ "step": 9450
1405
+ },
1406
+ {
1407
+ "epoch": 0.2634781191822471,
1408
+ "grad_norm": 0.1852494180202484,
1409
+ "learning_rate": 0.00019825517584350083,
1410
+ "loss": 0.2414,
1411
+ "step": 9500
1412
+ },
1413
+ {
1414
+ "epoch": 0.2648648461253116,
1415
+ "grad_norm": 0.21513506770133972,
1416
+ "learning_rate": 0.0001982272211148188,
1417
+ "loss": 0.2412,
1418
+ "step": 9550
1419
+ },
1420
+ {
1421
+ "epoch": 0.26625157306837605,
1422
+ "grad_norm": 0.18172813951969147,
1423
+ "learning_rate": 0.0001981990462261401,
1424
+ "loss": 0.2435,
1425
+ "step": 9600
1426
+ },
1427
+ {
1428
+ "epoch": 0.2676383000114405,
1429
+ "grad_norm": 0.1561124324798584,
1430
+ "learning_rate": 0.00019817065124061407,
1431
+ "loss": 0.238,
1432
+ "step": 9650
1433
+ },
1434
+ {
1435
+ "epoch": 0.269025026954505,
1436
+ "grad_norm": 0.16663338243961334,
1437
+ "learning_rate": 0.00019814203622188338,
1438
+ "loss": 0.2383,
1439
+ "step": 9700
1440
+ },
1441
+ {
1442
+ "epoch": 0.27041175389756944,
1443
+ "grad_norm": 0.17735238373279572,
1444
+ "learning_rate": 0.0001981132012340838,
1445
+ "loss": 0.2459,
1446
+ "step": 9750
1447
+ },
1448
+ {
1449
+ "epoch": 0.27179848084063385,
1450
+ "grad_norm": 0.21334126591682434,
1451
+ "learning_rate": 0.00019808414634184417,
1452
+ "loss": 0.2425,
1453
+ "step": 9800
1454
+ },
1455
+ {
1456
+ "epoch": 0.2731852077836983,
1457
+ "grad_norm": 0.16817434132099152,
1458
+ "learning_rate": 0.00019805487161028625,
1459
+ "loss": 0.2361,
1460
+ "step": 9850
1461
+ },
1462
+ {
1463
+ "epoch": 0.2745719347267628,
1464
+ "grad_norm": 0.17149919271469116,
1465
+ "learning_rate": 0.00019802537710502443,
1466
+ "loss": 0.2431,
1467
+ "step": 9900
1468
+ },
1469
+ {
1470
+ "epoch": 0.27595866166982724,
1471
+ "grad_norm": 0.1521356999874115,
1472
+ "learning_rate": 0.00019799566289216576,
1473
+ "loss": 0.2411,
1474
+ "step": 9950
1475
+ },
1476
+ {
1477
+ "epoch": 0.2773453886128917,
1478
+ "grad_norm": 0.15583455562591553,
1479
+ "learning_rate": 0.00019796572903830974,
1480
+ "loss": 0.2388,
1481
+ "step": 10000
1482
+ },
1483
+ {
1484
+ "epoch": 0.2773453886128917,
1485
+ "eval_loss": 0.23783154785633087,
1486
+ "eval_runtime": 501.3932,
1487
+ "eval_samples_per_second": 5.698,
1488
+ "eval_steps_per_second": 5.698,
1489
+ "step": 10000
1490
+ }
1491
+ ],
1492
+ "logging_steps": 50,
1493
+ "max_steps": 108168,
1494
+ "num_input_tokens_seen": 0,
1495
+ "num_train_epochs": 3,
1496
+ "save_steps": 1000,
1497
+ "stateful_callbacks": {
1498
+ "TrainerControl": {
1499
+ "args": {
1500
+ "should_epoch_stop": false,
1501
+ "should_evaluate": false,
1502
+ "should_log": false,
1503
+ "should_save": true,
1504
+ "should_training_stop": false
1505
+ },
1506
+ "attributes": {}
1507
+ }
1508
+ },
1509
+ "total_flos": 1.802385752064e+17,
1510
+ "train_batch_size": 1,
1511
+ "trial_name": null,
1512
+ "trial_params": null
1513
+ }
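The `trainer_state.json` fragment above logs one training entry every 50 optimizer steps (`logging_steps: 50`) plus periodic evaluation entries; eval loss falls from 0.2849 at step 2000 to 0.2378 at step 10000. A minimal sketch for inspecting these curves offline — assuming the entries sit under the standard `log_history` key that `transformers.Trainer` writes, and using an illustrative local path:

```python
import json
import matplotlib.pyplot as plt

# Load the checkpoint's trainer state (path is illustrative).
with open("checkpoint-10000/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

plt.plot(*zip(*train), label="train loss")
plt.plot(*zip(*evals), marker="o", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.show()
```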
checkpoint-10000/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e593d4080c231741ba3acb7aa362bf85842c0c2beaca29489d434eca5cc7fa0
3
+ size 5432
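The three lines above are a Git LFS pointer (spec version, SHA-256 object id, and byte size), not the binary itself; every `.bin`, `.pt`, and `.safetensors` file in this commit is stored the same way. A hedged sketch of resolving a pointer to the real payload with `huggingface_hub` — the repo id below is a placeholder, since it does not appear in this diff:

```python
from huggingface_hub import hf_hub_download

# "user/repo" is a placeholder repo id; substitute the actual model repo.
path = hf_hub_download(
    repo_id="user/repo",
    filename="checkpoint-10000/training_args.bin",
)
print(path)  # local cache path of the downloaded (LFS-resolved) file
```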
checkpoint-10000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-100000/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: Qwen/Qwen2.5-0.5B-Instruct
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.12.0
checkpoint-100000/adapter_config.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
5
+ "bias": "none",
6
+ "fan_in_fan_out": false,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 32,
14
+ "lora_dropout": 0.05,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": null,
18
+ "peft_type": "LORA",
19
+ "r": 16,
20
+ "rank_pattern": {},
21
+ "revision": null,
22
+ "target_modules": [
23
+ "q_proj",
24
+ "down_proj",
25
+ "o_proj",
26
+ "k_proj",
27
+ "v_proj",
28
+ "up_proj",
29
+ "gate_proj"
30
+ ],
31
+ "task_type": "CAUSAL_LM",
32
+ "use_dora": false,
33
+ "use_rslora": false
34
+ }
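The adapter config above defines a LoRA adapter (`r=16`, `lora_alpha=32`, dropout 0.05) over all attention and MLP projections of `Qwen/Qwen2.5-0.5B-Instruct`. A minimal loading sketch with `peft` (0.12.0 per the model card), assuming the checkpoint directory has been fetched locally; the path is illustrative:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen2.5-0.5B-Instruct"
base = AutoModelForCausalLM.from_pretrained(base_id)
tokenizer = AutoTokenizer.from_pretrained(base_id)

# Attach the LoRA weights from this checkpoint (local directory assumed).
model = PeftModel.from_pretrained(base, "checkpoint-100000")
model.eval()

# Optionally fold the adapter into the base weights for plain inference:
# model = model.merge_and_unload()
```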
checkpoint-100000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3c26895ebe509beb048b4a5df2b3584dbd61ab7a7d0df3e4e21af59776ac2fd
3
+ size 35237104
checkpoint-100000/added_tokens.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<tool_call>": 151657,
4
+ "<|box_end|>": 151649,
5
+ "<|box_start|>": 151648,
6
+ "<|endoftext|>": 151643,
7
+ "<|file_sep|>": 151664,
8
+ "<|fim_middle|>": 151660,
9
+ "<|fim_pad|>": 151662,
10
+ "<|fim_prefix|>": 151659,
11
+ "<|fim_suffix|>": 151661,
12
+ "<|im_end|>": 151645,
13
+ "<|im_start|>": 151644,
14
+ "<|image_pad|>": 151655,
15
+ "<|object_ref_end|>": 151647,
16
+ "<|object_ref_start|>": 151646,
17
+ "<|quad_end|>": 151651,
18
+ "<|quad_start|>": 151650,
19
+ "<|repo_name|>": 151663,
20
+ "<|video_pad|>": 151656,
21
+ "<|vision_end|>": 151653,
22
+ "<|vision_pad|>": 151654,
23
+ "<|vision_start|>": 151652
24
+ }
checkpoint-100000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-100000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90bd07cc32cdde47422b75334992b9028b7d157d071423d5d3a3561121396d95
3
+ size 70667778
checkpoint-100000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:81d6044f360391f451823995f67c8a25a8f965bd2bf1917d5adbc2e6af6a05e6
3
+ size 14244
checkpoint-100000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3492fc7749e1f19a47db06ff67f7a756ea45f058edac424b40411da4350e0678
3
+ size 1064
checkpoint-100000/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
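Note the asymmetry in the special-token map above: generation stops on `<|im_end|>` (the eos token), while padding uses `<|endoftext|>`. A quick check, with an illustrative local path:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-100000")  # illustrative local dir
print(tok.eos_token, tok.eos_token_id)  # <|im_end|> 151645
print(tok.pad_token, tok.pad_token_id)  # <|endoftext|> 151643
```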
checkpoint-100000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-100000/tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
199
+ "clean_up_tokenization_spaces": false,
200
+ "eos_token": "<|im_end|>",
201
+ "errors": "replace",
202
+ "model_max_length": 131072,
203
+ "pad_token": "<|endoftext|>",
204
+ "split_special_tokens": false,
205
+ "tokenizer_class": "Qwen2Tokenizer",
206
+ "unk_token": null
207
+ }
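The `chat_template` field above is a Jinja template that renders a message list into Qwen's ChatML-style prompt (`<|im_start|>role … <|im_end|>`), including `<tool_call>` blocks when tools are passed. A small usage sketch, again with an illustrative local path:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-100000")  # illustrative local dir
messages = [{"role": "user", "content": "Hello!"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # ends with "<|im_start|>assistant\n", ready for generation
```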
checkpoint-100000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-100000/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e593d4080c231741ba3acb7aa362bf85842c0c2beaca29489d434eca5cc7fa0
3
+ size 5432
checkpoint-100000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-101000/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: Qwen/Qwen2.5-0.5B-Instruct
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.12.0
checkpoint-101000/adapter_config.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
5
+ "bias": "none",
6
+ "fan_in_fan_out": false,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 32,
14
+ "lora_dropout": 0.05,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": null,
18
+ "peft_type": "LORA",
19
+ "r": 16,
20
+ "rank_pattern": {},
21
+ "revision": null,
22
+ "target_modules": [
23
+ "q_proj",
24
+ "down_proj",
25
+ "o_proj",
26
+ "k_proj",
27
+ "v_proj",
28
+ "up_proj",
29
+ "gate_proj"
30
+ ],
31
+ "task_type": "CAUSAL_LM",
32
+ "use_dora": false,
33
+ "use_rslora": false
34
+ }
checkpoint-101000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8dfd0236cc0e15e7055065e987e6fe540dc814097e82681e163632e03e80fcab
3
+ size 35237104
checkpoint-101000/added_tokens.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<tool_call>": 151657,
4
+ "<|box_end|>": 151649,
5
+ "<|box_start|>": 151648,
6
+ "<|endoftext|>": 151643,
7
+ "<|file_sep|>": 151664,
8
+ "<|fim_middle|>": 151660,
9
+ "<|fim_pad|>": 151662,
10
+ "<|fim_prefix|>": 151659,
11
+ "<|fim_suffix|>": 151661,
12
+ "<|im_end|>": 151645,
13
+ "<|im_start|>": 151644,
14
+ "<|image_pad|>": 151655,
15
+ "<|object_ref_end|>": 151647,
16
+ "<|object_ref_start|>": 151646,
17
+ "<|quad_end|>": 151651,
18
+ "<|quad_start|>": 151650,
19
+ "<|repo_name|>": 151663,
20
+ "<|video_pad|>": 151656,
21
+ "<|vision_end|>": 151653,
22
+ "<|vision_pad|>": 151654,
23
+ "<|vision_start|>": 151652
24
+ }
checkpoint-101000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-101000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b70a06edad3ab9a340b005ff18c8b6780f3d3085c8cdaa34d3172f1f075ffd7
3
+ size 70667778
checkpoint-101000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:50f020ffad9fe17df1c07fa175e61340c75596ed3d0c2722273bea78476d9068
3
+ size 14244
checkpoint-101000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da286f94045493eb2418832babaf8f5961021808b00eecaf9c6e2022aad67b87
3
+ size 1064