DGME committed on
Commit 4a2da0e · verified · 1 Parent(s): d49f13a

Model save

Files changed (4)
  1. README.md +58 -0
  2. all_results.json +8 -0
  3. train_results.json +8 -0
  4. trainer_state.json +315 -0
README.md ADDED
@@ -0,0 +1,58 @@
+ ---
+ base_model: Qwen/Qwen2.5-0.5B-Instruct
+ library_name: transformers
+ model_name: Qwen2.5-0.5B-Open-R1-SFT
+ tags:
+ - generated_from_trainer
+ - trl
+ - sft
+ licence: license
+ ---
+
+ # Model Card for Qwen2.5-0.5B-Open-R1-SFT
+
+ This model is a fine-tuned version of [Qwen/Qwen2.5-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="DGME/Qwen2.5-0.5B-Open-R1-SFT", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/dgme-nanjing-university-of-aeronautics-and-astronautics/huggingface/runs/1cfo2dxw)
+
+
+ This model was trained with SFT.
+
+ ### Framework versions
+
+ - TRL: 0.17.0.dev0
+ - Transformers: 4.51.2
+ - Pytorch: 2.6.0
+ - Datasets: 3.3.2
+ - Tokenizers: 0.21.0
+
+ ## Citations
+
+
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title = {{TRL: Transformer Reinforcement Learning}},
+     author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
+     year = 2020,
+     journal = {GitHub repository},
+     publisher = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
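For context, below is a minimal sketch of how a run like this could be reproduced with TRL's `SFTTrainer`. The dataset (`trl-lib/Capybara` here) is a placeholder, since the commit does not record which corpus was used; only the values annotated as coming from `trainer_state.json` are actually recorded in this commit.

```python
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

# Hypothetical dataset: the commit does not record which corpus was used.
dataset = load_dataset("trl-lib/Capybara", split="train")

training_args = SFTConfig(
    output_dir="Qwen2.5-0.5B-Open-R1-SFT",
    num_train_epochs=10,            # num_train_epochs in trainer_state.json
    per_device_train_batch_size=1,  # train_batch_size in trainer_state.json
    logging_steps=5,                # logging_steps in trainer_state.json
    save_steps=5,                   # save_steps in trainer_state.json
    learning_rate=5e-05,            # peak learning rate seen in log_history
)

trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B-Instruct",  # base model named in the README
    args=training_args,
    train_dataset=dataset,
)
trainer.train()
```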
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "total_flos": 385299641270272.0,
+     "train_loss": 0.13468429481281954,
+     "train_runtime": 769.9529,
+     "train_samples": 7473,
+     "train_samples_per_second": 1.831,
+     "train_steps_per_second": 0.221
+ }
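The throughput fields are mutually consistent; a quick check in plain Python, with the values copied from the file above:

```python
# Cross-check the reported throughput against the step count in trainer_state.json.
train_runtime = 769.9529              # seconds
train_steps_per_second = 0.221
train_samples_per_second = 1.831

print(round(train_runtime * train_steps_per_second))    # ~170, the final global_step
print(round(train_runtime * train_samples_per_second))  # ~1410 samples consumed
```

Note that `train_samples` (7473) is the dataset size, while only about 1410 samples were consumed over roughly 9.8 epochs, which suggests the examples were packed into longer sequences before training.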
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "total_flos": 385299641270272.0,
+     "train_loss": 0.13468429481281954,
+     "train_runtime": 769.9529,
+     "train_samples": 7473,
+     "train_samples_per_second": 1.831,
+     "train_steps_per_second": 0.221
+ }
trainer_state.json ADDED
@@ -0,0 +1,315 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 9.794326241134751,
+   "eval_steps": 500,
+   "global_step": 170,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.28368794326241137,
+       "grad_norm": 1.4403227431607122,
+       "learning_rate": 2.2222222222222223e-05,
+       "loss": 0.5222,
+       "num_tokens": 634122.0,
+       "step": 5
+     },
+     {
+       "epoch": 0.5673758865248227,
+       "grad_norm": 1.051530473447262,
+       "learning_rate": 5e-05,
+       "loss": 0.4841,
+       "num_tokens": 1261436.0,
+       "step": 10
+     },
+     {
+       "epoch": 0.851063829787234,
+       "grad_norm": 0.5700469145307384,
+       "learning_rate": 4.989299698973645e-05,
+       "loss": 0.4265,
+       "num_tokens": 1900524.0,
+       "step": 15
+     },
+     {
+       "epoch": 1.1134751773049645,
+       "grad_norm": 0.4334982351318258,
+       "learning_rate": 4.957300570509739e-05,
+       "loss": 0.3977,
+       "num_tokens": 2498258.0,
+       "step": 20
+     },
+     {
+       "epoch": 1.397163120567376,
+       "grad_norm": 0.3413966057669926,
+       "learning_rate": 4.9043069704368563e-05,
+       "loss": 0.3787,
+       "num_tokens": 3127412.0,
+       "step": 25
+     },
+     {
+       "epoch": 1.6808510638297873,
+       "grad_norm": 0.30824886663026985,
+       "learning_rate": 4.83082294095344e-05,
+       "loss": 0.366,
+       "num_tokens": 3747537.0,
+       "step": 30
+     },
+     {
+       "epoch": 1.9645390070921986,
+       "grad_norm": 0.23393589833355252,
+       "learning_rate": 4.7375474164915813e-05,
+       "loss": 0.3629,
+       "num_tokens": 4390308.0,
+       "step": 35
+     },
+     {
+       "epoch": 2.226950354609929,
+       "grad_norm": 0.20565201227770782,
+       "learning_rate": 4.625367575886955e-05,
+       "loss": 0.3438,
+       "num_tokens": 4988042.0,
+       "step": 40
+     },
+     {
+       "epoch": 2.5106382978723403,
+       "grad_norm": 0.18576050641368966,
+       "learning_rate": 4.4953504040849445e-05,
+       "loss": 0.3479,
+       "num_tokens": 5633018.0,
+       "step": 45
+     },
+     {
+       "epoch": 2.794326241134752,
+       "grad_norm": 0.1668363092881809,
+       "learning_rate": 4.348732543642775e-05,
+       "loss": 0.337,
+       "num_tokens": 6264607.0,
+       "step": 50
+     },
+     {
+       "epoch": 3.0567375886524824,
+       "grad_norm": 0.2191426033169888,
+       "learning_rate": 4.1869085325538425e-05,
+       "loss": 0.3336,
+       "num_tokens": 6839414.0,
+       "step": 55
+     },
+     {
+       "epoch": 3.3404255319148937,
+       "grad_norm": 0.1529250136588493,
+       "learning_rate": 4.011417540268764e-05,
+       "loss": 0.3313,
+       "num_tokens": 7460094.0,
+       "step": 60
+     },
+     {
+       "epoch": 3.624113475177305,
+       "grad_norm": 0.13922308111530698,
+       "learning_rate": 3.8239287280718634e-05,
+       "loss": 0.3261,
+       "num_tokens": 8090603.0,
+       "step": 65
+     },
+     {
+       "epoch": 3.9078014184397163,
+       "grad_norm": 0.1510678073392735,
+       "learning_rate": 3.626225373056101e-05,
+       "loss": 0.3206,
+       "num_tokens": 8731464.0,
+       "step": 70
+     },
+     {
+       "epoch": 4.170212765957447,
+       "grad_norm": 0.13076324205354475,
+       "learning_rate": 3.420187906699333e-05,
+       "loss": 0.3138,
+       "num_tokens": 9337672.0,
+       "step": 75
+     },
+     {
+       "epoch": 4.453900709219858,
+       "grad_norm": 0.13075395705901488,
+       "learning_rate": 3.207776029368427e-05,
+       "loss": 0.3135,
+       "num_tokens": 9993032.0,
+       "step": 80
+     },
+     {
+       "epoch": 4.73758865248227,
+       "grad_norm": 0.1217000256711584,
+       "learning_rate": 2.99101007086695e-05,
+       "loss": 0.3126,
+       "num_tokens": 10628806.0,
+       "step": 85
+     },
+     {
+       "epoch": 5.0,
+       "grad_norm": 0.17989296172670693,
+       "learning_rate": 2.7719517743133312e-05,
+       "loss": 0.3194,
+       "num_tokens": 11180570.0,
+       "step": 90
+     },
+     {
+       "epoch": 5.567375886524823,
+       "grad_norm": 0.12712404524211618,
+       "learning_rate": 2.5526846861212987e-05,
+       "loss": 0.3084,
+       "num_tokens": 618338.0,
+       "step": 95
+     },
+     {
+       "epoch": 5.851063829787234,
+       "grad_norm": 0.12153203094502081,
+       "learning_rate": 2.3352943386009223e-05,
+       "loss": 0.3051,
+       "num_tokens": 655360.0,
+       "step": 100
+     },
+     {
+       "epoch": 6.170212765957447,
+       "grad_norm": 0.1305667538126749,
+       "learning_rate": 2.1218484136710372e-05,
+       "loss": 0.3671,
+       "num_tokens": 1370427.0,
+       "step": 105
+     },
+     {
+       "epoch": 6.453900709219858,
+       "grad_norm": 0.11952808807930158,
+       "learning_rate": 1.914377076353511e-05,
+       "loss": 0.2998,
+       "num_tokens": 1995968.0,
+       "step": 110
+     },
+     {
+       "epoch": 6.73758865248227,
+       "grad_norm": 0.11957376304714733,
+       "learning_rate": 1.7148536651049078e-05,
+       "loss": 0.3056,
+       "num_tokens": 2640944.0,
+       "step": 115
+     },
+     {
+       "epoch": 7.0,
+       "grad_norm": 0.1618112055166159,
+       "learning_rate": 1.5251759226471556e-05,
+       "loss": 0.3039,
+       "num_tokens": 3235538.0,
+       "step": 120
+     },
+     {
+       "epoch": 7.283687943262412,
+       "grad_norm": 0.12326180294670976,
+       "learning_rate": 1.3471479458178499e-05,
+       "loss": 0.2973,
+       "num_tokens": 3848771.0,
+       "step": 125
+     },
+     {
+       "epoch": 7.567375886524823,
+       "grad_norm": 0.12629369151907394,
+       "learning_rate": 1.1824630261220466e-05,
+       "loss": 0.301,
+       "num_tokens": 4504131.0,
+       "step": 130
+     },
+     {
+       "epoch": 7.851063829787234,
+       "grad_norm": 0.12537087846653183,
+       "learning_rate": 1.0326875441955191e-05,
+       "loss": 0.3028,
+       "num_tokens": 5139202.0,
+       "step": 135
+     },
+     {
+       "epoch": 8.113475177304965,
+       "grad_norm": 0.11821179377146723,
+       "learning_rate": 8.99246071365363e-06,
+       "loss": 0.3039,
+       "num_tokens": 5733796.0,
+       "step": 140
+     },
+     {
+       "epoch": 8.397163120567376,
+       "grad_norm": 0.12302203109401375,
+       "learning_rate": 7.834078200126287e-06,
+       "loss": 0.2976,
+       "num_tokens": 6355222.0,
+       "step": 145
+     },
+     {
+       "epoch": 8.680851063829786,
+       "grad_norm": 0.125688120347689,
+       "learning_rate": 6.862745716127312e-06,
+       "loss": 0.3,
+       "num_tokens": 6980870.0,
+       "step": 150
+     },
+     {
+       "epoch": 8.964539007092199,
+       "grad_norm": 0.12817483625488393,
+       "learning_rate": 6.087701972745999e-06,
+       "loss": 0.3018,
+       "num_tokens": 7625846.0,
+       "step": 155
+     },
+     {
+       "epoch": 9.22695035460993,
+       "grad_norm": 0.12206554634101204,
+       "learning_rate": 5.51631870452704e-06,
+       "loss": 0.2962,
+       "num_tokens": 8215782.0,
+       "step": 160
+     },
+     {
+       "epoch": 9.51063829787234,
+       "grad_norm": 0.11862499163677016,
+       "learning_rate": 5.154030554111651e-06,
+       "loss": 0.2983,
+       "num_tokens": 8860030.0,
+       "step": 165
+     },
+     {
+       "epoch": 9.794326241134751,
+       "grad_norm": 0.11740647423935624,
+       "learning_rate": 5.004283381293366e-06,
+       "loss": 0.2988,
+       "num_tokens": 9507891.0,
+       "step": 170
+     },
+     {
+       "epoch": 9.794326241134751,
+       "step": 170,
+       "total_flos": 385299641270272.0,
+       "train_loss": 0.13468429481281954,
+       "train_runtime": 769.9529,
+       "train_samples_per_second": 1.831,
+       "train_steps_per_second": 0.221
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 170,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 5,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 385299641270272.0,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
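To inspect the loss curve without re-reading the diff, `log_history` can be pulled straight from the committed file; a minimal sketch:

```python
import json

# Print the logged training-loss curve from trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # the final entry is a run summary with train_loss instead
        print(f"step {entry['step']:>3}  epoch {entry['epoch']:5.2f}  loss {entry['loss']:.4f}")
```

The loss falls from 0.52 to roughly 0.30 over 170 steps. Note also that the `num_tokens` counter drops between steps 90 and 95, which is consistent with the run having been resumed from a checkpoint partway through.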