Kadins committed
Commit 8b0eee9 · verified · 1 Parent(s): 14242e3

Model save
README.md ADDED
@@ -0,0 +1,67 @@
+ ---
+ library_name: transformers
+ model_name: Qwen-2.5-7B-Math-RL
+ tags:
+ - generated_from_trainer
+ - trl
+ - grpo
+ licence: license
+ ---
+
+ # Model Card for Qwen-2.5-7B-Math-RL
+
+ This model is a fine-tuned version of a base model that is not specified in this card.
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="Kadins/Qwen-2.5-7B-Math-RL", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/build_r1/huggingface/runs/xcm5a6ec)
+
+
+ This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
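+
+ As a rough illustration only, a minimal GRPO run with TRL's `GRPOTrainer` looks like the sketch below. The base-model id, dataset, and reward function here are placeholders: the actual reward functions used for this run (accuracy, format, and repetition-penalty rewards, per the training log in this commit) and the base model are not included in the repository.
+
+ ```python
+ from datasets import Dataset
+ from trl import GRPOConfig, GRPOTrainer
+
+ # Hypothetical reward function: one scalar score per generated completion.
+ def accuracy_reward(completions, **kwargs):
+     return [1.0 if "42" in c else 0.0 for c in completions]
+
+ # Placeholder prompt-only dataset; GRPO samples several completions per prompt.
+ train_dataset = Dataset.from_dict({"prompt": ["What is 6 * 7?"] * 64})
+
+ trainer = GRPOTrainer(
+     model="Qwen/Qwen2.5-7B",  # assumed base model; not recorded in this card
+     reward_funcs=accuracy_reward,
+     args=GRPOConfig(output_dir="Qwen-2.5-7B-Math-RL", per_device_train_batch_size=16),
+     train_dataset=train_dataset,
+ )
+ trainer.train()
+ ```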
+
+ ### Framework versions
+
+ - TRL: 0.16.0.dev0
+ - Transformers: 4.49.0
+ - Pytorch: 2.5.1+cu124
+ - Datasets: 3.3.1
+ - Tokenizers: 0.21.0
+
+ ## Citations
+
+ Cite GRPO as:
+
+ ```bibtex
+ @article{zhihong2024deepseekmath,
+ title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
+ author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
+ year = 2024,
+ eprint = {arXiv:2402.03300},
+ }
+
+ ```
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+ title = {{TRL: Transformer Reinforcement Learning}},
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+ year = 2020,
+ journal = {GitHub repository},
+ publisher = {GitHub},
+ howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "total_flos": 0.0,
+ "train_loss": 7.607245717240476,
+ "train_runtime": 13150.4833,
+ "train_samples": 7500,
+ "train_samples_per_second": 0.855,
+ "train_steps_per_second": 0.007
+ }
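For orientation, `train_samples` (7,500) is the per-epoch dataset size, while the run covered about 1.49 epochs (see trainer_state.json below). A rough back-of-the-envelope check, assuming the reported throughput counts total samples processed over the whole run:

```python
# Rough consistency check on the figures in all_results.json (assumption:
# train_samples_per_second reflects total samples processed over the run).
train_runtime = 13150.4833            # seconds, from all_results.json
train_samples_per_epoch = 7500        # from all_results.json
epochs = 1.4946695095948828           # from trainer_state.json

total_samples = train_samples_per_epoch * epochs
print(round(total_samples / train_runtime, 3))  # ~0.852, close to the reported 0.855
```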
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "max_new_tokens": 64,
+ "pad_token_id": 151643,
+ "transformers_version": "4.49.0"
+ }
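These generation defaults ship with the checkpoint and are picked up automatically when it is loaded, so a bare `generate()` call stops after at most 64 new tokens; the Quick start above overrides this with `max_new_tokens=128`. A minimal sketch, assuming enough memory for the 7B checkpoint and `accelerate` installed for `device_map="auto"`:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Kadins/Qwen-2.5-7B-Math-RL"
tok = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")

# Defaults from generation_config.json: max_new_tokens=64, bos/eos/pad token id 151643.
print(model.generation_config)

inputs = tok("What is 17 * 23?", return_tensors="pt").to(model.device)
out = model.generate(**inputs)  # uses the shipped defaults unless overridden
print(tok.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```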
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "total_flos": 0.0,
+ "train_loss": 7.607245717240476,
+ "train_runtime": 13150.4833,
+ "train_samples": 7500,
+ "train_samples_per_second": 0.855,
+ "train_steps_per_second": 0.007
+ }
trainer_state.json ADDED
@@ -0,0 +1,301 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.4946695095948828,
+ "eval_steps": 500,
+ "global_step": 87,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "completion_length": 691.5893173217773,
+ "epoch": 0.017057569296375266,
+ "grad_norm": 0.6600080728530884,
+ "kl": 0.0,
+ "learning_rate": 3.333333333333333e-07,
+ "loss": 0.0895,
+ "reward": 0.21837703324854374,
+ "reward_std": 0.457524124532938,
+ "rewards/accuracy_reward": 0.4107143059372902,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.19233726896345615,
+ "step": 1
+ },
+ {
+ "completion_length": 711.3395977020264,
+ "epoch": 0.08528784648187633,
+ "grad_norm": 0.521936297416687,
+ "kl": 0.00035202503204345703,
+ "learning_rate": 1.6666666666666669e-06,
+ "loss": 0.0873,
+ "reward": 0.19321720697917044,
+ "reward_std": 0.4614652991294861,
+ "rewards/accuracy_reward": 0.38699778635054827,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.1937805782072246,
+ "step": 5
+ },
+ {
+ "completion_length": 684.9652038574219,
+ "epoch": 0.17057569296375266,
+ "grad_norm": 0.5586162805557251,
+ "kl": 0.01878194808959961,
+ "learning_rate": 2.9987834972573546e-06,
+ "loss": 0.0742,
+ "reward": 0.2827843511477113,
+ "reward_std": 0.44751987904310225,
+ "rewards/accuracy_reward": 0.4611607357859612,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.17837638631463051,
+ "step": 10
+ },
+ {
+ "completion_length": 641.0350692749023,
+ "epoch": 0.255863539445629,
+ "grad_norm": 1.2480870485305786,
+ "kl": 0.5136314392089844,
+ "learning_rate": 2.956412726139078e-06,
+ "loss": 0.1195,
+ "reward": 0.49980842545628545,
+ "reward_std": 0.3398652091622353,
+ "rewards/accuracy_reward": 0.6600446701049805,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.16023624502122402,
+ "step": 15
+ },
+ {
+ "completion_length": 629.3375289916992,
+ "epoch": 0.3411513859275053,
+ "grad_norm": 0.5801119208335876,
+ "kl": 0.020611572265625,
+ "learning_rate": 2.8551756519155732e-06,
+ "loss": 0.1008,
+ "reward": 0.5805917799472808,
+ "reward_std": 0.27404730543494227,
+ "rewards/accuracy_reward": 0.7352678969502449,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.15467611663043498,
+ "step": 20
+ },
+ {
+ "completion_length": 632.6993621826172,
+ "epoch": 0.42643923240938164,
+ "grad_norm": 0.5435677766799927,
+ "kl": 0.01521759033203125,
+ "learning_rate": 2.699164145105252e-06,
+ "loss": 0.0827,
+ "reward": 0.5681484438478946,
+ "reward_std": 0.28131211549043655,
+ "rewards/accuracy_reward": 0.710714316368103,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.14256587382405997,
+ "step": 25
+ },
+ {
+ "completion_length": 631.0065017700196,
+ "epoch": 0.511727078891258,
+ "grad_norm": 0.40434423089027405,
+ "kl": 0.087030029296875,
+ "learning_rate": 2.4946839873611927e-06,
+ "loss": 0.0906,
+ "reward": 0.5263580977916718,
+ "reward_std": 0.3138962801545858,
+ "rewards/accuracy_reward": 0.6600446760654449,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.13368657547980548,
+ "step": 30
+ },
+ {
+ "completion_length": 617.7948944091797,
+ "epoch": 0.5970149253731343,
+ "grad_norm": 1.0160555839538574,
+ "kl": 0.117022705078125,
+ "learning_rate": 2.25e-06,
+ "loss": 0.0962,
+ "reward": 0.5008847367018461,
+ "reward_std": 0.3264832843095064,
+ "rewards/accuracy_reward": 0.622321455180645,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.12143671978265047,
+ "step": 35
+ },
+ {
+ "completion_length": 607.287973022461,
+ "epoch": 0.6823027718550106,
+ "grad_norm": 1.8936026096343994,
+ "kl": 0.2015625,
+ "learning_rate": 1.975001990702209e-06,
+ "loss": 0.1194,
+ "reward": 0.5203140497207641,
+ "reward_std": 0.3361724577844143,
+ "rewards/accuracy_reward": 0.6433036029338837,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.12298955339938403,
+ "step": 40
+ },
+ {
+ "completion_length": 604.0877517700195,
+ "epoch": 0.767590618336887,
+ "grad_norm": 4.448797225952148,
+ "kl": 5.82470703125,
+ "learning_rate": 1.6808050203829845e-06,
+ "loss": 0.5125,
+ "reward": 0.5560206107795238,
+ "reward_std": 0.29633302725851535,
+ "rewards/accuracy_reward": 0.6729911029338836,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.11697048898786307,
+ "step": 45
+ },
+ {
+ "completion_length": 604.8629692077636,
+ "epoch": 0.8528784648187633,
+ "grad_norm": 2.490539789199829,
+ "kl": 0.377392578125,
+ "learning_rate": 1.3793001469249112e-06,
+ "loss": 0.1237,
+ "reward": 0.5681209519505501,
+ "reward_std": 0.28387211747467517,
+ "rewards/accuracy_reward": 0.6881696730852127,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.12004871871322394,
+ "step": 50
+ },
+ {
+ "completion_length": 579.4413185119629,
+ "epoch": 0.9381663113006397,
+ "grad_norm": 4.152231216430664,
+ "kl": 0.2768310546875,
+ "learning_rate": 1.0826738041253211e-06,
+ "loss": 0.0852,
+ "reward": 0.6077298022806644,
+ "reward_std": 0.27112445086240766,
+ "rewards/accuracy_reward": 0.7265625357627868,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.11883272156119347,
+ "step": 55
+ },
+ {
+ "completion_length": 566.9164165150036,
+ "epoch": 1.0341151385927505,
+ "grad_norm": 3.0358667373657227,
+ "kl": 713.1028053977273,
+ "learning_rate": 8.029152419343472e-07,
+ "loss": 82.7243,
+ "reward": 0.6099555316296491,
+ "reward_std": 0.25437648966908455,
+ "rewards/accuracy_reward": 0.7307224382053722,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.12076690539040348,
+ "step": 60
+ },
+ {
+ "completion_length": 548.3241325378418,
+ "epoch": 1.1194029850746268,
+ "grad_norm": 2.281466245651245,
+ "kl": 0.169915771484375,
+ "learning_rate": 5.513319366069343e-07,
+ "loss": 0.0759,
+ "reward": 0.6292034849524498,
+ "reward_std": 0.24837480448186397,
+ "rewards/accuracy_reward": 0.7457589656114578,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.11655548624694348,
+ "step": 65
+ },
+ {
+ "completion_length": 544.6705657958985,
+ "epoch": 1.2046908315565032,
+ "grad_norm": 4.551150798797607,
+ "kl": 0.168426513671875,
+ "learning_rate": 3.380925572585183e-07,
+ "loss": 0.081,
+ "reward": 0.6241521313786507,
+ "reward_std": 0.23203972559422253,
+ "rewards/accuracy_reward": 0.7475446790456772,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.12339254282414913,
+ "step": 70
+ },
+ {
+ "completion_length": 560.6620811462402,
+ "epoch": 1.2899786780383795,
+ "grad_norm": 1.7139493227005005,
+ "kl": 0.295257568359375,
+ "learning_rate": 1.718159615201853e-07,
+ "loss": 0.0725,
+ "reward": 0.6123189993202687,
+ "reward_std": 0.25482493862509725,
+ "rewards/accuracy_reward": 0.7330357432365417,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.12071674298495054,
+ "step": 75
+ },
+ {
+ "completion_length": 538.8129737854003,
+ "epoch": 1.375266524520256,
+ "grad_norm": 1.8946577310562134,
+ "kl": 0.28873291015625,
+ "learning_rate": 5.922283255294164e-08,
+ "loss": 0.0819,
+ "reward": 0.6251922458410263,
+ "reward_std": 0.23853217102587224,
+ "rewards/accuracy_reward": 0.7475446805357933,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.12235244438052177,
+ "step": 80
+ },
+ {
+ "completion_length": 552.9801628112793,
+ "epoch": 1.4605543710021323,
+ "grad_norm": 1.40664541721344,
+ "kl": 711.1384460449219,
+ "learning_rate": 4.864037798685106e-09,
+ "loss": 47.7953,
+ "reward": 0.618848904967308,
+ "reward_std": 0.24578485451638699,
+ "rewards/accuracy_reward": 0.7415178909897804,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.12266898676753044,
+ "step": 85
+ },
+ {
+ "completion_length": 589.5374145507812,
+ "epoch": 1.4946695095948828,
+ "kl": 0.252349853515625,
+ "reward": 0.5610090717673302,
+ "reward_std": 0.24640783108770847,
+ "rewards/accuracy_reward": 0.6824777089059353,
+ "rewards/format_reward": 0.0,
+ "rewards/repetition_penalty_reward": -0.12146863108500838,
+ "step": 87,
+ "total_flos": 0.0,
+ "train_loss": 7.607245717240476,
+ "train_runtime": 13150.4833,
+ "train_samples_per_second": 0.855,
+ "train_steps_per_second": 0.007
+ }
+ ],
+ "logging_steps": 5,
+ "max_steps": 87,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 10,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
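The `log_history` entries above each carry a `step` and the mean `reward` for that logging window, so the reward curve can be reconstructed directly from this file. A minimal sketch, assuming `trainer_state.json` has been downloaded locally and matplotlib is available:

```python
import json

import matplotlib.pyplot as plt

# Load the trainer state shipped in this commit (hypothetical local path).
with open("trainer_state.json") as f:
    state = json.load(f)

entries = [e for e in state["log_history"] if "reward" in e]
steps = [e["step"] for e in entries]
rewards = [e["reward"] for e in entries]

plt.plot(steps, rewards, marker="o")
plt.xlabel("global step")
plt.ylabel("mean reward")
plt.title("GRPO reward over training")
plt.savefig("reward_curve.png")
```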