AdithyaSK (HF Staff) committed
Commit 6916455 · verified · 1 parent: 19df7aa

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +7 -0
  2. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/added_tokens.json +24 -0
  3. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/config.json +29 -0
  4. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/generation_config.json +14 -0
  5. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/merges.txt +0 -0
  6. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/rng_state_0.pth +3 -0
  7. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/rng_state_1.pth +3 -0
  8. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/scheduler.pt +3 -0
  9. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/special_tokens_map.json +25 -0
  10. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/tokenizer.json +3 -0
  11. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/tokenizer_config.json +208 -0
  12. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/trainer_state.json +1633 -0
  13. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/vocab.json +0 -0
  14. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/added_tokens.json +24 -0
  15. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/config.json +29 -0
  16. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/generation_config.json +14 -0
  17. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/merges.txt +0 -0
  18. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/rng_state_0.pth +3 -0
  19. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/rng_state_1.pth +3 -0
  20. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/scheduler.pt +3 -0
  21. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/special_tokens_map.json +25 -0
  22. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/tokenizer.json +3 -0
  23. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/tokenizer_config.json +208 -0
  24. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/trainer_state.json +0 -0
  25. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/training_args.bin +3 -0
  26. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/vocab.json +0 -0
  27. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/added_tokens.json +24 -0
  28. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/config.json +29 -0
  29. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/generation_config.json +14 -0
  30. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/merges.txt +0 -0
  31. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/model.safetensors +3 -0
  32. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/rng_state_0.pth +3 -0
  33. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/rng_state_1.pth +3 -0
  34. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/scheduler.pt +3 -0
  35. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/special_tokens_map.json +25 -0
  36. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/tokenizer.json +3 -0
  37. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/tokenizer_config.json +208 -0
  38. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/trainer_state.json +0 -0
  39. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/training_args.bin +3 -0
  40. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/vocab.json +0 -0
  41. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/added_tokens.json +24 -0
  42. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/config.json +29 -0
  43. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/generation_config.json +14 -0
  44. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/merges.txt +0 -0
  45. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/model.safetensors +3 -0
  46. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/rng_state_0.pth +3 -0
  47. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/rng_state_1.pth +3 -0
  48. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/scheduler.pt +3 -0
  49. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/special_tokens_map.json +25 -0
  50. Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/tokenizer.json +3 -0
.gitattributes CHANGED
@@ -66,3 +66,10 @@ Qwen-0.5B-GRPO-Code-vllm-modified-v1/checkpoint-100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  Qwen-0.5B-GRPO-Code-vllm-modified-v1/checkpoint-1600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  llama3b-GRPO-Code-vllm/checkpoint-200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
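
Each tokenizer.json above is routed through Git LFS, so the repository stores only a small pointer file while the ~11 MB payload lives in LFS storage. A minimal sketch of fetching one such file with huggingface_hub (the repo_id below is a placeholder, not taken from this commit):

    # Sketch: resolve an LFS-tracked file to a local path via the Hub cache.
    # repo_id is hypothetical; substitute the actual model repository.
    from huggingface_hub import hf_hub_download

    local_path = hf_hub_download(
        repo_id="user/grpo-checkpoints",  # placeholder repo id
        filename="Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/tokenizer.json",
    )
    print(local_path)  # cached local copy of the resolved LFS object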
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "eos_token_id": 151645,
+ "hidden_act": "silu",
+ "hidden_size": 896,
+ "initializer_range": 0.02,
+ "intermediate_size": 4864,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 2,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.48.2",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ }
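
config.json pins these checkpoints to the Qwen2ForCausalLM architecture (24 layers, hidden size 896, GQA with 2 KV heads) in bfloat16, initialized from Qwen/Qwen2.5-0.5B-Instruct. A minimal loading sketch, assuming a local copy of a checkpoint directory that includes model.safetensors (checkpoint-300 and later in this listing; the path is illustrative):

    # Sketch: load a checkpoint with transformers; the path is illustrative.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    ckpt = "Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300"  # local checkpoint dir
    model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)
    tokenizer = AutoTokenizer.from_pretrained(ckpt)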
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "repetition_penalty": 1.1,
+ "temperature": 0.7,
+ "top_k": 20,
+ "top_p": 0.8,
+ "transformers_version": "4.48.2"
+ }
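
generation_config.json carries the sampling defaults these checkpoints ship with (temperature 0.7, top_p 0.8, top_k 20, repetition_penalty 1.1, sampling enabled). A sketch of generating with exactly those defaults, reusing the model and tokenizer loaded above:

    # Sketch: generate with the checkpoint's own generation defaults.
    from transformers import GenerationConfig

    gen_cfg = GenerationConfig.from_pretrained(ckpt)
    inputs = tokenizer("Write a function that reverses a string.", return_tensors="pt")
    out = model.generate(**inputs, generation_config=gen_cfg, max_new_tokens=128)
    print(tokenizer.decode(out[0], skip_special_tokens=True))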
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76a9c7f09fa1d2fb5198badfb65e763ac66f6972eedcca0cecc5feab433b7069
+ size 14448
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f68a37892a1b445d21bb35cc10bf7a058a6f9ec8c363f5ed156ff4f49d90fb6
+ size 14512
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71d2161c9233860a0c55de4fa68dfcf16cdf4af9234c3d44fc611fcd896d1189
+ size 1064
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/special_tokens_map.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<|im_end|>"
+ }
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63a2951d5edfa5cc0a2346ef872f8c77a2920274cfc3b503b04e3799104dee80
+ size 11422060
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/tokenizer_config.json ADDED
@@ -0,0 +1,208 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<|object_ref_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|object_ref_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151648": {
+ "content": "<|box_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151649": {
+ "content": "<|box_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 131072,
+ "pad_token": "<|im_end|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
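
The chat_template above is Qwen's ChatML-style Jinja template, including the <tools>/<tool_call> branches for function calling. A sketch of rendering a prompt through it with the standard transformers API, reusing the tokenizer loaded earlier (the message content is illustrative):

    # Sketch: render a ChatML prompt using the tokenizer's chat template.
    messages = [{"role": "user", "content": "Sum the integers from 1 to 10."}]
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    print(prompt)  # <|im_start|>system ... <|im_start|>assistant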
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/trainer_state.json ADDED
@@ -0,0 +1,1633 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.02675943270002676,
+ "eval_steps": 500,
+ "global_step": 100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "completion_length": 98.5,
+ "epoch": 0.0002675943270002676,
+ "grad_norm": 30.0,
+ "kl": 0.0,
+ "learning_rate": 5.3475935828877005e-08,
+ "loss": 0.0,
+ "reward": 1.0875000953674316,
+ "reward_std": 0.4607583284378052,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.6000000238418579,
+ "rewards/code_quality_reward_func": 0.17500001192092896,
+ "rewards/reasoning_quality_reward_func": 0.21250000596046448,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 1
+ },
+ {
+ "completion_length": 168.75,
+ "epoch": 0.0005351886540005352,
+ "grad_norm": 15.0625,
+ "kl": 0.0,
+ "learning_rate": 1.0695187165775401e-07,
+ "loss": -0.0,
+ "reward": 1.3125,
+ "reward_std": 0.6351761817932129,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.7250000238418579,
+ "rewards/code_quality_reward_func": 0.1875,
+ "rewards/reasoning_quality_reward_func": 0.2875000238418579,
+ "rewards/xml_structure_reward_func": 0.11250000447034836,
+ "step": 2
+ },
+ {
+ "completion_length": 138.75,
+ "epoch": 0.0008027829810008028,
+ "grad_norm": 7.84375,
+ "kl": 0.0004256499232724309,
+ "learning_rate": 1.6042780748663104e-07,
+ "loss": 0.0,
+ "reward": 0.9000000953674316,
+ "reward_std": 0.5,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.42500004172325134,
+ "rewards/code_quality_reward_func": 0.08750000596046448,
+ "rewards/reasoning_quality_reward_func": 0.2875000238418579,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 3
+ },
+ {
+ "completion_length": 161.125,
+ "epoch": 0.0010703773080010704,
+ "grad_norm": 6.96875,
+ "kl": 0.0005086653982289135,
+ "learning_rate": 2.1390374331550802e-07,
+ "loss": 0.0,
+ "reward": 1.25,
+ "reward_std": 0.7432811856269836,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.7250000238418579,
+ "rewards/code_quality_reward_func": 0.15000000596046448,
+ "rewards/reasoning_quality_reward_func": 0.25,
+ "rewards/xml_structure_reward_func": 0.125,
+ "step": 4
+ },
+ {
+ "completion_length": 108.125,
+ "epoch": 0.001337971635001338,
+ "grad_norm": 12.625,
+ "kl": 0.0007201232365332544,
+ "learning_rate": 2.6737967914438503e-07,
+ "loss": 0.0,
+ "reward": 0.8500000238418579,
+ "reward_std": 0.30000001192092896,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.30000001192092896,
+ "rewards/code_quality_reward_func": 0.125,
+ "rewards/reasoning_quality_reward_func": 0.32500001788139343,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 5
+ },
+ {
+ "completion_length": 158.125,
+ "epoch": 0.0016055659620016055,
+ "grad_norm": 9.875,
+ "kl": 0.0006567862583324313,
+ "learning_rate": 3.208556149732621e-07,
+ "loss": 0.0,
+ "reward": 0.8625000715255737,
+ "reward_std": 0.3379955291748047,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.40000003576278687,
+ "rewards/code_quality_reward_func": 0.16250000894069672,
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 6
+ },
+ {
+ "completion_length": 103.875,
+ "epoch": 0.0018731602890018732,
+ "grad_norm": 11.75,
+ "kl": 0.0005918288370594382,
+ "learning_rate": 3.7433155080213904e-07,
+ "loss": 0.0,
+ "reward": 1.524999976158142,
+ "reward_std": 0.7623251676559448,
+ "rewards/basic_format_reward_func": 0.0625,
+ "rewards/code_execution_reward": 0.925000011920929,
+ "rewards/code_quality_reward_func": 0.21250000596046448,
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
+ "rewards/xml_structure_reward_func": 0.125,
+ "step": 7
+ },
+ {
+ "completion_length": 102.0,
+ "epoch": 0.0021407546160021407,
+ "grad_norm": 16.0,
+ "kl": 0.0005299834883771837,
+ "learning_rate": 4.2780748663101604e-07,
+ "loss": 0.0,
+ "reward": 0.9750000238418579,
+ "reward_std": 0.43620064854621887,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.5,
+ "rewards/code_quality_reward_func": 0.16249999403953552,
+ "rewards/reasoning_quality_reward_func": 0.21249999105930328,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 8
+ },
+ {
+ "completion_length": 168.25,
+ "epoch": 0.002408348943002408,
+ "grad_norm": 16.375,
+ "kl": 0.0011368428822606802,
+ "learning_rate": 4.812834224598931e-07,
+ "loss": 0.0,
+ "reward": 0.75,
+ "reward_std": 0.2748348116874695,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.30000001192092896,
+ "rewards/code_quality_reward_func": 0.08750000596046448,
+ "rewards/reasoning_quality_reward_func": 0.26249998807907104,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 9
+ },
+ {
+ "completion_length": 113.875,
+ "epoch": 0.002675943270002676,
+ "grad_norm": 10.875,
+ "kl": 0.0007178444066084921,
+ "learning_rate": 5.347593582887701e-07,
+ "loss": 0.0,
+ "reward": 0.9500000476837158,
+ "reward_std": 0.47081488370895386,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.42500004172325134,
+ "rewards/code_quality_reward_func": 0.16250000894069672,
+ "rewards/reasoning_quality_reward_func": 0.26249998807907104,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 10
+ },
+ {
+ "completion_length": 66.625,
+ "epoch": 0.0029435375970029436,
+ "grad_norm": 116.0,
+ "kl": 0.0015349595341831446,
+ "learning_rate": 5.882352941176471e-07,
+ "loss": 0.0,
+ "reward": 1.2625000476837158,
+ "reward_std": 0.6949611902236938,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.824999988079071,
+ "rewards/code_quality_reward_func": 0.13750000298023224,
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 11
+ },
+ {
+ "completion_length": 92.5,
+ "epoch": 0.003211131924003211,
+ "grad_norm": 10.6875,
+ "kl": 0.0008166864863596857,
+ "learning_rate": 6.417112299465242e-07,
+ "loss": 0.0,
+ "reward": 1.8875000476837158,
+ "reward_std": 0.42763128876686096,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.274999976158142,
+ "rewards/code_quality_reward_func": 0.26249998807907104,
+ "rewards/reasoning_quality_reward_func": 0.25,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 12
+ },
+ {
+ "completion_length": 97.5,
+ "epoch": 0.0034787262510034785,
+ "grad_norm": 47.25,
+ "kl": 0.0013502974761649966,
+ "learning_rate": 6.951871657754011e-07,
+ "loss": 0.0,
+ "reward": 1.2000000476837158,
+ "reward_std": 0.7858108282089233,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.7000000476837158,
+ "rewards/code_quality_reward_func": 0.15000000596046448,
+ "rewards/reasoning_quality_reward_func": 0.2500000298023224,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 13
+ },
+ {
+ "completion_length": 115.75,
+ "epoch": 0.0037463205780037465,
+ "grad_norm": 10.8125,
+ "kl": 0.0006711420137435198,
+ "learning_rate": 7.486631016042781e-07,
+ "loss": 0.0,
+ "reward": 2.0500001907348633,
+ "reward_std": 1.0827668905258179,
+ "rewards/basic_format_reward_func": 0.125,
+ "rewards/code_execution_reward": 1.2999999523162842,
+ "rewards/code_quality_reward_func": 0.25,
+ "rewards/reasoning_quality_reward_func": 0.25,
+ "rewards/xml_structure_reward_func": 0.125,
+ "step": 14
+ },
+ {
+ "completion_length": 193.25,
+ "epoch": 0.004013914905004014,
+ "grad_norm": 8.875,
+ "kl": 0.0012524605263024569,
+ "learning_rate": 8.021390374331551e-07,
+ "loss": 0.0,
+ "reward": 0.7124999761581421,
+ "reward_std": 0.05386751890182495,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.20000000298023224,
+ "rewards/code_quality_reward_func": 0.0625,
+ "rewards/reasoning_quality_reward_func": 0.3499999940395355,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 15
+ },
+ {
+ "completion_length": 154.375,
+ "epoch": 0.004281509232004281,
+ "grad_norm": 9.0625,
+ "kl": 0.0007793009281158447,
+ "learning_rate": 8.556149732620321e-07,
+ "loss": 0.0,
+ "reward": 0.8250000476837158,
+ "reward_std": 0.2288675308227539,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.30000001192092896,
+ "rewards/code_quality_reward_func": 0.15000000596046448,
+ "rewards/reasoning_quality_reward_func": 0.2750000059604645,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 16
+ },
+ {
+ "completion_length": 119.5,
+ "epoch": 0.004549103559004549,
+ "grad_norm": 16.5,
+ "kl": 0.0010260213166475296,
+ "learning_rate": 9.090909090909091e-07,
+ "loss": 0.0,
+ "reward": 0.7625000476837158,
+ "reward_std": 0.23393532633781433,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.30000001192092896,
+ "rewards/code_quality_reward_func": 0.11250000447034836,
+ "rewards/reasoning_quality_reward_func": 0.25,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 17
+ },
+ {
+ "completion_length": 138.625,
+ "epoch": 0.004816697886004816,
+ "grad_norm": 10.75,
+ "kl": 0.0008449122542515397,
+ "learning_rate": 9.625668449197862e-07,
+ "loss": 0.0,
+ "reward": 0.9500000476837158,
+ "reward_std": 0.4422738254070282,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.4000000059604645,
+ "rewards/code_quality_reward_func": 0.16250000894069672,
+ "rewards/reasoning_quality_reward_func": 0.2875000238418579,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 18
+ },
+ {
+ "completion_length": 113.625,
+ "epoch": 0.005084292213005084,
+ "grad_norm": 12.0625,
+ "kl": 0.0007207915186882019,
+ "learning_rate": 1.0160427807486633e-06,
+ "loss": 0.0,
+ "reward": 1.0125000476837158,
+ "reward_std": 0.4223129451274872,
+ "rewards/basic_format_reward_func": 0.0625,
+ "rewards/code_execution_reward": 0.4000000059604645,
+ "rewards/code_quality_reward_func": 0.1875,
+ "rewards/reasoning_quality_reward_func": 0.25,
+ "rewards/xml_structure_reward_func": 0.11250000447034836,
+ "step": 19
+ },
+ {
+ "completion_length": 109.75,
+ "epoch": 0.005351886540005352,
+ "grad_norm": 14.1875,
+ "kl": 0.0009593804134055972,
+ "learning_rate": 1.0695187165775401e-06,
+ "loss": 0.0,
+ "reward": 1.1124999523162842,
+ "reward_std": 0.4327978491783142,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.6000000238418579,
+ "rewards/code_quality_reward_func": 0.1875000149011612,
+ "rewards/reasoning_quality_reward_func": 0.22500000894069672,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 20
+ },
+ {
+ "completion_length": 167.75,
+ "epoch": 0.00561948086700562,
+ "grad_norm": 9.4375,
+ "kl": 0.0019687237218022346,
+ "learning_rate": 1.1229946524064172e-06,
+ "loss": 0.0,
+ "reward": 0.7125000357627869,
+ "reward_std": 0.09787134826183319,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.20000000298023224,
+ "rewards/code_quality_reward_func": 0.11249999701976776,
+ "rewards/reasoning_quality_reward_func": 0.30000001192092896,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 21
+ },
+ {
+ "completion_length": 159.25,
+ "epoch": 0.005887075194005887,
+ "grad_norm": 8.4375,
+ "kl": 0.0009809941984713078,
+ "learning_rate": 1.1764705882352942e-06,
+ "loss": 0.0,
+ "reward": 0.8125000596046448,
+ "reward_std": 0.3002312183380127,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.30000001192092896,
+ "rewards/code_quality_reward_func": 0.15000000596046448,
+ "rewards/reasoning_quality_reward_func": 0.25,
+ "rewards/xml_structure_reward_func": 0.11250000447034836,
+ "step": 22
+ },
+ {
+ "completion_length": 200.625,
+ "epoch": 0.006154669521006155,
+ "grad_norm": 10.375,
+ "kl": 0.0009175295126624405,
+ "learning_rate": 1.2299465240641713e-06,
+ "loss": 0.0,
+ "reward": 1.0,
+ "reward_std": 0.4392969608306885,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.40000003576278687,
+ "rewards/code_quality_reward_func": 0.26249998807907104,
+ "rewards/reasoning_quality_reward_func": 0.23749999701976776,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 23
+ },
+ {
+ "completion_length": 139.375,
+ "epoch": 0.006422263848006422,
+ "grad_norm": 22.5,
+ "kl": 0.0015095111448317766,
+ "learning_rate": 1.2834224598930483e-06,
+ "loss": 0.0,
+ "reward": 0.800000011920929,
+ "reward_std": 0.4590751528739929,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.375,
+ "rewards/code_quality_reward_func": 0.11250000447034836,
+ "rewards/reasoning_quality_reward_func": 0.21250000596046448,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 24
+ },
+ {
+ "completion_length": 50.25,
+ "epoch": 0.00668985817500669,
+ "grad_norm": 0.00201416015625,
+ "kl": 0.000902252912055701,
+ "learning_rate": 1.3368983957219254e-06,
+ "loss": 0.0,
+ "reward": 2.0,
+ "reward_std": 0.0,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.5,
+ "rewards/code_quality_reward_func": 0.25,
+ "rewards/reasoning_quality_reward_func": 0.15000000596046448,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 25
+ },
+ {
+ "completion_length": 88.875,
+ "epoch": 0.006957452502006957,
+ "grad_norm": 15.6875,
+ "kl": 0.0012402876745909452,
+ "learning_rate": 1.3903743315508022e-06,
+ "loss": 0.0,
+ "reward": 1.4124999046325684,
+ "reward_std": 0.9211608171463013,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.9750000238418579,
+ "rewards/code_quality_reward_func": 0.15000000596046448,
+ "rewards/reasoning_quality_reward_func": 0.1875,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 26
+ },
+ {
+ "completion_length": 202.5,
+ "epoch": 0.0072250468290072254,
+ "grad_norm": 9.9375,
+ "kl": 0.001193880569189787,
+ "learning_rate": 1.4438502673796793e-06,
+ "loss": 0.0,
+ "reward": 0.8125000596046448,
+ "reward_std": 0.2467355877161026,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.30000001192092896,
+ "rewards/code_quality_reward_func": 0.11250000447034836,
+ "rewards/reasoning_quality_reward_func": 0.30000001192092896,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 27
+ },
+ {
+ "completion_length": 217.5,
+ "epoch": 0.007492641156007493,
+ "grad_norm": 6.65625,
+ "kl": 0.002084344858303666,
+ "learning_rate": 1.4973262032085562e-06,
+ "loss": 0.0,
+ "reward": 0.987500011920929,
+ "reward_std": 0.2808602452278137,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.5,
+ "rewards/code_quality_reward_func": 0.13750000298023224,
+ "rewards/reasoning_quality_reward_func": 0.25,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 28
+ },
+ {
+ "completion_length": 144.375,
+ "epoch": 0.00776023548300776,
+ "grad_norm": 11.375,
+ "kl": 0.0022054852452129126,
+ "learning_rate": 1.5508021390374334e-06,
+ "loss": 0.0,
+ "reward": 0.9500000476837158,
+ "reward_std": 0.2717355787754059,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.5,
+ "rewards/code_quality_reward_func": 0.11249999701976776,
+ "rewards/reasoning_quality_reward_func": 0.23749999701976776,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 29
+ },
+ {
+ "completion_length": 90.0,
+ "epoch": 0.008027829810008028,
+ "grad_norm": 142.0,
+ "kl": 0.0038667870685458183,
+ "learning_rate": 1.6042780748663103e-06,
+ "loss": 0.0,
+ "reward": 0.9500000476837158,
+ "reward_std": 0.5573897957801819,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.4750000238418579,
+ "rewards/code_quality_reward_func": 0.17500001192092896,
+ "rewards/reasoning_quality_reward_func": 0.19999998807907104,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 30
+ },
+ {
+ "completion_length": 148.5,
+ "epoch": 0.008295424137008296,
+ "grad_norm": 10.4375,
+ "kl": 0.0013046960812062025,
+ "learning_rate": 1.6577540106951873e-06,
+ "loss": 0.0,
+ "reward": 1.8499999046325684,
+ "reward_std": 0.8840129375457764,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.2999999523162842,
+ "rewards/code_quality_reward_func": 0.23750001192092896,
+ "rewards/reasoning_quality_reward_func": 0.21249999105930328,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 31
+ },
+ {
+ "completion_length": 165.875,
+ "epoch": 0.008563018464008563,
+ "grad_norm": 11.4375,
+ "kl": 0.00392666133120656,
+ "learning_rate": 1.7112299465240642e-06,
+ "loss": 0.0,
+ "reward": 0.8875000476837158,
+ "reward_std": 0.4786737263202667,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.4000000059604645,
+ "rewards/code_quality_reward_func": 0.125,
+ "rewards/reasoning_quality_reward_func": 0.26249998807907104,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 32
+ },
+ {
+ "completion_length": 97.75,
+ "epoch": 0.008830612791008831,
+ "grad_norm": 14.125,
+ "kl": 0.0017194543033838272,
+ "learning_rate": 1.7647058823529414e-06,
+ "loss": 0.0,
+ "reward": 1.350000023841858,
+ "reward_std": 0.6012930870056152,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.8250000476837158,
+ "rewards/code_quality_reward_func": 0.21249999105930328,
+ "rewards/reasoning_quality_reward_func": 0.21249999105930328,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 33
+ },
+ {
+ "completion_length": 65.125,
+ "epoch": 0.009098207118009098,
+ "grad_norm": 14.375,
+ "kl": 0.006373521871864796,
+ "learning_rate": 1.8181818181818183e-06,
+ "loss": 0.0,
+ "reward": 1.4249999523162842,
+ "reward_std": 0.30773499608039856,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.8999999761581421,
+ "rewards/code_quality_reward_func": 0.25,
+ "rewards/reasoning_quality_reward_func": 0.17499999701976776,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 34
+ },
+ {
+ "completion_length": 166.125,
+ "epoch": 0.009365801445009366,
+ "grad_norm": 11.3125,
+ "kl": 0.00691020954400301,
+ "learning_rate": 1.8716577540106954e-06,
+ "loss": 0.0,
+ "reward": 1.0750000476837158,
+ "reward_std": 0.6594308614730835,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.625,
+ "rewards/code_quality_reward_func": 0.10000000149011612,
+ "rewards/reasoning_quality_reward_func": 0.25,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 35
+ },
+ {
+ "completion_length": 93.625,
+ "epoch": 0.009633395772009633,
+ "grad_norm": 7.90625,
+ "kl": 0.005570035893470049,
+ "learning_rate": 1.9251336898395724e-06,
+ "loss": 0.0,
+ "reward": 1.5250000953674316,
+ "reward_std": 0.5560275316238403,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.9249999523162842,
+ "rewards/code_quality_reward_func": 0.23750001192092896,
+ "rewards/reasoning_quality_reward_func": 0.25,
+ "rewards/xml_structure_reward_func": 0.11249999701976776,
+ "step": 36
+ },
+ {
+ "completion_length": 228.25,
+ "epoch": 0.009900990099009901,
+ "grad_norm": 10.875,
+ "kl": 0.006528891623020172,
+ "learning_rate": 1.9786096256684497e-06,
+ "loss": 0.0,
+ "reward": 1.6499998569488525,
+ "reward_std": 0.4349328875541687,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.0750000476837158,
+ "rewards/code_quality_reward_func": 0.22499999403953552,
+ "rewards/reasoning_quality_reward_func": 0.25,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 37
+ },
+ {
+ "completion_length": 92.75,
+ "epoch": 0.010168584426010168,
+ "grad_norm": 10.125,
+ "kl": 0.004976172000169754,
+ "learning_rate": 2.0320855614973265e-06,
+ "loss": 0.0,
+ "reward": 1.2625000476837158,
+ "reward_std": 0.7337645292282104,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.7250000238418579,
+ "rewards/code_quality_reward_func": 0.22500000894069672,
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
+ "rewards/xml_structure_reward_func": 0.11250000447034836,
+ "step": 38
+ },
+ {
+ "completion_length": 113.25,
+ "epoch": 0.010436178753010436,
+ "grad_norm": 13.6875,
+ "kl": 0.006215811241418123,
+ "learning_rate": 2.0855614973262034e-06,
+ "loss": 0.0,
+ "reward": 1.0125000476837158,
+ "reward_std": 0.4158177375793457,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.5,
+ "rewards/code_quality_reward_func": 0.1875,
+ "rewards/reasoning_quality_reward_func": 0.22499999403953552,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 39
+ },
+ {
+ "completion_length": 134.25,
+ "epoch": 0.010703773080010704,
+ "grad_norm": 10.1875,
+ "kl": 0.002335732337087393,
+ "learning_rate": 2.1390374331550802e-06,
+ "loss": 0.0,
+ "reward": 1.600000023841858,
+ "reward_std": 0.05000003054738045,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.0,
+ "rewards/code_quality_reward_func": 0.2875000238418579,
+ "rewards/reasoning_quality_reward_func": 0.21249999105930328,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 40
+ },
+ {
+ "completion_length": 64.0,
+ "epoch": 0.010971367407010971,
+ "grad_norm": 24.875,
+ "kl": 0.004402505233883858,
+ "learning_rate": 2.1925133689839575e-06,
+ "loss": 0.0,
+ "reward": 1.9249999523162842,
+ "reward_std": 0.6726604700088501,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.399999976158142,
+ "rewards/code_quality_reward_func": 0.23749999701976776,
+ "rewards/reasoning_quality_reward_func": 0.1875,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 41
+ },
+ {
+ "completion_length": 102.75,
+ "epoch": 0.01123896173401124,
+ "grad_norm": 12.6875,
+ "kl": 0.008235585875809193,
+ "learning_rate": 2.2459893048128343e-06,
+ "loss": 0.0,
+ "reward": 1.1124999523162842,
+ "reward_std": 0.4286736845970154,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.6000000238418579,
+ "rewards/code_quality_reward_func": 0.20000000298023224,
+ "rewards/reasoning_quality_reward_func": 0.21250000596046448,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 42
+ },
+ {
+ "completion_length": 98.25,
+ "epoch": 0.011506556061011506,
+ "grad_norm": 38.0,
+ "kl": 0.006902622990310192,
+ "learning_rate": 2.2994652406417116e-06,
+ "loss": 0.0,
+ "reward": 1.0750000476837158,
+ "reward_std": 0.520801305770874,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.5750000476837158,
+ "rewards/code_quality_reward_func": 0.17500001192092896,
+ "rewards/reasoning_quality_reward_func": 0.22500000894069672,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 43
+ },
+ {
+ "completion_length": 158.75,
+ "epoch": 0.011774150388011774,
+ "grad_norm": 9.25,
+ "kl": 0.012133742682635784,
+ "learning_rate": 2.3529411764705885e-06,
+ "loss": 0.0,
+ "reward": 1.225000023841858,
+ "reward_std": 0.4723786413669586,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.699999988079071,
+ "rewards/code_quality_reward_func": 0.17499999701976776,
+ "rewards/reasoning_quality_reward_func": 0.25,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 44
+ },
+ {
+ "completion_length": 119.25,
+ "epoch": 0.012041744715012041,
+ "grad_norm": 19.375,
+ "kl": 0.007604178972542286,
+ "learning_rate": 2.4064171122994653e-06,
+ "loss": 0.0,
+ "reward": 1.0125000476837158,
+ "reward_std": 0.5095207691192627,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.5,
+ "rewards/code_quality_reward_func": 0.1875,
+ "rewards/reasoning_quality_reward_func": 0.22500000894069672,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 45
+ },
+ {
+ "completion_length": 128.625,
+ "epoch": 0.01230933904201231,
+ "grad_norm": 10.375,
+ "kl": 0.007734506856650114,
+ "learning_rate": 2.4598930481283426e-06,
+ "loss": 0.0,
+ "reward": 1.0125000476837158,
+ "reward_std": 0.4967355728149414,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.5,
+ "rewards/code_quality_reward_func": 0.22500000894069672,
+ "rewards/reasoning_quality_reward_func": 0.1875,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 46
+ },
+ {
+ "completion_length": 100.25,
+ "epoch": 0.012576933369012578,
+ "grad_norm": 6.5,
+ "kl": 0.005467721726745367,
+ "learning_rate": 2.5133689839572194e-06,
+ "loss": 0.0,
+ "reward": 1.7374999523162842,
+ "reward_std": 0.2428133189678192,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.125,
+ "rewards/code_quality_reward_func": 0.2875000238418579,
+ "rewards/reasoning_quality_reward_func": 0.22499999403953552,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 47
+ },
+ {
+ "completion_length": 122.875,
+ "epoch": 0.012844527696012844,
+ "grad_norm": 8.875,
+ "kl": 0.010364975780248642,
+ "learning_rate": 2.5668449197860967e-06,
+ "loss": 0.0,
+ "reward": 1.2000000476837158,
+ "reward_std": 0.4000000059604645,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.6000000238418579,
+ "rewards/code_quality_reward_func": 0.2750000059604645,
+ "rewards/reasoning_quality_reward_func": 0.22499999403953552,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 48
+ },
+ {
+ "completion_length": 91.625,
+ "epoch": 0.013112122023013113,
+ "grad_norm": 11.1875,
+ "kl": 0.007891715504229069,
+ "learning_rate": 2.6203208556149735e-06,
+ "loss": 0.0,
+ "reward": 1.3624999523162842,
+ "reward_std": 0.5999225974082947,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.8250000476837158,
+ "rewards/code_quality_reward_func": 0.25,
+ "rewards/reasoning_quality_reward_func": 0.1875,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 49
+ },
+ {
+ "completion_length": 98.875,
+ "epoch": 0.01337971635001338,
+ "grad_norm": 9.8125,
+ "kl": 0.011884085834026337,
+ "learning_rate": 2.673796791443851e-06,
+ "loss": 0.0,
+ "reward": 1.649999976158142,
+ "reward_std": 0.5500816106796265,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.0499999523162842,
+ "rewards/code_quality_reward_func": 0.2750000059604645,
+ "rewards/reasoning_quality_reward_func": 0.22499999403953552,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 50
+ },
+ {
+ "completion_length": 167.875,
+ "epoch": 0.013647310677013648,
+ "grad_norm": 7.78125,
+ "kl": 0.024216562509536743,
+ "learning_rate": 2.7272727272727272e-06,
+ "loss": 0.0,
+ "reward": 1.5499999523162842,
+ "reward_std": 0.6633045077323914,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.9500000476837158,
+ "rewards/code_quality_reward_func": 0.23749999701976776,
+ "rewards/reasoning_quality_reward_func": 0.26249998807907104,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 51
+ },
+ {
+ "completion_length": 103.75,
+ "epoch": 0.013914905004013914,
+ "grad_norm": 13.6875,
+ "kl": 0.008454602211713791,
+ "learning_rate": 2.7807486631016045e-06,
+ "loss": 0.0,
+ "reward": 1.4874999523162842,
+ "reward_std": 0.6402456760406494,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.925000011920929,
+ "rewards/code_quality_reward_func": 0.23750001192092896,
+ "rewards/reasoning_quality_reward_func": 0.22499999403953552,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 52
+ },
+ {
+ "completion_length": 56.125,
+ "epoch": 0.014182499331014183,
+ "grad_norm": 16.75,
+ "kl": 0.01777804270386696,
+ "learning_rate": 2.8342245989304818e-06,
+ "loss": 0.0,
+ "reward": 1.7374999523162842,
+ "reward_std": 0.5997022390365601,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.149999976158142,
+ "rewards/code_quality_reward_func": 0.30000001192092896,
+ "rewards/reasoning_quality_reward_func": 0.1875,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 53
+ },
+ {
+ "completion_length": 112.25,
+ "epoch": 0.014450093658014451,
+ "grad_norm": 11.3125,
+ "kl": 0.011312819086015224,
+ "learning_rate": 2.8877005347593586e-06,
+ "loss": 0.0,
+ "reward": 1.0625,
+ "reward_std": 0.4679570198059082,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.5,
+ "rewards/code_quality_reward_func": 0.23749999701976776,
+ "rewards/reasoning_quality_reward_func": 0.22499999403953552,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 54
+ },
+ {
+ "completion_length": 92.375,
+ "epoch": 0.014717687985014717,
+ "grad_norm": 5.71875,
+ "kl": 0.021408643573522568,
+ "learning_rate": 2.9411764705882355e-06,
+ "loss": 0.0,
+ "reward": 1.4874999523162842,
+ "reward_std": 0.22499997913837433,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.8999999761581421,
+ "rewards/code_quality_reward_func": 0.2750000059604645,
+ "rewards/reasoning_quality_reward_func": 0.21250000596046448,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 55
+ },
+ {
+ "completion_length": 107.625,
+ "epoch": 0.014985282312014986,
+ "grad_norm": 7.1875,
+ "kl": 0.015812650322914124,
+ "learning_rate": 2.9946524064171123e-06,
+ "loss": 0.0,
+ "reward": 1.7374999523162842,
+ "reward_std": 0.2428133189678192,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.125,
+ "rewards/code_quality_reward_func": 0.3125,
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 56
+ },
+ {
+ "completion_length": 83.125,
+ "epoch": 0.015252876639015252,
+ "grad_norm": 6.71875,
+ "kl": 0.009970920160412788,
+ "learning_rate": 3.0481283422459896e-06,
+ "loss": 0.0,
+ "reward": 1.1875,
+ "reward_std": 0.5055937170982361,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.6499999761581421,
+ "rewards/code_quality_reward_func": 0.22499999403953552,
+ "rewards/reasoning_quality_reward_func": 0.21250000596046448,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 57
+ },
+ {
+ "completion_length": 124.375,
+ "epoch": 0.01552047096601552,
+ "grad_norm": 7.15625,
+ "kl": 0.015437191352248192,
+ "learning_rate": 3.101604278074867e-06,
+ "loss": 0.0,
+ "reward": 1.5250000953674316,
+ "reward_std": 0.5685557126998901,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.925000011920929,
+ "rewards/code_quality_reward_func": 0.30000001192092896,
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 58
+ },
+ {
+ "completion_length": 77.125,
+ "epoch": 0.01578806529301579,
+ "grad_norm": 10.25,
+ "kl": 0.027593251317739487,
+ "learning_rate": 3.1550802139037433e-06,
+ "loss": 0.0,
+ "reward": 2.0874998569488525,
+ "reward_std": 0.5924705266952515,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.5,
+ "rewards/code_quality_reward_func": 0.2875000238418579,
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 59
+ },
+ {
+ "completion_length": 126.125,
+ "epoch": 0.016055659620016056,
+ "grad_norm": 8.5625,
+ "kl": 0.01432301290333271,
+ "learning_rate": 3.2085561497326205e-06,
+ "loss": 0.0,
+ "reward": 1.0625,
+ "reward_std": 0.24787135422229767,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.5,
+ "rewards/code_quality_reward_func": 0.26250001788139343,
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 60
+ },
+ {
+ "completion_length": 119.875,
+ "epoch": 0.016323253947016322,
+ "grad_norm": 8.9375,
+ "kl": 0.024657145142555237,
+ "learning_rate": 3.262032085561498e-06,
+ "loss": 0.0,
+ "reward": 1.6375000476837158,
+ "reward_std": 0.6750324368476868,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.0749999284744263,
+ "rewards/code_quality_reward_func": 0.23749999701976776,
+ "rewards/reasoning_quality_reward_func": 0.22500000894069672,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 61
+ },
+ {
+ "completion_length": 128.5,
+ "epoch": 0.016590848274016592,
+ "grad_norm": 6.28125,
+ "kl": 0.011644430458545685,
+ "learning_rate": 3.3155080213903747e-06,
+ "loss": 0.0,
+ "reward": 1.625,
+ "reward_std": 0.3685557246208191,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.024999976158142,
+ "rewards/code_quality_reward_func": 0.30000001192092896,
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 62
+ },
+ {
+ "completion_length": 103.125,
+ "epoch": 0.01685844260101686,
+ "grad_norm": 7.34375,
+ "kl": 0.013543471693992615,
+ "learning_rate": 3.368983957219252e-06,
+ "loss": 0.0,
+ "reward": 1.4875000715255737,
+ "reward_std": 0.2250000238418579,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 0.8999999761581421,
+ "rewards/code_quality_reward_func": 0.2875000238418579,
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 63
+ },
+ {
+ "completion_length": 313.125,
+ "epoch": 0.017126036928017126,
+ "grad_norm": 10.1875,
+ "kl": 0.03165314346551895,
+ "learning_rate": 3.4224598930481284e-06,
+ "loss": 0.0,
+ "reward": 1.6124999523162842,
+ "reward_std": 0.6787031888961792,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.0499999523162842,
+ "rewards/code_quality_reward_func": 0.2750000059604645,
+ "rewards/reasoning_quality_reward_func": 0.1875,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 64
+ },
+ {
+ "completion_length": 115.25,
+ "epoch": 0.017393631255017392,
+ "grad_norm": 9.5625,
+ "kl": 0.017762087285518646,
+ "learning_rate": 3.4759358288770056e-06,
+ "loss": 0.0,
+ "reward": 1.774999976158142,
+ "reward_std": 0.4809401035308838,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.1749999523162842,
+ "rewards/code_quality_reward_func": 0.30000001192092896,
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 65
+ },
+ {
+ "completion_length": 53.125,
+ "epoch": 0.017661225582017662,
+ "grad_norm": 25.125,
+ "kl": 0.025264494121074677,
+ "learning_rate": 3.529411764705883e-06,
+ "loss": 0.0,
+ "reward": 1.5499999523162842,
+ "reward_std": 0.09999998658895493,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.0,
+ "rewards/code_quality_reward_func": 0.2750000059604645,
+ "rewards/reasoning_quality_reward_func": 0.17499999701976776,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 66
+ },
+ {
+ "completion_length": 85.0,
+ "epoch": 0.01792881990901793,
+ "grad_norm": 0.00439453125,
+ "kl": 0.0155054721981287,
+ "learning_rate": 3.5828877005347597e-06,
+ "loss": 0.0,
+ "reward": 2.049999952316284,
+ "reward_std": 0.0,
+ "rewards/basic_format_reward_func": 0.0,
+ "rewards/code_execution_reward": 1.5,
+ "rewards/code_quality_reward_func": 0.25,
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
+ "step": 67
+ },
+ {
1084
+ "completion_length": 97.625,
1085
+ "epoch": 0.018196414236018196,
1086
+ "grad_norm": 9.875,
1087
+ "kl": 0.02109808288514614,
1088
+ "learning_rate": 3.6363636363636366e-06,
1089
+ "loss": 0.0,
1090
+ "reward": 1.625,
1091
+ "reward_std": 0.44999995827674866,
1092
+ "rewards/basic_format_reward_func": 0.0,
1093
+ "rewards/code_execution_reward": 1.024999976158142,
1094
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1095
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1096
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1097
+ "step": 68
1098
+ },
1099
+ {
1100
+ "completion_length": 96.75,
1101
+ "epoch": 0.018464008563018466,
1102
+ "grad_norm": 10.0625,
1103
+ "kl": 0.014365495182573795,
1104
+ "learning_rate": 3.6898395721925134e-06,
1105
+ "loss": 0.0,
1106
+ "reward": 1.8874999284744263,
1107
+ "reward_std": 0.45966240763664246,
1108
+ "rewards/basic_format_reward_func": 0.0,
1109
+ "rewards/code_execution_reward": 1.274999976158142,
1110
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1111
+ "rewards/reasoning_quality_reward_func": 0.21249999105930328,
1112
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1113
+ "step": 69
1114
+ },
1115
+ {
1116
+ "completion_length": 63.5,
1117
+ "epoch": 0.018731602890018732,
1118
+ "grad_norm": 8.8125,
1119
+ "kl": 0.015685059130191803,
1120
+ "learning_rate": 3.7433155080213907e-06,
1121
+ "loss": 0.0,
1122
+ "reward": 2.012500047683716,
1123
+ "reward_std": 0.22499997913837433,
1124
+ "rewards/basic_format_reward_func": 0.0,
1125
+ "rewards/code_execution_reward": 1.399999976158142,
1126
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1127
+ "rewards/reasoning_quality_reward_func": 0.21249999105930328,
1128
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1129
+ "step": 70
1130
+ },
1131
+ {
1132
+ "completion_length": 79.5,
1133
+ "epoch": 0.018999197217019,
1134
+ "grad_norm": 6.5625,
1135
+ "kl": 0.025880061089992523,
1136
+ "learning_rate": 3.796791443850268e-06,
1137
+ "loss": 0.0,
1138
+ "reward": 2.2249999046325684,
1139
+ "reward_std": 0.24999995529651642,
1140
+ "rewards/basic_format_reward_func": 0.0,
1141
+ "rewards/code_execution_reward": 1.625,
1142
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1143
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1144
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1145
+ "step": 71
1146
+ },
1147
+ {
1148
+ "completion_length": 71.75,
1149
+ "epoch": 0.019266791544019266,
1150
+ "grad_norm": 8.75,
1151
+ "kl": 0.026266396045684814,
1152
+ "learning_rate": 3.850267379679145e-06,
1153
+ "loss": 0.0,
1154
+ "reward": 1.5,
1155
+ "reward_std": 0.20000000298023224,
1156
+ "rewards/basic_format_reward_func": 0.0,
1157
+ "rewards/code_execution_reward": 0.8999999761581421,
1158
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1159
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1160
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1161
+ "step": 72
1162
+ },
1163
+ {
1164
+ "completion_length": 123.5,
1165
+ "epoch": 0.019534385871019536,
1166
+ "grad_norm": 7.65625,
1167
+ "kl": 0.040041256695985794,
1168
+ "learning_rate": 3.903743315508022e-06,
1169
+ "loss": 0.0,
1170
+ "reward": 1.1749999523162842,
1171
+ "reward_std": 0.6809400916099548,
1172
+ "rewards/basic_format_reward_func": 0.0,
1173
+ "rewards/code_execution_reward": 0.625,
1174
+ "rewards/code_quality_reward_func": 0.25,
1175
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1176
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1177
+ "step": 73
1178
+ },
1179
+ {
1180
+ "completion_length": 66.125,
1181
+ "epoch": 0.019801980198019802,
1182
+ "grad_norm": 14.1875,
1183
+ "kl": 0.015473383478820324,
1184
+ "learning_rate": 3.957219251336899e-06,
1185
+ "loss": 0.0,
1186
+ "reward": 1.9499999284744263,
1187
+ "reward_std": 0.2788674831390381,
1188
+ "rewards/basic_format_reward_func": 0.0,
1189
+ "rewards/code_execution_reward": 1.375,
1190
+ "rewards/code_quality_reward_func": 0.2750000059604645,
1191
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1192
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1193
+ "step": 74
1194
+ },
1195
+ {
1196
+ "completion_length": 133.625,
1197
+ "epoch": 0.02006957452502007,
1198
+ "grad_norm": 7.9375,
1199
+ "kl": 0.01520618237555027,
1200
+ "learning_rate": 4.010695187165775e-06,
1201
+ "loss": 0.0,
1202
+ "reward": 1.4625000953674316,
1203
+ "reward_std": 0.21746647357940674,
1204
+ "rewards/basic_format_reward_func": 0.0,
1205
+ "rewards/code_execution_reward": 0.800000011920929,
1206
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1207
+ "rewards/reasoning_quality_reward_func": 0.26249998807907104,
1208
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1209
+ "step": 75
1210
+ },
1211
+ {
1212
+ "completion_length": 99.125,
1213
+ "epoch": 0.020337168852020335,
1214
+ "grad_norm": 7.0,
1215
+ "kl": 0.021426944062113762,
1216
+ "learning_rate": 4.064171122994653e-06,
1217
+ "loss": 0.0,
1218
+ "reward": 1.625,
1219
+ "reward_std": 0.49074774980545044,
1220
+ "rewards/basic_format_reward_func": 0.0,
1221
+ "rewards/code_execution_reward": 1.0499999523162842,
1222
+ "rewards/code_quality_reward_func": 0.2875000238418579,
1223
+ "rewards/reasoning_quality_reward_func": 0.1875,
1224
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1225
+ "step": 76
1226
+ },
1227
+ {
1228
+ "completion_length": 121.375,
1229
+ "epoch": 0.020604763179020606,
1230
+ "grad_norm": 3.828125,
1231
+ "kl": 0.01213662326335907,
1232
+ "learning_rate": 4.11764705882353e-06,
1233
+ "loss": 0.0,
1234
+ "reward": 1.75,
1235
+ "reward_std": 0.4358898997306824,
1236
+ "rewards/basic_format_reward_func": 0.0,
1237
+ "rewards/code_execution_reward": 1.149999976158142,
1238
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1239
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1240
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1241
+ "step": 77
1242
+ },
1243
+ {
1244
+ "completion_length": 55.75,
1245
+ "epoch": 0.020872357506020872,
1246
+ "grad_norm": 11.25,
1247
+ "kl": 0.042267389595508575,
1248
+ "learning_rate": 4.171122994652407e-06,
1249
+ "loss": 0.0,
1250
+ "reward": 1.5374999046325684,
1251
+ "reward_std": 0.05386751890182495,
1252
+ "rewards/basic_format_reward_func": 0.0,
1253
+ "rewards/code_execution_reward": 1.0,
1254
+ "rewards/code_quality_reward_func": 0.2875000238418579,
1255
+ "rewards/reasoning_quality_reward_func": 0.15000000596046448,
1256
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1257
+ "step": 78
1258
+ },
1259
+ {
1260
+ "completion_length": 75.5,
1261
+ "epoch": 0.02113995183302114,
1262
+ "grad_norm": 13.5625,
1263
+ "kl": 0.04684869945049286,
1264
+ "learning_rate": 4.224598930481284e-06,
1265
+ "loss": 0.0,
1266
+ "reward": 1.5250000953674316,
1267
+ "reward_std": 0.6907477378845215,
1268
+ "rewards/basic_format_reward_func": 0.0,
1269
+ "rewards/code_execution_reward": 0.9500000476837158,
1270
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1271
+ "rewards/reasoning_quality_reward_func": 0.17499999701976776,
1272
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1273
+ "step": 79
1274
+ },
1275
+ {
1276
+ "completion_length": 68.0,
1277
+ "epoch": 0.02140754616002141,
1278
+ "grad_norm": 11.0,
1279
+ "kl": 0.05405609309673309,
1280
+ "learning_rate": 4.2780748663101604e-06,
1281
+ "loss": 0.0001,
1282
+ "reward": 1.600000023841858,
1283
+ "reward_std": 0.07886753231287003,
1284
+ "rewards/basic_format_reward_func": 0.0,
1285
+ "rewards/code_execution_reward": 1.0,
1286
+ "rewards/code_quality_reward_func": 0.3125,
1287
+ "rewards/reasoning_quality_reward_func": 0.1875,
1288
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1289
+ "step": 80
1290
+ },
1291
+ {
1292
+ "completion_length": 76.875,
1293
+ "epoch": 0.021675140487021675,
1294
+ "grad_norm": 6.0625,
1295
+ "kl": 0.020545657724142075,
1296
+ "learning_rate": 4.331550802139038e-06,
1297
+ "loss": 0.0,
1298
+ "reward": 1.6624999046325684,
1299
+ "reward_std": 0.25940635800361633,
1300
+ "rewards/basic_format_reward_func": 0.0,
1301
+ "rewards/code_execution_reward": 1.125,
1302
+ "rewards/code_quality_reward_func": 0.25,
1303
+ "rewards/reasoning_quality_reward_func": 0.1875,
1304
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1305
+ "step": 81
1306
+ },
1307
+ {
1308
+ "completion_length": 52.0,
1309
+ "epoch": 0.021942734814021942,
1310
+ "grad_norm": 13.3125,
1311
+ "kl": 0.04687309265136719,
1312
+ "learning_rate": 4.385026737967915e-06,
1313
+ "loss": 0.0,
1314
+ "reward": 1.9499999284744263,
1315
+ "reward_std": 0.5563814043998718,
1316
+ "rewards/basic_format_reward_func": 0.0,
1317
+ "rewards/code_execution_reward": 1.375,
1318
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1319
+ "rewards/reasoning_quality_reward_func": 0.17500001192092896,
1320
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1321
+ "step": 82
1322
+ },
1323
+ {
1324
+ "completion_length": 83.5,
1325
+ "epoch": 0.02221032914102221,
1326
+ "grad_norm": 8.5,
1327
+ "kl": 0.020440079271793365,
1328
+ "learning_rate": 4.438502673796792e-06,
1329
+ "loss": 0.0,
1330
+ "reward": 1.625,
1331
+ "reward_std": 0.7579997181892395,
1332
+ "rewards/basic_format_reward_func": 0.0,
1333
+ "rewards/code_execution_reward": 1.0499999523162842,
1334
+ "rewards/code_quality_reward_func": 0.2750000059604645,
1335
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1336
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1337
+ "step": 83
1338
+ },
1339
+ {
1340
+ "completion_length": 71.5,
1341
+ "epoch": 0.02247792346802248,
1342
+ "grad_norm": 9.6875,
1343
+ "kl": 0.0274663046002388,
1344
+ "learning_rate": 4.491978609625669e-06,
1345
+ "loss": 0.0,
1346
+ "reward": 1.5499999523162842,
1347
+ "reward_std": 0.6668300032615662,
1348
+ "rewards/basic_format_reward_func": 0.0,
1349
+ "rewards/code_execution_reward": 0.949999988079071,
1350
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1351
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1352
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1353
+ "step": 84
1354
+ },
1355
+ {
1356
+ "completion_length": 79.5,
1357
+ "epoch": 0.022745517795022745,
1358
+ "grad_norm": 5.3125,
1359
+ "kl": 0.02609388716518879,
1360
+ "learning_rate": 4.5454545454545455e-06,
1361
+ "loss": 0.0,
1362
+ "reward": 1.7249999046325684,
1363
+ "reward_std": 0.24999995529651642,
1364
+ "rewards/basic_format_reward_func": 0.0,
1365
+ "rewards/code_execution_reward": 1.125,
1366
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1367
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1368
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1369
+ "step": 85
1370
+ },
1371
+ {
1372
+ "completion_length": 104.75,
1373
+ "epoch": 0.023013112122023012,
1374
+ "grad_norm": 0.0023193359375,
1375
+ "kl": 0.018861236050724983,
1376
+ "learning_rate": 4.598930481283423e-06,
1377
+ "loss": 0.0,
1378
+ "reward": 1.5499999523162842,
1379
+ "reward_std": 0.0,
1380
+ "rewards/basic_format_reward_func": 0.0,
1381
+ "rewards/code_execution_reward": 1.0,
1382
+ "rewards/code_quality_reward_func": 0.25,
1383
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1384
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1385
+ "step": 86
1386
+ },
1387
+ {
1388
+ "completion_length": 53.125,
1389
+ "epoch": 0.023280706449023282,
1390
+ "grad_norm": 12.8125,
1391
+ "kl": 0.04781404882669449,
1392
+ "learning_rate": 4.6524064171123e-06,
1393
+ "loss": 0.0,
1394
+ "reward": 1.75,
1395
+ "reward_std": 0.6354056596755981,
1396
+ "rewards/basic_format_reward_func": 0.0,
1397
+ "rewards/code_execution_reward": 1.1749999523162842,
1398
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1399
+ "rewards/reasoning_quality_reward_func": 0.17499999701976776,
1400
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1401
+ "step": 87
1402
+ },
1403
+ {
1404
+ "completion_length": 117.375,
1405
+ "epoch": 0.02354830077602355,
1406
+ "grad_norm": 6.875,
1407
+ "kl": 0.012227240018546581,
1408
+ "learning_rate": 4.705882352941177e-06,
1409
+ "loss": 0.0,
1410
+ "reward": 1.5,
1411
+ "reward_std": 0.20000000298023224,
1412
+ "rewards/basic_format_reward_func": 0.0,
1413
+ "rewards/code_execution_reward": 0.8999999761581421,
1414
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1415
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1416
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1417
+ "step": 88
1418
+ },
1419
+ {
1420
+ "completion_length": 54.625,
1421
+ "epoch": 0.023815895103023815,
1422
+ "grad_norm": 11.8125,
1423
+ "kl": 0.05620575696229935,
1424
+ "learning_rate": 4.759358288770054e-06,
1425
+ "loss": 0.0001,
1426
+ "reward": 1.475000023841858,
1427
+ "reward_std": 0.21811051666736603,
1428
+ "rewards/basic_format_reward_func": 0.0,
1429
+ "rewards/code_execution_reward": 0.8999999761581421,
1430
+ "rewards/code_quality_reward_func": 0.2875000238418579,
1431
+ "rewards/reasoning_quality_reward_func": 0.1875,
1432
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1433
+ "step": 89
1434
+ },
1435
+ {
1436
+ "completion_length": 65.0,
1437
+ "epoch": 0.024083489430024082,
1438
+ "grad_norm": 0.01141357421875,
1439
+ "kl": 0.04672177881002426,
1440
+ "learning_rate": 4.812834224598931e-06,
1441
+ "loss": 0.0,
1442
+ "reward": 1.600000023841858,
1443
+ "reward_std": 0.0,
1444
+ "rewards/basic_format_reward_func": 0.0,
1445
+ "rewards/code_execution_reward": 1.0,
1446
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1447
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1448
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1449
+ "step": 90
1450
+ },
1451
+ {
1452
+ "completion_length": 94.5,
1453
+ "epoch": 0.024351083757024352,
1454
+ "grad_norm": 10.4375,
1455
+ "kl": 0.03195090591907501,
1456
+ "learning_rate": 4.866310160427808e-06,
1457
+ "loss": 0.0,
1458
+ "reward": 1.2750000953674316,
1459
+ "reward_std": 0.43933194875717163,
1460
+ "rewards/basic_format_reward_func": 0.0,
1461
+ "rewards/code_execution_reward": 0.7000000476837158,
1462
+ "rewards/code_quality_reward_func": 0.2750000059604645,
1463
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1464
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1465
+ "step": 91
1466
+ },
1467
+ {
1468
+ "completion_length": 108.375,
1469
+ "epoch": 0.02461867808402462,
1470
+ "grad_norm": 10.125,
1471
+ "kl": 0.02047823742032051,
1472
+ "learning_rate": 4.919786096256685e-06,
1473
+ "loss": 0.0,
1474
+ "reward": 1.337499976158142,
1475
+ "reward_std": 0.4249999523162842,
1476
+ "rewards/basic_format_reward_func": 0.0,
1477
+ "rewards/code_execution_reward": 0.800000011920929,
1478
+ "rewards/code_quality_reward_func": 0.23749999701976776,
1479
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1480
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1481
+ "step": 92
1482
+ },
1483
+ {
1484
+ "completion_length": 59.25,
1485
+ "epoch": 0.024886272411024885,
1486
+ "grad_norm": 6.09375,
1487
+ "kl": 0.033340681344270706,
1488
+ "learning_rate": 4.973262032085562e-06,
1489
+ "loss": 0.0,
1490
+ "reward": 1.5125000476837158,
1491
+ "reward_std": 0.02499997615814209,
1492
+ "rewards/basic_format_reward_func": 0.0,
1493
+ "rewards/code_execution_reward": 1.0,
1494
+ "rewards/code_quality_reward_func": 0.25,
1495
+ "rewards/reasoning_quality_reward_func": 0.16250000894069672,
1496
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1497
+ "step": 93
1498
+ },
1499
+ {
1500
+ "completion_length": 72.125,
1501
+ "epoch": 0.025153866738025155,
1502
+ "grad_norm": 12.9375,
1503
+ "kl": 0.0372508242726326,
1504
+ "learning_rate": 5.026737967914439e-06,
1505
+ "loss": 0.0,
1506
+ "reward": 1.712499976158142,
1507
+ "reward_std": 0.5224937200546265,
1508
+ "rewards/basic_format_reward_func": 0.0,
1509
+ "rewards/code_execution_reward": 1.149999976158142,
1510
+ "rewards/code_quality_reward_func": 0.26249998807907104,
1511
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1512
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1513
+ "step": 94
1514
+ },
1515
+ {
1516
+ "completion_length": 82.875,
1517
+ "epoch": 0.025421461065025422,
1518
+ "grad_norm": 15.75,
1519
+ "kl": 0.04930912330746651,
1520
+ "learning_rate": 5.0802139037433165e-06,
1521
+ "loss": 0.0,
1522
+ "reward": 1.475000023841858,
1523
+ "reward_std": 0.25,
1524
+ "rewards/basic_format_reward_func": 0.0,
1525
+ "rewards/code_execution_reward": 0.8999999761581421,
1526
+ "rewards/code_quality_reward_func": 0.2750000059604645,
1527
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1528
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1529
+ "step": 95
1530
+ },
1531
+ {
1532
+ "completion_length": 84.25,
1533
+ "epoch": 0.02568905539202569,
1534
+ "grad_norm": 5.28125,
1535
+ "kl": 0.034752584993839264,
1536
+ "learning_rate": 5.133689839572193e-06,
1537
+ "loss": 0.0,
1538
+ "reward": 2.0749998092651367,
1539
+ "reward_std": 0.028867486864328384,
1540
+ "rewards/basic_format_reward_func": 0.0,
1541
+ "rewards/code_execution_reward": 1.5,
1542
+ "rewards/code_quality_reward_func": 0.25,
1543
+ "rewards/reasoning_quality_reward_func": 0.22499999403953552,
1544
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1545
+ "step": 96
1546
+ },
1547
+ {
1548
+ "completion_length": 87.5,
1549
+ "epoch": 0.025956649719025955,
1550
+ "grad_norm": 10.5,
1551
+ "kl": 0.01500009186565876,
1552
+ "learning_rate": 5.187165775401069e-06,
1553
+ "loss": 0.0,
1554
+ "reward": 1.5999999046325684,
1555
+ "reward_std": 0.43484222888946533,
1556
+ "rewards/basic_format_reward_func": 0.0,
1557
+ "rewards/code_execution_reward": 1.024999976158142,
1558
+ "rewards/code_quality_reward_func": 0.2750000059604645,
1559
+ "rewards/reasoning_quality_reward_func": 0.20000000298023224,
1560
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1561
+ "step": 97
1562
+ },
1563
+ {
1564
+ "completion_length": 73.625,
1565
+ "epoch": 0.026224244046026225,
1566
+ "grad_norm": 6.21875,
1567
+ "kl": 0.02606140449643135,
1568
+ "learning_rate": 5.240641711229947e-06,
1569
+ "loss": 0.0,
1570
+ "reward": 1.7374999523162842,
1571
+ "reward_std": 0.2749999463558197,
1572
+ "rewards/basic_format_reward_func": 0.0,
1573
+ "rewards/code_execution_reward": 1.125,
1574
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1575
+ "rewards/reasoning_quality_reward_func": 0.21249999105930328,
1576
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1577
+ "step": 98
1578
+ },
1579
+ {
1580
+ "completion_length": 151.375,
1581
+ "epoch": 0.026491838373026492,
1582
+ "grad_norm": 6.28125,
1583
+ "kl": 0.027492664754390717,
1584
+ "learning_rate": 5.294117647058824e-06,
1585
+ "loss": 0.0,
1586
+ "reward": 1.2750000953674316,
1587
+ "reward_std": 0.2179449498653412,
1588
+ "rewards/basic_format_reward_func": 0.0,
1589
+ "rewards/code_execution_reward": 0.7000000476837158,
1590
+ "rewards/code_quality_reward_func": 0.26249998807907104,
1591
+ "rewards/reasoning_quality_reward_func": 0.21249999105930328,
1592
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1593
+ "step": 99
1594
+ },
1595
+ {
1596
+ "completion_length": 113.875,
1597
+ "epoch": 0.02675943270002676,
1598
+ "grad_norm": 7.625,
1599
+ "kl": 0.016676055267453194,
1600
+ "learning_rate": 5.347593582887702e-06,
1601
+ "loss": 0.0,
1602
+ "reward": 1.5625,
1603
+ "reward_std": 0.49034643173217773,
1604
+ "rewards/basic_format_reward_func": 0.0,
1605
+ "rewards/code_execution_reward": 0.925000011920929,
1606
+ "rewards/code_quality_reward_func": 0.30000001192092896,
1607
+ "rewards/reasoning_quality_reward_func": 0.23749999701976776,
1608
+ "rewards/xml_structure_reward_func": 0.10000000149011612,
1609
+ "step": 100
1610
+ }
1611
+ ],
1612
+ "logging_steps": 1,
1613
+ "max_steps": 3737,
1614
+ "num_input_tokens_seen": 0,
1615
+ "num_train_epochs": 1,
1616
+ "save_steps": 100,
1617
+ "stateful_callbacks": {
1618
+ "TrainerControl": {
1619
+ "args": {
1620
+ "should_epoch_stop": false,
1621
+ "should_evaluate": false,
1622
+ "should_log": false,
1623
+ "should_save": true,
1624
+ "should_training_stop": false
1625
+ },
1626
+ "attributes": {}
1627
+ }
1628
+ },
1629
+ "total_flos": 0.0,
1630
+ "train_batch_size": 1,
1631
+ "trial_name": null,
1632
+ "trial_params": null
1633
+ }
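The log above is the tail of checkpoint-100's trainer_state.json: one record per optimizer step, with the total reward decomposed into the five GRPO reward functions (basic format, code execution, code quality, reasoning quality, XML structure) plus KL and learning-rate diagnostics. A minimal sketch for eyeballing the training curves, assuming a local download of the checkpoint and matplotlib installed; the path is illustrative:

```python
import json
import matplotlib.pyplot as plt

# Illustrative path -- point this at wherever the checkpoint was downloaded.
STATE = "Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/trainer_state.json"

with open(STATE) as f:
    history = json.load(f)["log_history"]  # HF Trainer stores per-step logs here

steps = [rec["step"] for rec in history]
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(steps, [rec["reward"] for rec in history], label="total reward")
ax1.plot(steps, [rec["rewards/code_execution_reward"] for rec in history],
         label="code execution")
ax1.legend(); ax1.set_ylabel("reward")
ax2.plot(steps, [rec["kl"] for rec in history])
ax2.set_ylabel("KL"); ax2.set_xlabel("step")
plt.show()
```

Note in passing that code_execution_reward dominates the total in these records while basic_format_reward_func stays at 0.0 throughout, so the model was not yet earning the format reward by step 100.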
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-100/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
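added_tokens.json pins Qwen2.5's control tokens (ChatML markers, tool-call tags, FIM and vision tokens) to fixed ids at the top of the vocabulary. A quick sanity check, assuming `transformers` is installed and the checkpoint directory is available locally:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200")
print(tok.convert_tokens_to_ids("<|im_end|>"))   # 151645, matching the map above
print(tok.convert_tokens_to_ids("<tool_call>"))  # 151657
```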
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "eos_token_id": 151645,
+ "hidden_act": "silu",
+ "hidden_size": 896,
+ "initializer_range": 0.02,
+ "intermediate_size": 4864,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 2,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.48.2",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ }
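config.json confirms the checkpoints fine-tune Qwen/Qwen2.5-0.5B-Instruct unchanged: 24 layers, hidden size 896, 14 query heads against 2 key/value heads (grouped-query attention, head_dim 64), and tied input/output embeddings. A rough parameter count from these numbers, as a sketch that ignores biases and norm weights:

```python
hidden, inter, layers, vocab = 896, 4864, 24, 151936
heads, kv_heads = 14, 2
head_dim = hidden // heads                      # 64

attn = (hidden * heads * head_dim) * 2          # q_proj and o_proj
attn += (hidden * kv_heads * head_dim) * 2      # k_proj and v_proj (GQA: only 2 heads)
mlp = 3 * hidden * inter                        # gate_proj, up_proj, down_proj
embed = vocab * hidden                          # shared with lm_head (tie_word_embeddings)

total = layers * (attn + mlp) + embed
print(f"~{total / 1e6:.0f}M parameters")        # ~494M, i.e. the advertised 0.5B
```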
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "repetition_penalty": 1.1,
+ "temperature": 0.7,
+ "top_k": 20,
+ "top_p": 0.8,
+ "transformers_version": "4.48.2"
+ }
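generation_config.json carries the sampling defaults (temperature 0.7, top-p 0.8, top-k 20, repetition penalty 1.1) that `model.generate()` picks up automatically when the checkpoint is loaded. A minimal usage sketch, assuming a local copy of the directory:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200"
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt)

inputs = tok("Write a Python function that reverses a string.", return_tensors="pt")
# Sampling parameters (do_sample, temperature, top_p, top_k, repetition_penalty)
# come from generation_config.json unless overridden here.
out = model.generate(**inputs, max_new_tokens=128)
print(tok.decode(out[0], skip_special_tokens=True))
```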
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05710297877446a04f3920efba05a55a25dde11ea4b7e46a0983c84b19d90682
+ size 14448
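The three-line stubs for the .pth, .pt, .safetensors, and tokenizer.json files are Git LFS pointers rather than the binaries themselves: a spec version, the SHA-256 of the real blob, and its size in bytes (`git lfs pull` fetches the actual content). A small sketch of parsing one pointer:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Split a Git LFS pointer stub into its three fields."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "hash_algo": algo,
            "digest": digest, "size_bytes": int(fields["size"])}

stub = """version https://git-lfs.github.com/spec/v1
oid sha256:05710297877446a04f3920efba05a55a25dde11ea4b7e46a0983c84b19d90682
size 14448"""
print(parse_lfs_pointer(stub))  # rng_state_0.pth: a 14 KB RNG-state snapshot
```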
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f68a37892a1b445d21bb35cc10bf7a058a6f9ec8c363f5ed156ff4f49d90fb6
+ size 14512
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11ebcc79730a243f22c7142afc98308dacd92b09dfab19d18c4de53c27124888
+ size 1064
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/special_tokens_map.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<|im_end|>"
+ }
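special_tokens_map.json sets pad_token to <|im_end|>, i.e. the same token as EOS, which is the usual Qwen2.5 convention. A one-line check, assuming the checkpoint is local:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200")
assert tok.pad_token == tok.eos_token == "<|im_end|>"
print(tok.pad_token_id)  # 151645 serves as both padding and end-of-turn
```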
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63a2951d5edfa5cc0a2346ef872f8c77a2920274cfc3b503b04e3799104dee80
+ size 11422060
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/tokenizer_config.json ADDED
@@ -0,0 +1,208 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<|object_ref_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|object_ref_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151648": {
+ "content": "<|box_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151649": {
+ "content": "<|box_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 131072,
+ "pad_token": "<|im_end|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
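The chat_template above is the stock Qwen2.5 ChatML template, including the tool-calling branches. A minimal sketch of rendering a prompt through it with `apply_chat_template`, assuming a local copy of the checkpoint:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200")
messages = [{"role": "user", "content": "Sum the integers from 1 to 100."}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>system
# You are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Sum the integers from 1 to 100.<|im_end|>
# <|im_start|>assistant
```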
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ae303199a3f5d31692ecd76e856b1fe809229c2b385ebaf722c0cfa122f0773
+ size 5624
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-200/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/added_tokens.json ADDED
Contents identical to checkpoint-200/added_tokens.json above.
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/config.json ADDED
Contents identical to checkpoint-200/config.json above.
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/generation_config.json ADDED
Contents identical to checkpoint-200/generation_config.json above.
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:439cdf1c33eecff95d718422547913643700b7ebeec4e21c93e23d66ed0e4ed7
+ size 988097824
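Each model.safetensors weighs 988,097,824 bytes, which squares with the config: roughly 494M parameters stored in bfloat16 at 2 bytes each (safetensors header overhead is negligible). The arithmetic, as a sketch:

```python
size_bytes = 988_097_824            # from the LFS pointer above
params = size_bytes / 2             # bfloat16 = 2 bytes per parameter
print(f"~{params / 1e6:.0f}M parameters")  # ~494M -- the "0.5B" in the model name
```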
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c22e71e32943d11848f83241f6bee27a2cbfd89730e917622ca26176d6564040
+ size 14448
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f68a37892a1b445d21bb35cc10bf7a058a6f9ec8c363f5ed156ff4f49d90fb6
+ size 14512
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d02df7af88de4b6f674b0c33cc677c6a2ad4269da378066906d80d6a74ed8b1
+ size 1064
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/special_tokens_map.json ADDED
Contents identical to checkpoint-200/special_tokens_map.json above.
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63a2951d5edfa5cc0a2346ef872f8c77a2920274cfc3b503b04e3799104dee80
+ size 11422060
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/tokenizer_config.json ADDED
Contents identical to checkpoint-200/tokenizer_config.json above, including the same chat_template.
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ae303199a3f5d31692ecd76e856b1fe809229c2b385ebaf722c0cfa122f0773
+ size 5624
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-300/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/added_tokens.json ADDED
Contents identical to checkpoint-200/added_tokens.json above.
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/config.json ADDED
Contents identical to checkpoint-200/config.json above.
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/generation_config.json ADDED
Contents identical to checkpoint-200/generation_config.json above.
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0750ea889825dde49e05b3bb75078d98703e849b9102ebd2bbcb4b4ae26d905
+ size 988097824
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c537187f43c0116f21beb3dcba86f99c57a2aa139b32330c80843a8007316757
+ size 14448
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f68a37892a1b445d21bb35cc10bf7a058a6f9ec8c363f5ed156ff4f49d90fb6
+ size 14512
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb71be99938129e57fc6761f65091470d0f07eabfd1b414a74a83ca9a2b33782
+ size 1064
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/special_tokens_map.json ADDED
Contents identical to checkpoint-200/special_tokens_map.json above.
Qwen-0.5B-GRPO-Code-vllm-base/checkpoint-400/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63a2951d5edfa5cc0a2346ef872f8c77a2920274cfc3b503b04e3799104dee80
+ size 11422060