leonMW committed (verified)
Commit 1b97150 · Parent(s): 8ac2b3b

Add files using upload-large-folder tool
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,67 @@
+ ---
+ library_name: transformers
+ model_name: DeepSeek-R1-Distill-Qwen-7B-S
+ tags:
+ - generated_from_trainer
+ - grpo
+ - trl
+ licence: license
+ ---
+
+ # Model Card for DeepSeek-R1-Distill-Qwen-7B-S
+
+ This model is a fine-tuned version of [None](https://huggingface.co/None).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="leonMW/DeepSeek-R1-Distill-Qwen-7B-S", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/leonwenderoth-tu-darmstadt/huggingface/runs/b2dqumry)
+
+
+ This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
+
+ ### Framework versions
+
+ - TRL: 0.23.0
+ - Transformers: 4.56.2
+ - Pytorch: 2.7.1
+ - Datasets: 4.1.1
+ - Tokenizers: 0.22.1
+
+ ## Citations
+
+ Cite GRPO as:
+
+ ```bibtex
+ @article{shao2024deepseekmath,
+     title        = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
+     author       = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
+     year         = 2024,
+     eprint       = {arXiv:2402.03300},
+ }
+
+ ```
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title        = {{TRL: Transformer Reinforcement Learning}},
+     author       = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
+     year         = 2020,
+     journal      = {GitHub repository},
+     publisher    = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
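The README above says the checkpoint was trained with GRPO via TRL but does not include the training script. As a hedged sketch only (the dataset and reward function below are placeholders, not the recipe behind this commit; the actual run used rewards such as `symbolic_reward_accuracy`, per `trainer_state.json` further down), a minimal TRL `GRPOTrainer` setup looks roughly like this:

```python
# Minimal GRPO sketch with TRL. Assumptions: the dataset and reward function
# are illustrative placeholders, not what produced this checkpoint.
from datasets import load_dataset
from trl import GRPOConfig, GRPOTrainer

# Hypothetical prompt dataset; the commit does not record the real one.
dataset = load_dataset("trl-lib/tldr", split="train")

def reward_len(completions, **kwargs):
    # Toy reward: prefer completions near 50 characters.
    return [-abs(50 - len(c)) for c in completions]

training_args = GRPOConfig(output_dir="DeepSeek-R1-Distill-Qwen-7B-S")
trainer = GRPOTrainer(
    model="deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
    reward_funcs=reward_len,
    args=training_args,
    train_dataset=dataset,
)
trainer.train()
```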
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "total_flos": 0.0,
+   "train_loss": 0.0,
+   "train_runtime": 2.3434,
+   "train_samples": 3000,
+   "train_samples_per_second": 2560.359,
+   "train_steps_per_second": 78.518
+ }
chat_template.jinja ADDED
@@ -0,0 +1 @@
+ {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %}
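The file above is the model's chat template: it emits the BOS token and any system prompt first, wraps user turns in `<|User|>`, closes assistant turns with `<|end▁of▁sentence|>`, and has dedicated branches for tool calls and tool outputs. A small sketch of rendering it through the tokenizer (assuming the repo id from the README):

```python
# Sketch: render the chat template above via the tokenizer files in this commit.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("leonMW/DeepSeek-R1-Distill-Qwen-7B-S")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]
# add_generation_prompt=True appends the '<|Assistant|>' marker so the model
# continues as the assistant, per the template's final branch.
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
```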
config.json ADDED
@@ -0,0 +1,60 @@
+ {
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151646,
+   "dtype": "bfloat16",
+   "eos_token_id": 151643,
+   "hidden_act": "silu",
+   "hidden_size": 3584,
+   "initializer_range": 0.02,
+   "intermediate_size": 18944,
+   "layer_types": [
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention"
+   ],
+   "max_position_embeddings": 131072,
+   "max_window_layers": 28,
+   "model_type": "qwen2",
+   "num_attention_heads": 28,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 4,
+   "pad_token_id": 151643,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "transformers_version": "4.56.2",
+   "use_cache": true,
+   "use_mrope": false,
+   "use_sliding_window": false,
+   "vocab_size": 152064
+ }
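A hedged sketch of reading this config and sanity-checking the architecture it records; the grouped-query-attention ratio (28 query heads sharing 4 key/value heads) falls out directly from the fields above:

```python
# Sketch: a few sanity checks on the architecture recorded in config.json
# (assumes the repo id from the README).
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("leonMW/DeepSeek-R1-Distill-Qwen-7B-S")
assert cfg.model_type == "qwen2"
assert cfg.num_hidden_layers == 28 and len(cfg.layer_types) == 28
# Grouped-query attention: 28 query heads share 4 KV heads,
# i.e. 7 query heads per KV head.
print(cfg.num_attention_heads // cfg.num_key_value_heads)  # -> 7
```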
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 151646,
+   "eos_token_id": 151643,
+   "pad_token_id": 151643,
+   "transformers_version": "4.56.2"
+ }
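A small sketch (assuming the same repo id) of loading these generation defaults; note that `eos_token_id` and `pad_token_id` share id 151643 while `bos_token_id` is 151646, matching `config.json` above:

```python
# Sketch: load the generation defaults shipped in this commit.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("leonMW/DeepSeek-R1-Distill-Qwen-7B-S")
# bos is 151646; eos and pad share 151643 ('<|end▁of▁sentence|>').
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id)
```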
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b8f752344974194fc8a1f24f8f542da9ac75a1f0a32fe28b0ba00e149768894
+ size 4877660776
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a43d15eaf06e17594c86809c958511bd178ce798ee87563438e06329d2ef4367
+ size 4932751008
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f3a66181f459450828bee809b24b88eb4c26b7583f897f75b2d040c72fa5478
+ size 4330865200
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab24b3572f03b03a507add4574f4c23e0fa67746c4e108310ec3c06e63f813e8
+ size 1089994880
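The four entries above are Git LFS pointer files rather than the weights themselves: `oid sha256:` is the SHA-256 digest of the actual shard and `size` is its byte length. A hedged verification sketch (the local path is hypothetical):

```python
# Sketch: verify a downloaded shard against its LFS pointer
# (oid = sha256 digest, size = byte count).
import hashlib

def verify_lfs(path: str, expected_oid: str, expected_size: int) -> bool:
    h = hashlib.sha256()
    n = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            h.update(chunk)
            n += len(chunk)
    return h.hexdigest() == expected_oid and n == expected_size

print(verify_lfs(
    "model-00004-of-00004.safetensors",  # hypothetical local path
    "ab24b3572f03b03a507add4574f4c23e0fa67746c4e108310ec3c06e63f813e8",
    1089994880,
))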
model.safetensors.index.json ADDED
@@ -0,0 +1,347 @@
+ {
+   "metadata": {
+     "total_parameters": 333312,
+     "total_size": 15231233024
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00004-of-00004.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.norm.weight": "model-00003-of-00004.safetensors"
+   }
+ }
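The index maps every tensor name to the shard that stores it; `transformers` consults it when loading the sharded checkpoint. A small sketch of querying it directly (assuming the file is available locally):

```python
# Sketch: use the weight_map to find which shard holds a given tensor.
import json
from collections import defaultdict

with open("model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]
print(weight_map["lm_head.weight"])             # model-00004-of-00004.safetensors
print(weight_map["model.embed_tokens.weight"])  # model-00001-of-00004.safetensors

# Group tensors by shard, e.g. to fetch only what a partial load needs.
by_shard = defaultdict(list)
for name, shard in weight_map.items():
    by_shard[shard].append(name)
print({shard: len(names) for shard, names in sorted(by_shard.items())})
```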
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<|begin▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|end▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|end▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
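A sketch (assuming the repo id from the README) confirming how these special tokens resolve; note that the pad token is deliberately the same string as the EOS token:

```python
# Sketch: check the special tokens declared above against their ids.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("leonMW/DeepSeek-R1-Distill-Qwen-7B-S")
print(tok.bos_token, tok.convert_tokens_to_ids(tok.bos_token))  # <|begin▁of▁sentence|> 151646
print(tok.eos_token, tok.convert_tokens_to_ids(tok.eos_token))  # <|end▁of▁sentence|> 151643
# pad_token reuses the eos string (and therefore its id).
print(tok.pad_token == tok.eos_token)  # True
```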
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4256422650d141f228fe954acee98679da412984c29a569877eefd3af69315a
+ size 11422959
tokenizer_config.json ADDED
@@ -0,0 +1,198 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|end▁of▁sentence|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|User|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151645": {
+       "content": "<|Assistant|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151646": {
+       "content": "<|begin▁of▁sentence|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|EOT|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151648": {
+       "content": "<think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151649": {
+       "content": "</think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "bos_token": "<|begin▁of▁sentence|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|end▁of▁sentence|>",
+   "extra_special_tokens": {},
+   "legacy": true,
+   "max_length": null,
+   "model_max_length": 16384,
+   "pad_to_multiple_of": null,
+   "pad_token": "<|end▁of▁sentence|>",
+   "pad_token_type_id": 0,
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizerFast",
+   "unk_token": null,
+   "use_default_system_prompt": false
+ }
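Two settings above matter for batched generation: `padding_side` is `"left"` and `model_max_length` is 16384. A hedged sketch of what left padding does in practice (prompts are illustrative):

```python
# Sketch: left padding, as configured above, for batched decoder-only generation.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("leonMW/DeepSeek-R1-Distill-Qwen-7B-S")
print(tok.padding_side)      # "left"
print(tok.model_max_length)  # 16384

batch = tok(["short", "a somewhat longer prompt"], padding=True, return_tensors="pt")
# With left padding the pad ids sit at the start of the shorter row, so the
# real tokens end up right-aligned, which autoregressive generation expects.
print(batch["input_ids"][0][:3])
```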
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "total_flos": 0.0,
+   "train_loss": 0.0,
+   "train_runtime": 2.3434,
+   "train_samples": 3000,
+   "train_samples_per_second": 2560.359,
+   "train_steps_per_second": 78.518
+ }
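The throughput figures above are internally consistent with `trainer_state.json` below: steps per second times the recorded runtime gives the run's 184 global steps. A quick check:

```python
# Sketch: derive the implied step count from the throughput figures above
# (assumes train_results.json from this commit is available locally).
import json

with open("train_results.json") as f:
    results = json.load(f)

# 78.518 steps/s * 2.3434 s ~= 184 steps, matching trainer_state.json's
# global_step below.
print(round(results["train_steps_per_second"] * results["train_runtime"]))  # 184
```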
trainer_state.json ADDED
@@ -0,0 +1,2026 @@
+ {
+   "best_global_step": 184,
+   "best_metric": 0.003358551999554038,
+   "best_model_checkpoint": "data/DeepSeek-R1-Distill-Qwen-7B-Staged-4/checkpoint-184",
+   "epoch": 2.0,
+   "eval_steps": 500,
+   "global_step": 184,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "clip_ratio/high_max": 0.0,
+       "clip_ratio/high_mean": 0.0,
+       "clip_ratio/low_mean": 0.0,
+       "clip_ratio/low_min": 0.0,
+       "clip_ratio/region_mean": 0.0,
+       "completions/clipped_ratio": 0.0,
+       "completions/max_length": 1531.0,
+       "completions/max_terminated_length": 1531.0,
+       "completions/mean_length": 423.75732421875,
+       "completions/mean_terminated_length": 423.75732421875,
+       "completions/min_length": 181.0,
+       "completions/min_terminated_length": 181.0,
+       "entropy": 1.90119668841362,
+       "epoch": 0.010869565217391304,
+       "frac_reward_zero_std": 0.0,
+       "grad_norm": 0.11881979554891586,
+       "learning_rate": 1e-05,
+       "loss": 0.0039,
+       "num_tokens": 2917775.0,
+       "reward": 3.519592761993408,
+       "reward_std": 0.18993031978607178,
+       "rewards/ngram_repetition2/mean": 0.9466348886489868,
+       "rewards/ngram_repetition2/std": 0.01929597184062004,
+       "rewards/ngram_repetition3/mean": 0.9919997453689575,
+       "rewards/ngram_repetition3/std": 0.010940907523036003,
+       "rewards/symbolic_reward_accuracy/mean": 0.7822265625,
+       "rewards/symbolic_reward_accuracy/std": 0.4128333628177643,
+       "rewards/symbolic_reward_partial_score/mean": 0.9275716543197632,
+       "rewards/symbolic_reward_partial_score/std": 0.1604749709367752,
+       "rewards/tag_count_reward/mean": 0.99853515625,
+       "rewards/tag_count_reward/std": 0.02703022211790085,
+       "rewards/thinking_answer_ratio_reward/mean": 0.9646764397621155,
+       "rewards/thinking_answer_ratio_reward/std": 0.055691447108983994,
+       "sampling/importance_sampling_ratio/max": 2.0,
+       "sampling/importance_sampling_ratio/mean": 1.5215855836868286,
+       "sampling/importance_sampling_ratio/min": 0.0016820691525936127,
+       "sampling/sampling_logp_difference/max": 6.387730598449707,
+       "sampling/sampling_logp_difference/mean": 0.6701650023460388,
+       "step": 1
+     },
+     {
+       "clip_ratio/high_max": 0.11458333333333333,
+       "clip_ratio/high_mean": 0.037760416666666664,
+       "clip_ratio/low_mean": 0.359375,
+       "clip_ratio/low_min": 0.19270833333333334,
+       "clip_ratio/region_mean": 0.3971354166666667,
+       "entropy": 1.9287260274092357,
+       "epoch": 0.043478260869565216,
+       "grad_norm": 0.2112465351819992,
+       "learning_rate": 1e-05,
+       "loss": 0.0079,
+       "step": 4
+     },
+     {
+       "clip_ratio/high_max": 0.16015625,
+       "clip_ratio/high_mean": 0.0673828125,
+       "clip_ratio/low_mean": 0.27392578125,
+       "clip_ratio/low_min": 0.140625,
+       "clip_ratio/region_mean": 0.34130859375,
+       "completions/clipped_ratio": 0.0,
+       "completions/max_length": 620.0,
+       "completions/max_terminated_length": 620.0,
+       "completions/mean_length": 426.1572265625,
+       "completions/mean_terminated_length": 426.1572265625,
+       "completions/min_length": 69.0,
+       "completions/min_terminated_length": 69.0,
+       "entropy": 1.884685069322586,
+       "epoch": 0.08695652173913043,
+       "frac_reward_zero_std": 0.0,
+       "grad_norm": 0.10315027832984924,
+       "learning_rate": 1e-05,
+       "loss": 0.003,
+       "num_tokens": 5881649.0,
+       "reward": 3.206584930419922,
+       "reward_std": 0.24610982835292816,
+       "rewards/ngram_repetition2/mean": 0.9437640309333801,
+       "rewards/ngram_repetition2/std": 0.01659868098795414,
+       "rewards/ngram_repetition3/mean": 0.9914915561676025,
+       "rewards/ngram_repetition3/std": 0.006500926800072193,
+       "rewards/symbolic_reward_accuracy/mean": 0.64599609375,
+       "rewards/symbolic_reward_accuracy/std": 0.4783271551132202,
+       "rewards/symbolic_reward_partial_score/mean": 0.8863118886947632,
+       "rewards/symbolic_reward_partial_score/std": 0.18328505754470825,
+       "rewards/tag_count_reward/mean": 0.999267578125,
+       "rewards/tag_count_reward/std": 0.019127286970615387,
+       "rewards/thinking_answer_ratio_reward/mean": 0.9660772085189819,
+       "rewards/thinking_answer_ratio_reward/std": 0.0389963835477829,
+       "sampling/importance_sampling_ratio/max": 2.0,
+       "sampling/importance_sampling_ratio/mean": 1.5227073431015015,
+       "sampling/importance_sampling_ratio/min": 0.0010220027761533856,
+       "sampling/sampling_logp_difference/max": 6.885991096496582,
+       "sampling/sampling_logp_difference/mean": 0.6709789037704468,
+       "step": 8
+     },
+     {
+       "clip_ratio/high_max": 0.2109375,
+       "clip_ratio/high_mean": 0.107421875,
+       "clip_ratio/low_mean": 0.26953125,
+       "clip_ratio/low_min": 0.16015625,
+       "clip_ratio/region_mean": 0.376953125,
+       "completions/clipped_ratio": 0.0,
+       "completions/max_length": 724.0,
+       "completions/max_terminated_length": 724.0,
+       "completions/mean_length": 440.37939453125,
+       "completions/mean_terminated_length": 440.37939453125,
+       "completions/min_length": 215.0,
+       "completions/min_terminated_length": 215.0,
+       "entropy": 1.9614799618721008,
+       "epoch": 0.13043478260869565,
+       "frac_reward_zero_std": 0.0,
+       "grad_norm": 0.11278107017278671,
+       "learning_rate": 1e-05,
+       "loss": 0.0064,
+       "num_tokens": 8830298.0,
+       "reward": 3.415938138961792,
+       "reward_std": 0.14413174986839294,
+       "rewards/ngram_repetition2/mean": 0.9401968717575073,
+       "rewards/ngram_repetition2/std": 0.016658587381243706,
+       "rewards/ngram_repetition3/mean": 0.9910030364990234,
+       "rewards/ngram_repetition3/std": 0.006113104056566954,
+       "rewards/symbolic_reward_accuracy/mean": 0.7392578125,
+       "rewards/symbolic_reward_accuracy/std": 0.4391467273235321,
+       "rewards/symbolic_reward_partial_score/mean": 0.9086507558822632,
+       "rewards/symbolic_reward_partial_score/std": 0.18448227643966675,
+       "rewards/tag_count_reward/mean": 0.999755859375,
+       "rewards/tag_count_reward/std": 0.011048543266952038,
+       "rewards/thinking_answer_ratio_reward/mean": 0.970394492149353,
+       "rewards/thinking_answer_ratio_reward/std": 0.023110121488571167,
+       "sampling/importance_sampling_ratio/max": 2.0,
+       "sampling/importance_sampling_ratio/mean": 1.530914068222046,
+       "sampling/importance_sampling_ratio/min": 0.0017453260952606797,
+       "sampling/sampling_logp_difference/max": 6.350813865661621,
+       "sampling/sampling_logp_difference/mean": 0.6836334466934204,
+       "step": 12
+     },
+     {
+       "clip_ratio/high_max": 0.203125,
+       "clip_ratio/high_mean": 0.1064453125,
+       "clip_ratio/low_mean": 0.259765625,
+       "clip_ratio/low_min": 0.140625,
+       "clip_ratio/region_mean": 0.3662109375,
+       "completions/clipped_ratio": 0.0,
+       "completions/max_length": 792.0,
+       "completions/max_terminated_length": 792.0,
+       "completions/mean_length": 433.44384765625,
+       "completions/mean_terminated_length": 433.44384765625,
+       "completions/min_length": 68.0,
+       "completions/min_terminated_length": 68.0,
+       "entropy": 1.9702377393841743,
+       "epoch": 0.17391304347826086,
+       "frac_reward_zero_std": 0.0,
+       "grad_norm": 0.08772186189889908,
+       "learning_rate": 1e-05,
+       "loss": 0.0077,
+       "num_tokens": 11771079.0,
+       "reward": 3.4083731174468994,
+       "reward_std": 0.2164277881383896,
+       "rewards/ngram_repetition2/mean": 0.9409148693084717,
+       "rewards/ngram_repetition2/std": 0.016893817111849785,
+       "rewards/ngram_repetition3/mean": 0.9911813735961914,
+       "rewards/ngram_repetition3/std": 0.006109961774200201,
+       "rewards/symbolic_reward_accuracy/mean": 0.73974609375,
+       "rewards/symbolic_reward_accuracy/std": 0.43888023495674133,
+       "rewards/symbolic_reward_partial_score/mean": 0.902099609375,
+       "rewards/symbolic_reward_partial_score/std": 0.1949901431798935,
+       "rewards/tag_count_reward/mean": 0.997802734375,
+       "rewards/tag_count_reward/std": 0.033080797642469406,
+       "rewards/thinking_answer_ratio_reward/mean": 0.9657655954360962,
+       "rewards/thinking_answer_ratio_reward/std": 0.06475918740034103,
+       "sampling/importance_sampling_ratio/max": 2.0,
+       "sampling/importance_sampling_ratio/mean": 1.5316100120544434,
+       "sampling/importance_sampling_ratio/min": 0.001513556926511228,
+       "sampling/sampling_logp_difference/max": 6.493292808532715,
+       "sampling/sampling_logp_difference/mean": 0.6902951598167419,
+       "step": 16
+     },
+     {
+       "clip_ratio/high_max": 0.18359375,
+       "clip_ratio/high_mean": 0.07568359375,
+       "clip_ratio/low_mean": 0.27001953125,
+       "clip_ratio/low_min": 0.15234375,
+       "clip_ratio/region_mean": 0.345703125,
+       "completions/clipped_ratio": 0.0,
+       "completions/max_length": 789.0,
+       "completions/max_terminated_length": 789.0,
+       "completions/mean_length": 463.08349609375,
+       "completions/mean_terminated_length": 463.08349609375,
+       "completions/min_length": 151.0,
+       "completions/min_terminated_length": 151.0,
+       "entropy": 2.0984890162944794,
+       "epoch": 0.21739130434782608,
+       "frac_reward_zero_std": 0.0,
+       "grad_norm": 0.29433581233024597,
+       "learning_rate": 1e-05,
+       "loss": 0.009,
+       "num_tokens": 14759890.0,
+       "reward": 3.492382526397705,
+       "reward_std": 0.12577903270721436,
+       "rewards/ngram_repetition2/mean": 0.9376952052116394,
+       "rewards/ngram_repetition2/std": 0.01754874922335148,
+       "rewards/ngram_repetition3/mean": 0.991070568561554,
+       "rewards/ngram_repetition3/std": 0.005940499249845743,
+       "rewards/symbolic_reward_accuracy/mean": 0.767578125,
+       "rewards/symbolic_reward_accuracy/std": 0.4224797189235687,
+       "rewards/symbolic_reward_partial_score/mean": 0.9296875,
+       "rewards/symbolic_reward_partial_score/std": 0.14929406344890594,
+       "rewards/tag_count_reward/mean": 0.99853515625,
+       "rewards/tag_count_reward/std": 0.02703022211790085,
+       "rewards/thinking_answer_ratio_reward/mean": 0.9716020226478577,
+       "rewards/thinking_answer_ratio_reward/std": 0.052979789674282074,
+       "sampling/importance_sampling_ratio/max": 2.0,
+       "sampling/importance_sampling_ratio/mean": 1.547365427017212,
+       "sampling/importance_sampling_ratio/min": 0.0007647222955711186,
+       "sampling/sampling_logp_difference/max": 7.175997734069824,
+       "sampling/sampling_logp_difference/mean": 0.7198488712310791,
+       "step": 20
+     },
+     {
+       "clip_ratio/high_max": 0.09375,
+       "clip_ratio/high_mean": 0.037109375,
+       "clip_ratio/low_mean": 0.31298828125,
+       "clip_ratio/low_min": 0.16015625,
+       "clip_ratio/region_mean": 0.35009765625,
+       "completions/clipped_ratio": 0.0,
+       "completions/max_length": 830.0,
+       "completions/max_terminated_length": 830.0,
+       "completions/mean_length": 469.103515625,
+       "completions/mean_terminated_length": 469.103515625,
+       "completions/min_length": 163.0,
+       "completions/min_terminated_length": 163.0,
+       "entropy": 2.1055676490068436,
+       "epoch": 0.2608695652173913,
+       "frac_reward_zero_std": 0.0,
+       "grad_norm": 0.20278385281562805,
+       "learning_rate": 1e-05,
+       "loss": 0.0095,
+       "num_tokens": 17748358.0,
+       "reward": 3.392237424850464,
+       "reward_std": 0.13710367679595947,
+       "rewards/ngram_repetition2/mean": 0.9388803243637085,
+       "rewards/ngram_repetition2/std": 0.016749296337366104,
+       "rewards/ngram_repetition3/mean": 0.9914165139198303,
+       "rewards/ngram_repetition3/std": 0.005852194037288427,
+       "rewards/symbolic_reward_accuracy/mean": 0.71728515625,
+       "rewards/symbolic_reward_accuracy/std": 0.45042893290519714,
+       "rewards/symbolic_reward_partial_score/mean": 0.9291177988052368,
+       "rewards/symbolic_reward_partial_score/std": 0.13959749042987823,
+       "rewards/tag_count_reward/mean": 0.99951171875,
+       "rewards/tag_count_reward/std": 0.015621182508766651,
+       "rewards/thinking_answer_ratio_reward/mean": 0.9734582304954529,
+       "rewards/thinking_answer_ratio_reward/std": 0.030977273359894753,
+       "sampling/importance_sampling_ratio/max": 2.0,
+       "sampling/importance_sampling_ratio/mean": 1.5528595447540283,
+       "sampling/importance_sampling_ratio/min": 0.0014880819944664836,
+       "sampling/sampling_logp_difference/max": 6.51026725769043,
+       "sampling/sampling_logp_difference/mean": 0.728722095489502,
+       "step": 24
+     },
+     {
+       "clip_ratio/high_max": 0.1640625,
+       "clip_ratio/high_mean": 0.08447265625,
+       "clip_ratio/low_mean": 0.27197265625,
+       "clip_ratio/low_min": 0.13671875,
+       "clip_ratio/region_mean": 0.3564453125,
+       "completions/clipped_ratio": 0.0,
+       "completions/max_length": 791.0,
+       "completions/max_terminated_length": 791.0,
+       "completions/mean_length": 479.78955078125,
+       "completions/mean_terminated_length": 479.78955078125,
+       "completions/min_length": 83.0,
283
+ "completions/min_terminated_length": 83.0,
284
+ "entropy": 2.219125986099243,
285
+ "epoch": 0.30434782608695654,
286
+ "frac_reward_zero_std": 0.0,
287
+ "grad_norm": 0.29682013392448425,
288
+ "learning_rate": 1e-05,
289
+ "loss": 0.0102,
290
+ "num_tokens": 20780887.0,
291
+ "reward": 3.550210475921631,
292
+ "reward_std": 0.06435245275497437,
293
+ "rewards/ngram_repetition2/mean": 0.9357079267501831,
294
+ "rewards/ngram_repetition2/std": 0.017338356003165245,
295
+ "rewards/ngram_repetition3/mean": 0.9910642504692078,
296
+ "rewards/ngram_repetition3/std": 0.005752938333898783,
297
+ "rewards/symbolic_reward_accuracy/mean": 0.787109375,
298
+ "rewards/symbolic_reward_accuracy/std": 0.4094509482383728,
299
+ "rewards/symbolic_reward_partial_score/mean": 0.9469807744026184,
300
+ "rewards/symbolic_reward_partial_score/std": 0.12396769970655441,
301
+ "rewards/tag_count_reward/mean": 1.0,
302
+ "rewards/tag_count_reward/std": 0.0,
303
+ "rewards/thinking_answer_ratio_reward/mean": 0.9743147492408752,
304
+ "rewards/thinking_answer_ratio_reward/std": 0.00800173357129097,
305
+ "sampling/importance_sampling_ratio/max": 2.0,
306
+ "sampling/importance_sampling_ratio/mean": 1.5690585374832153,
307
+ "sampling/importance_sampling_ratio/min": 0.00028703955467790365,
308
+ "sampling/sampling_logp_difference/max": 8.155890464782715,
309
+ "sampling/sampling_logp_difference/mean": 0.764785885810852,
310
+ "step": 28
311
+ },
312
+ {
313
+ "clip_ratio/high_max": 0.12109375,
314
+ "clip_ratio/high_mean": 0.04931640625,
315
+ "clip_ratio/low_mean": 0.2880859375,
316
+ "clip_ratio/low_min": 0.1640625,
317
+ "clip_ratio/region_mean": 0.33740234375,
318
+ "completions/clipped_ratio": 0.0,
319
+ "completions/max_length": 807.0,
320
+ "completions/max_terminated_length": 807.0,
321
+ "completions/mean_length": 496.53515625,
322
+ "completions/mean_terminated_length": 496.53515625,
323
+ "completions/min_length": 81.0,
324
+ "completions/min_terminated_length": 81.0,
325
+ "entropy": 2.3269284814596176,
326
+ "epoch": 0.34782608695652173,
327
+ "frac_reward_zero_std": 0.0,
328
+ "grad_norm": 0.42718204855918884,
329
+ "learning_rate": 1e-05,
330
+ "loss": 0.0099,
331
+ "num_tokens": 23854047.0,
332
+ "reward": 3.542815685272217,
333
+ "reward_std": 0.06283775717020035,
334
+ "rewards/ngram_repetition2/mean": 0.935409665107727,
335
+ "rewards/ngram_repetition2/std": 0.01653331145644188,
336
+ "rewards/ngram_repetition3/mean": 0.991692304611206,
337
+ "rewards/ngram_repetition3/std": 0.005664789583534002,
338
+ "rewards/symbolic_reward_accuracy/mean": 0.783203125,
339
+ "rewards/symbolic_reward_accuracy/std": 0.41216373443603516,
340
+ "rewards/symbolic_reward_partial_score/mean": 0.9476318359375,
341
+ "rewards/symbolic_reward_partial_score/std": 0.11877194046974182,
342
+ "rewards/tag_count_reward/mean": 0.999755859375,
343
+ "rewards/tag_count_reward/std": 0.011048543266952038,
344
+ "rewards/thinking_answer_ratio_reward/mean": 0.975084662437439,
345
+ "rewards/thinking_answer_ratio_reward/std": 0.023241188377141953,
346
+ "sampling/importance_sampling_ratio/max": 2.0,
347
+ "sampling/importance_sampling_ratio/mean": 1.5795289278030396,
348
+ "sampling/importance_sampling_ratio/min": 0.0014081046683713794,
349
+ "sampling/sampling_logp_difference/max": 6.5655107498168945,
350
+ "sampling/sampling_logp_difference/mean": 0.7878707051277161,
351
+ "step": 32
352
+ },
353
+ {
354
+ "clip_ratio/high_max": 0.05078125,
355
+ "clip_ratio/high_mean": 0.01904296875,
356
+ "clip_ratio/low_mean": 0.33984375,
357
+ "clip_ratio/low_min": 0.21484375,
358
+ "clip_ratio/region_mean": 0.35888671875,
359
+ "completions/clipped_ratio": 0.0,
360
+ "completions/max_length": 792.0,
361
+ "completions/max_terminated_length": 792.0,
362
+ "completions/mean_length": 469.27099609375,
363
+ "completions/mean_terminated_length": 469.27099609375,
364
+ "completions/min_length": 94.0,
365
+ "completions/min_terminated_length": 94.0,
366
+ "entropy": 2.4070171415805817,
367
+ "epoch": 0.391304347826087,
368
+ "frac_reward_zero_std": 0.0,
369
+ "grad_norm": 0.4410569965839386,
370
+ "learning_rate": 1e-05,
371
+ "loss": 0.0054,
372
+ "num_tokens": 26880874.0,
373
+ "reward": 3.329148530960083,
374
+ "reward_std": 0.05563488230109215,
375
+ "rewards/ngram_repetition2/mean": 0.9407572746276855,
376
+ "rewards/ngram_repetition2/std": 0.015095624141395092,
377
+ "rewards/ngram_repetition3/mean": 0.9932428598403931,
378
+ "rewards/ngram_repetition3/std": 0.004793159198015928,
379
+ "rewards/symbolic_reward_accuracy/mean": 0.69189453125,
380
+ "rewards/symbolic_reward_accuracy/std": 0.46182316541671753,
381
+ "rewards/symbolic_reward_partial_score/mean": 0.9170328974723816,
382
+ "rewards/symbolic_reward_partial_score/std": 0.14379549026489258,
383
+ "rewards/tag_count_reward/mean": 0.999267578125,
384
+ "rewards/tag_count_reward/std": 0.019127286970615387,
385
+ "rewards/thinking_answer_ratio_reward/mean": 0.9718977808952332,
386
+ "rewards/thinking_answer_ratio_reward/std": 0.03811168670654297,
387
+ "sampling/importance_sampling_ratio/max": 2.0,
388
+ "sampling/importance_sampling_ratio/mean": 1.5838468074798584,
389
+ "sampling/importance_sampling_ratio/min": 0.00034505638177506626,
390
+ "sampling/sampling_logp_difference/max": 7.971802711486816,
391
+ "sampling/sampling_logp_difference/mean": 0.808716893196106,
392
+ "step": 36
393
+ },
394
+ {
395
+ "clip_ratio/high_max": 0.09765625,
396
+ "clip_ratio/high_mean": 0.0341796875,
397
+ "clip_ratio/low_mean": 0.31787109375,
398
+ "clip_ratio/low_min": 0.16796875,
399
+ "clip_ratio/region_mean": 0.35205078125,
400
+ "completions/clipped_ratio": 0.0,
401
+ "completions/max_length": 737.0,
402
+ "completions/max_terminated_length": 737.0,
403
+ "completions/mean_length": 489.2646484375,
404
+ "completions/mean_terminated_length": 489.2646484375,
405
+ "completions/min_length": 42.0,
406
+ "completions/min_terminated_length": 42.0,
407
+ "entropy": 2.6946236938238144,
408
+ "epoch": 0.43478260869565216,
409
+ "frac_reward_zero_std": 0.0,
410
+ "grad_norm": 0.2574540674686432,
411
+ "learning_rate": 1e-05,
412
+ "loss": 0.0076,
413
+ "num_tokens": 29920136.0,
414
+ "reward": 3.6383514404296875,
415
+ "reward_std": 0.0791553258895874,
416
+ "rewards/ngram_repetition2/mean": 0.9311598539352417,
417
+ "rewards/ngram_repetition2/std": 0.01622355915606022,
418
+ "rewards/ngram_repetition3/mean": 0.9929859638214111,
419
+ "rewards/ngram_repetition3/std": 0.004797095898538828,
420
+ "rewards/symbolic_reward_accuracy/mean": 0.826171875,
421
+ "rewards/symbolic_reward_accuracy/std": 0.37905415892601013,
422
+ "rewards/symbolic_reward_partial_score/mean": 0.957275390625,
423
+ "rewards/symbolic_reward_partial_score/std": 0.10868100076913834,
424
+ "rewards/tag_count_reward/mean": 0.999755859375,
425
+ "rewards/tag_count_reward/std": 0.011048543266952038,
426
+ "rewards/thinking_answer_ratio_reward/mean": 0.9735076427459717,
427
+ "rewards/thinking_answer_ratio_reward/std": 0.024763144552707672,
428
+ "sampling/importance_sampling_ratio/max": 2.0,
429
+ "sampling/importance_sampling_ratio/mean": 1.596329927444458,
430
+ "sampling/importance_sampling_ratio/min": 6.51177906547673e-05,
431
+ "sampling/sampling_logp_difference/max": 9.639312744140625,
432
+ "sampling/sampling_logp_difference/mean": 0.8470304012298584,
433
+ "step": 40
434
+ },
435
+ {
436
+ "clip_ratio/high_max": 0.046875,
437
+ "clip_ratio/high_mean": 0.01611328125,
438
+ "clip_ratio/low_mean": 0.3125,
439
+ "clip_ratio/low_min": 0.1953125,
440
+ "clip_ratio/region_mean": 0.32861328125,
441
+ "completions/clipped_ratio": 0.0,
442
+ "completions/max_length": 1433.0,
443
+ "completions/max_terminated_length": 1433.0,
444
+ "completions/mean_length": 503.10205078125,
445
+ "completions/mean_terminated_length": 503.10205078125,
446
+ "completions/min_length": 70.0,
447
+ "completions/min_terminated_length": 70.0,
448
+ "entropy": 2.872429236769676,
449
+ "epoch": 0.4782608695652174,
450
+ "frac_reward_zero_std": 0.0,
451
+ "grad_norm": 1.493705153465271,
452
+ "learning_rate": 1e-05,
453
+ "loss": 0.0106,
454
+ "num_tokens": 33009913.0,
455
+ "reward": 3.4076955318450928,
456
+ "reward_std": 0.07953877747058868,
457
+ "rewards/ngram_repetition2/mean": 0.9200476408004761,
458
+ "rewards/ngram_repetition2/std": 0.02228526771068573,
459
+ "rewards/ngram_repetition3/mean": 0.9917500615119934,
460
+ "rewards/ngram_repetition3/std": 0.012813129462301731,
461
+ "rewards/symbolic_reward_accuracy/mean": 0.72802734375,
462
+ "rewards/symbolic_reward_accuracy/std": 0.4450845420360565,
463
+ "rewards/symbolic_reward_partial_score/mean": 0.9240315556526184,
464
+ "rewards/symbolic_reward_partial_score/std": 0.14966855943202972,
465
+ "rewards/tag_count_reward/mean": 0.998779296875,
466
+ "rewards/tag_count_reward/std": 0.02468114346265793,
467
+ "rewards/thinking_answer_ratio_reward/mean": 0.9712032079696655,
468
+ "rewards/thinking_answer_ratio_reward/std": 0.0486665777862072,
469
+ "sampling/importance_sampling_ratio/max": 2.0,
470
+ "sampling/importance_sampling_ratio/mean": 1.6160120964050293,
471
+ "sampling/importance_sampling_ratio/min": 0.0009256865596398711,
472
+ "sampling/sampling_logp_difference/max": 6.9849748611450195,
473
+ "sampling/sampling_logp_difference/mean": 0.8949941396713257,
474
+ "step": 44
475
+ },
476
+ {
477
+ "clip_ratio/high_max": 0.0859375,
478
+ "clip_ratio/high_mean": 0.0400390625,
479
+ "clip_ratio/low_mean": 0.306640625,
480
+ "clip_ratio/low_min": 0.1875,
481
+ "clip_ratio/region_mean": 0.3466796875,
482
+ "completions/clipped_ratio": 0.0,
483
+ "completions/max_length": 939.0,
484
+ "completions/max_terminated_length": 939.0,
485
+ "completions/mean_length": 571.37060546875,
486
+ "completions/mean_terminated_length": 571.37060546875,
487
+ "completions/min_length": 60.0,
488
+ "completions/min_terminated_length": 60.0,
489
+ "entropy": 2.9497868567705154,
490
+ "epoch": 0.5217391304347826,
491
+ "frac_reward_zero_std": 0.0,
492
+ "grad_norm": 0.6648740172386169,
493
+ "learning_rate": 1e-05,
494
+ "loss": 0.0113,
495
+ "num_tokens": 36239504.0,
496
+ "reward": 3.5769097805023193,
497
+ "reward_std": 0.07018020749092102,
498
+ "rewards/ngram_repetition2/mean": 0.914193868637085,
499
+ "rewards/ngram_repetition2/std": 0.017817478626966476,
500
+ "rewards/ngram_repetition3/mean": 0.9914629459381104,
501
+ "rewards/ngram_repetition3/std": 0.0048860348761081696,
502
+ "rewards/symbolic_reward_accuracy/mean": 0.80029296875,
503
+ "rewards/symbolic_reward_accuracy/std": 0.39987775683403015,
504
+ "rewards/symbolic_reward_partial_score/mean": 0.9482421875,
505
+ "rewards/symbolic_reward_partial_score/std": 0.1259223222732544,
506
+ "rewards/tag_count_reward/mean": 0.999267578125,
507
+ "rewards/tag_count_reward/std": 0.019127286970615387,
508
+ "rewards/thinking_answer_ratio_reward/mean": 0.9757556915283203,
509
+ "rewards/thinking_answer_ratio_reward/std": 0.03801291063427925,
510
+ "sampling/importance_sampling_ratio/max": 2.0,
511
+ "sampling/importance_sampling_ratio/mean": 1.6147382259368896,
512
+ "sampling/importance_sampling_ratio/min": 0.0013765409821644425,
513
+ "sampling/sampling_logp_difference/max": 6.588181495666504,
514
+ "sampling/sampling_logp_difference/mean": 0.8927680850028992,
515
+ "step": 48
516
+ },
517
+ {
518
+ "clip_ratio/high_max": 0.1640625,
519
+ "clip_ratio/high_mean": 0.09228515625,
520
+ "clip_ratio/low_mean": 0.267578125,
521
+ "clip_ratio/low_min": 0.140625,
522
+ "clip_ratio/region_mean": 0.35986328125,
523
+ "completions/clipped_ratio": 0.0,
524
+ "completions/max_length": 1066.0,
525
+ "completions/max_terminated_length": 1066.0,
526
+ "completions/mean_length": 594.3876953125,
527
+ "completions/mean_terminated_length": 594.3876953125,
528
+ "completions/min_length": 80.0,
529
+ "completions/min_terminated_length": 80.0,
530
+ "entropy": 3.021150380373001,
531
+ "epoch": 0.5652173913043478,
532
+ "frac_reward_zero_std": 0.0,
533
+ "grad_norm": 0.3681764006614685,
534
+ "learning_rate": 1e-05,
535
+ "loss": 0.0141,
536
+ "num_tokens": 39532074.0,
537
+ "reward": 3.3776330947875977,
538
+ "reward_std": 0.10503610223531723,
539
+ "rewards/ngram_repetition2/mean": 0.908423900604248,
540
+ "rewards/ngram_repetition2/std": 0.02048499695956707,
541
+ "rewards/ngram_repetition3/mean": 0.9907451272010803,
542
+ "rewards/ngram_repetition3/std": 0.005756227765232325,
543
+ "rewards/symbolic_reward_accuracy/mean": 0.7138671875,
544
+ "rewards/symbolic_reward_accuracy/std": 0.45206260681152344,
545
+ "rewards/symbolic_reward_partial_score/mean": 0.921630859375,
546
+ "rewards/symbolic_reward_partial_score/std": 0.15090703964233398,
547
+ "rewards/tag_count_reward/mean": 0.99951171875,
548
+ "rewards/tag_count_reward/std": 0.015621182508766651,
549
+ "rewards/thinking_answer_ratio_reward/mean": 0.9764349460601807,
550
+ "rewards/thinking_answer_ratio_reward/std": 0.03162452206015587,
551
+ "sampling/importance_sampling_ratio/max": 2.0,
552
+ "sampling/importance_sampling_ratio/mean": 1.6216703653335571,
553
+ "sampling/importance_sampling_ratio/min": 0.001497772173024714,
554
+ "sampling/sampling_logp_difference/max": 6.503776550292969,
555
+ "sampling/sampling_logp_difference/mean": 0.9096119999885559,
556
+ "step": 52
557
+ },
558
+ {
559
+ "clip_ratio/high_max": 0.125,
560
+ "clip_ratio/high_mean": 0.0537109375,
561
+ "clip_ratio/low_mean": 0.30859375,
562
+ "clip_ratio/low_min": 0.1953125,
563
+ "clip_ratio/region_mean": 0.3623046875,
564
+ "completions/clipped_ratio": 0.0,
565
+ "completions/max_length": 908.0,
566
+ "completions/max_terminated_length": 908.0,
567
+ "completions/mean_length": 545.55126953125,
568
+ "completions/mean_terminated_length": 545.55126953125,
569
+ "completions/min_length": 53.0,
570
+ "completions/min_terminated_length": 53.0,
571
+ "entropy": 3.076643869280815,
572
+ "epoch": 0.6086956521739131,
573
+ "frac_reward_zero_std": 0.0,
574
+ "grad_norm": 0.5528954267501831,
575
+ "learning_rate": 1e-05,
576
+ "loss": 0.0124,
577
+ "num_tokens": 42708787.0,
578
+ "reward": 3.5160012245178223,
579
+ "reward_std": 0.06761978566646576,
580
+ "rewards/ngram_repetition2/mean": 0.919230043888092,
581
+ "rewards/ngram_repetition2/std": 0.019755244255065918,
582
+ "rewards/ngram_repetition3/mean": 0.9927445650100708,
583
+ "rewards/ngram_repetition3/std": 0.004674999509006739,
584
+ "rewards/symbolic_reward_accuracy/mean": 0.77490234375,
585
+ "rewards/symbolic_reward_accuracy/std": 0.4177486300468445,
586
+ "rewards/symbolic_reward_partial_score/mean": 0.9378255605697632,
587
+ "rewards/symbolic_reward_partial_score/std": 0.13589942455291748,
588
+ "rewards/tag_count_reward/mean": 0.99951171875,
589
+ "rewards/tag_count_reward/std": 0.015621182508766651,
590
+ "rewards/thinking_answer_ratio_reward/mean": 0.973955512046814,
591
+ "rewards/thinking_answer_ratio_reward/std": 0.03269569203257561,
592
+ "sampling/importance_sampling_ratio/max": 2.0,
593
+ "sampling/importance_sampling_ratio/mean": 1.6185786724090576,
594
+ "sampling/importance_sampling_ratio/min": 0.0006516074645332992,
595
+ "sampling/sampling_logp_difference/max": 7.336068153381348,
596
+ "sampling/sampling_logp_difference/mean": 0.9117289781570435,
597
+ "step": 56
598
+ },
599
+ {
600
+ "clip_ratio/high_max": 0.1015625,
601
+ "clip_ratio/high_mean": 0.0439453125,
602
+ "clip_ratio/low_mean": 0.2900390625,
603
+ "clip_ratio/low_min": 0.16015625,
604
+ "clip_ratio/region_mean": 0.333984375,
605
+ "completions/clipped_ratio": 0.0,
606
+ "completions/max_length": 868.0,
607
+ "completions/max_terminated_length": 868.0,
608
+ "completions/mean_length": 484.09326171875,
609
+ "completions/mean_terminated_length": 484.09326171875,
610
+ "completions/min_length": 22.0,
611
+ "completions/min_terminated_length": 22.0,
612
+ "entropy": 3.069231167435646,
613
+ "epoch": 0.6521739130434783,
614
+ "frac_reward_zero_std": 0.0,
615
+ "grad_norm": 0.7982656359672546,
616
+ "learning_rate": 1e-05,
617
+ "loss": 0.0098,
618
+ "num_tokens": 45756466.0,
619
+ "reward": 3.442070960998535,
620
+ "reward_std": 0.07040801644325256,
621
+ "rewards/ngram_repetition2/mean": 0.9283775091171265,
622
+ "rewards/ngram_repetition2/std": 0.020430130884051323,
623
+ "rewards/ngram_repetition3/mean": 0.9933985471725464,
624
+ "rewards/ngram_repetition3/std": 0.004864003509283066,
625
+ "rewards/symbolic_reward_accuracy/mean": 0.73974609375,
626
+ "rewards/symbolic_reward_accuracy/std": 0.43888023495674133,
627
+ "rewards/symbolic_reward_partial_score/mean": 0.9348958134651184,
628
+ "rewards/symbolic_reward_partial_score/std": 0.13156187534332275,
629
+ "rewards/tag_count_reward/mean": 0.998779296875,
630
+ "rewards/tag_count_reward/std": 0.02468114346265793,
631
+ "rewards/thinking_answer_ratio_reward/mean": 0.9685888290405273,
632
+ "rewards/thinking_answer_ratio_reward/std": 0.051747798919677734,
633
+ "sampling/importance_sampling_ratio/max": 2.0,
634
+ "sampling/importance_sampling_ratio/mean": 1.621800422668457,
635
+ "sampling/importance_sampling_ratio/min": 0.000417891307733953,
636
+ "sampling/sampling_logp_difference/max": 7.780289173126221,
637
+ "sampling/sampling_logp_difference/mean": 0.9235653877258301,
638
+ "step": 60
639
+ },
640
+ {
641
+ "clip_ratio/high_max": 0.03515625,
642
+ "clip_ratio/high_mean": 0.00830078125,
643
+ "clip_ratio/low_mean": 0.3193359375,
644
+ "clip_ratio/low_min": 0.1953125,
645
+ "clip_ratio/region_mean": 0.32763671875,
646
+ "completions/clipped_ratio": 0.0,
647
+ "completions/max_length": 813.0,
648
+ "completions/max_terminated_length": 813.0,
649
+ "completions/mean_length": 467.482421875,
650
+ "completions/mean_terminated_length": 467.482421875,
651
+ "completions/min_length": 80.0,
652
+ "completions/min_terminated_length": 80.0,
653
+ "entropy": 3.1883307099342346,
654
+ "epoch": 0.6956521739130435,
655
+ "frac_reward_zero_std": 0.0,
656
+ "grad_norm": 0.9010258316993713,
657
+ "learning_rate": 1e-05,
658
+ "loss": 0.0079,
659
+ "num_tokens": 48776462.0,
660
+ "reward": 3.5352914333343506,
661
+ "reward_std": 0.11082939803600311,
662
+ "rewards/ngram_repetition2/mean": 0.9302128553390503,
663
+ "rewards/ngram_repetition2/std": 0.01884542964398861,
664
+ "rewards/ngram_repetition3/mean": 0.9941788911819458,
665
+ "rewards/ngram_repetition3/std": 0.0045182653702795506,
666
+ "rewards/symbolic_reward_accuracy/mean": 0.7822265625,
667
+ "rewards/symbolic_reward_accuracy/std": 0.4128333628177643,
668
+ "rewards/symbolic_reward_partial_score/mean": 0.9423828125,
669
+ "rewards/symbolic_reward_partial_score/std": 0.13051293790340424,
670
+ "rewards/tag_count_reward/mean": 0.99951171875,
671
+ "rewards/tag_count_reward/std": 0.015621182508766651,
672
+ "rewards/thinking_answer_ratio_reward/mean": 0.969980776309967,
673
+ "rewards/thinking_answer_ratio_reward/std": 0.03189215064048767,
674
+ "sampling/importance_sampling_ratio/max": 2.0,
675
+ "sampling/importance_sampling_ratio/mean": 1.626150369644165,
676
+ "sampling/importance_sampling_ratio/min": 0.00047154395724646747,
677
+ "sampling/sampling_logp_difference/max": 7.65949821472168,
678
+ "sampling/sampling_logp_difference/mean": 0.9483247995376587,
679
+ "step": 64
680
+ },
681
+ {
682
+ "clip_ratio/high_max": 0.0234375,
683
+ "clip_ratio/high_mean": 0.00390625,
684
+ "clip_ratio/low_mean": 0.333984375,
685
+ "clip_ratio/low_min": 0.203125,
686
+ "clip_ratio/region_mean": 0.337890625,
687
+ "completions/clipped_ratio": 0.0,
688
+ "completions/max_length": 1044.0,
689
+ "completions/max_terminated_length": 1044.0,
690
+ "completions/mean_length": 568.92041015625,
691
+ "completions/mean_terminated_length": 568.92041015625,
692
+ "completions/min_length": 43.0,
693
+ "completions/min_terminated_length": 43.0,
694
+ "entropy": 3.2027508467435837,
695
+ "epoch": 0.7391304347826086,
696
+ "frac_reward_zero_std": 0.0,
697
+ "grad_norm": 1.4200383424758911,
698
+ "learning_rate": 1e-05,
699
+ "loss": 0.0152,
700
+ "num_tokens": 51997867.0,
701
+ "reward": 3.482755422592163,
702
+ "reward_std": 0.08074051141738892,
703
+ "rewards/ngram_repetition2/mean": 0.9018445611000061,
704
+ "rewards/ngram_repetition2/std": 0.020495234057307243,
705
+ "rewards/ngram_repetition3/mean": 0.9901250600814819,
706
+ "rewards/ngram_repetition3/std": 0.005463259294629097,
707
+ "rewards/symbolic_reward_accuracy/mean": 0.7578125,
708
+ "rewards/symbolic_reward_accuracy/std": 0.4285118281841278,
709
+ "rewards/symbolic_reward_partial_score/mean": 0.939697265625,
710
+ "rewards/symbolic_reward_partial_score/std": 0.12308314442634583,
711
+ "rewards/tag_count_reward/mean": 0.998779296875,
712
+ "rewards/tag_count_reward/std": 0.02468114346265793,
713
+ "rewards/thinking_answer_ratio_reward/mean": 0.9734160304069519,
714
+ "rewards/thinking_answer_ratio_reward/std": 0.052874695509672165,
715
+ "sampling/importance_sampling_ratio/max": 2.0,
716
+ "sampling/importance_sampling_ratio/mean": 1.6354997158050537,
717
+ "sampling/importance_sampling_ratio/min": 0.000564142013899982,
718
+ "sampling/sampling_logp_difference/max": 7.4802045822143555,
719
+ "sampling/sampling_logp_difference/mean": 0.9833019971847534,
720
+ "step": 68
721
+ },
722
+ {
723
+ "clip_ratio/high_max": 0.0,
724
+ "clip_ratio/high_mean": 0.0,
725
+ "clip_ratio/low_mean": 0.33203125,
726
+ "clip_ratio/low_min": 0.1953125,
727
+ "clip_ratio/region_mean": 0.33203125,
728
+ "completions/clipped_ratio": 0.0,
729
+ "completions/max_length": 1407.0,
730
+ "completions/max_terminated_length": 1407.0,
731
+ "completions/mean_length": 611.984375,
732
+ "completions/mean_terminated_length": 611.984375,
733
+ "completions/min_length": 206.0,
734
+ "completions/min_terminated_length": 206.0,
735
+ "entropy": 3.311859115958214,
736
+ "epoch": 0.782608695652174,
737
+ "frac_reward_zero_std": 0.0,
738
+ "grad_norm": 2.4716572761535645,
739
+ "learning_rate": 1e-05,
740
+ "loss": 0.0173,
741
+ "num_tokens": 55275787.0,
742
+ "reward": 3.511713981628418,
743
+ "reward_std": 0.18764734268188477,
744
+ "rewards/ngram_repetition2/mean": 0.8970193862915039,
745
+ "rewards/ngram_repetition2/std": 0.020160134881734848,
746
+ "rewards/ngram_repetition3/mean": 0.9902719259262085,
747
+ "rewards/ngram_repetition3/std": 0.005153110716491938,
748
+ "rewards/symbolic_reward_accuracy/mean": 0.7763671875,
749
+ "rewards/symbolic_reward_accuracy/std": 0.4167805016040802,
750
+ "rewards/symbolic_reward_partial_score/mean": 0.9328206181526184,
751
+ "rewards/symbolic_reward_partial_score/std": 0.15078683197498322,
752
+ "rewards/tag_count_reward/mean": 0.99755859375,
753
+ "rewards/tag_count_reward/std": 0.034861668944358826,
754
+ "rewards/thinking_answer_ratio_reward/mean": 0.9727209806442261,
755
+ "rewards/thinking_answer_ratio_reward/std": 0.06795475631952286,
756
+ "sampling/importance_sampling_ratio/max": 2.0,
757
+ "sampling/importance_sampling_ratio/mean": 1.6320133209228516,
758
+ "sampling/importance_sampling_ratio/min": 0.0007096517365425825,
759
+ "sampling/sampling_logp_difference/max": 7.250736236572266,
760
+ "sampling/sampling_logp_difference/mean": 0.9720152616500854,
761
+ "step": 72
762
+ },
763
+ {
764
+ "clip_ratio/high_max": 0.00390625,
765
+ "clip_ratio/high_mean": 0.0009765625,
766
+ "clip_ratio/low_mean": 0.31982421875,
767
+ "clip_ratio/low_min": 0.17578125,
768
+ "clip_ratio/region_mean": 0.32080078125,
769
+ "completions/clipped_ratio": 0.0,
770
+ "completions/max_length": 1373.0,
771
+ "completions/max_terminated_length": 1373.0,
772
+ "completions/mean_length": 853.9833984375,
773
+ "completions/mean_terminated_length": 853.9833984375,
774
+ "completions/min_length": 4.0,
775
+ "completions/min_terminated_length": 4.0,
776
+ "entropy": 3.3660423159599304,
777
+ "epoch": 0.8260869565217391,
778
+ "frac_reward_zero_std": 0.0,
779
+ "grad_norm": 1.4343006610870361,
780
+ "learning_rate": 1e-05,
781
+ "loss": 0.0235,
782
+ "num_tokens": 59081001.0,
783
+ "reward": 3.422502279281616,
784
+ "reward_std": 0.1829456090927124,
785
+ "rewards/ngram_repetition2/mean": 0.8537134528160095,
786
+ "rewards/ngram_repetition2/std": 0.024440823122859,
787
+ "rewards/ngram_repetition3/mean": 0.9847572445869446,
788
+ "rewards/ngram_repetition3/std": 0.005931735038757324,
789
+ "rewards/symbolic_reward_accuracy/mean": 0.73828125,
790
+ "rewards/symbolic_reward_accuracy/std": 0.4396776556968689,
791
+ "rewards/symbolic_reward_partial_score/mean": 0.9226888418197632,
792
+ "rewards/symbolic_reward_partial_score/std": 0.16657814383506775,
793
+ "rewards/tag_count_reward/mean": 0.9951171875,
794
+ "rewards/tag_count_reward/std": 0.04918074235320091,
795
+ "rewards/thinking_answer_ratio_reward/mean": 0.9749013185501099,
796
+ "rewards/thinking_answer_ratio_reward/std": 0.0969509705901146,
797
+ "sampling/importance_sampling_ratio/max": 2.0,
798
+ "sampling/importance_sampling_ratio/mean": 1.6466174125671387,
799
+ "sampling/importance_sampling_ratio/min": 0.00026180053828284144,
800
+ "sampling/sampling_logp_difference/max": 8.24792766571045,
801
+ "sampling/sampling_logp_difference/mean": 1.043502688407898,
802
+ "step": 76
803
+ },
804
+ {
805
+ "clip_ratio/high_max": 0.0,
806
+ "clip_ratio/high_mean": 0.0,
807
+ "clip_ratio/low_mean": 0.33642578125,
808
+ "clip_ratio/low_min": 0.19921875,
809
+ "clip_ratio/region_mean": 0.33642578125,
810
+ "completions/clipped_ratio": 0.0,
811
+ "completions/max_length": 2030.0,
812
+ "completions/max_terminated_length": 2030.0,
813
+ "completions/mean_length": 812.24609375,
814
+ "completions/mean_terminated_length": 812.24609375,
815
+ "completions/min_length": 5.0,
816
+ "completions/min_terminated_length": 5.0,
817
+ "entropy": 3.547543302178383,
818
+ "epoch": 0.8695652173913043,
819
+ "frac_reward_zero_std": 0.0,
820
+ "grad_norm": 2.1804628372192383,
821
+ "learning_rate": 1e-05,
822
+ "loss": 0.025,
823
+ "num_tokens": 62765889.0,
824
+ "reward": 3.5936460494995117,
825
+ "reward_std": 0.13216999173164368,
826
+ "rewards/ngram_repetition2/mean": 0.8755757808685303,
827
+ "rewards/ngram_repetition2/std": 0.022492358461022377,
828
+ "rewards/ngram_repetition3/mean": 0.9866626262664795,
829
+ "rewards/ngram_repetition3/std": 0.005942893214523792,
830
+ "rewards/symbolic_reward_accuracy/mean": 0.8115234375,
831
+ "rewards/symbolic_reward_accuracy/std": 0.39118775725364685,
832
+ "rewards/symbolic_reward_partial_score/mean": 0.9463704228401184,
833
+ "rewards/symbolic_reward_partial_score/std": 0.1394457072019577,
834
+ "rewards/tag_count_reward/mean": 0.995849609375,
835
+ "rewards/tag_count_reward/std": 0.04537592828273773,
836
+ "rewards/thinking_answer_ratio_reward/mean": 0.9756726026535034,
837
+ "rewards/thinking_answer_ratio_reward/std": 0.08393006771802902,
838
+ "sampling/importance_sampling_ratio/max": 2.0,
839
+ "sampling/importance_sampling_ratio/mean": 1.6469275951385498,
840
+ "sampling/importance_sampling_ratio/min": 0.0009282914106734097,
841
+ "sampling/sampling_logp_difference/max": 6.9821648597717285,
842
+ "sampling/sampling_logp_difference/mean": 1.0689876079559326,
843
+ "step": 80
844
+ },
845
+ {
846
+ "clip_ratio/high_max": 0.00390625,
847
+ "clip_ratio/high_mean": 0.00048828125,
848
+ "clip_ratio/low_mean": 0.33740234375,
849
+ "clip_ratio/low_min": 0.19921875,
850
+ "clip_ratio/region_mean": 0.337890625,
851
+ "completions/clipped_ratio": 0.0,
852
+ "completions/max_length": 1538.0,
853
+ "completions/max_terminated_length": 1538.0,
854
+ "completions/mean_length": 874.0634765625,
855
+ "completions/mean_terminated_length": 874.0634765625,
856
+ "completions/min_length": 6.0,
857
+ "completions/min_terminated_length": 6.0,
858
+ "entropy": 3.464124232530594,
859
+ "epoch": 0.9130434782608695,
860
+ "frac_reward_zero_std": 0.0,
861
+ "grad_norm": 2.2991092205047607,
862
+ "learning_rate": 1e-05,
863
+ "loss": 0.0286,
864
+ "num_tokens": 66609059.0,
865
+ "reward": 3.4975390434265137,
866
+ "reward_std": 0.14730925858020782,
867
+ "rewards/ngram_repetition2/mean": 0.8646764755249023,
868
+ "rewards/ngram_repetition2/std": 0.02347402088344097,
869
+ "rewards/ngram_repetition3/mean": 0.9867684841156006,
870
+ "rewards/ngram_repetition3/std": 0.005439760163426399,
871
+ "rewards/symbolic_reward_accuracy/mean": 0.771484375,
872
+ "rewards/symbolic_reward_accuracy/std": 0.4199790060520172,
873
+ "rewards/symbolic_reward_partial_score/mean": 0.92919921875,
874
+ "rewards/symbolic_reward_partial_score/std": 0.1563497930765152,
875
+ "rewards/tag_count_reward/mean": 0.9970703125,
876
+ "rewards/tag_count_reward/std": 0.03817030414938927,
877
+ "rewards/thinking_answer_ratio_reward/mean": 0.9786182641983032,
878
+ "rewards/thinking_answer_ratio_reward/std": 0.07284367829561234,
879
+ "sampling/importance_sampling_ratio/max": 2.0,
880
+ "sampling/importance_sampling_ratio/mean": 1.6411030292510986,
881
+ "sampling/importance_sampling_ratio/min": 0.002884730463847518,
882
+ "sampling/sampling_logp_difference/max": 5.848323822021484,
883
+ "sampling/sampling_logp_difference/mean": 1.0739994049072266,
884
+ "step": 84
885
+ },
886
+ {
887
+ "clip_ratio/high_max": 0.015625,
888
+ "clip_ratio/high_mean": 0.0029296875,
889
+ "clip_ratio/low_mean": 0.32421875,
890
+ "clip_ratio/low_min": 0.1875,
891
+ "clip_ratio/region_mean": 0.3271484375,
892
+ "completions/clipped_ratio": 0.0,
893
+ "completions/max_length": 1897.0,
894
+ "completions/max_terminated_length": 1897.0,
895
+ "completions/mean_length": 795.52587890625,
896
+ "completions/mean_terminated_length": 795.52587890625,
897
+ "completions/min_length": 14.0,
898
+ "completions/min_terminated_length": 14.0,
899
+ "entropy": 3.7464267015457153,
900
+ "epoch": 0.9565217391304348,
901
+ "frac_reward_zero_std": 0.0,
902
+ "grad_norm": 1.355712652206421,
903
+ "learning_rate": 1e-05,
904
+ "loss": 0.0302,
905
+ "num_tokens": 70304056.0,
906
+ "reward": 3.435868501663208,
907
+ "reward_std": 0.1441032588481903,
908
+ "rewards/ngram_repetition2/mean": 0.8944746255874634,
909
+ "rewards/ngram_repetition2/std": 0.022508157417178154,
910
+ "rewards/ngram_repetition3/mean": 0.9901339411735535,
911
+ "rewards/ngram_repetition3/std": 0.005564424674957991,
912
+ "rewards/symbolic_reward_accuracy/mean": 0.74072265625,
913
+ "rewards/symbolic_reward_accuracy/std": 0.4383451044559479,
914
+ "rewards/symbolic_reward_partial_score/mean": 0.9297281503677368,
915
+ "rewards/symbolic_reward_partial_score/std": 0.1441221833229065,
916
+ "rewards/tag_count_reward/mean": 0.99609375,
917
+ "rewards/tag_count_reward/std": 0.04403195157647133,
918
+ "rewards/thinking_answer_ratio_reward/mean": 0.9755286574363708,
919
+ "rewards/thinking_answer_ratio_reward/std": 0.08032744377851486,
920
+ "sampling/importance_sampling_ratio/max": 2.0,
921
+ "sampling/importance_sampling_ratio/mean": 1.6346502304077148,
922
+ "sampling/importance_sampling_ratio/min": 5.58673445993918e-06,
923
+ "sampling/sampling_logp_difference/max": 12.095115661621094,
924
+ "sampling/sampling_logp_difference/mean": 1.0778782367706299,
925
+ "step": 88
926
+ },
927
+ {
928
+ "clip_ratio/high_max": 0.0,
929
+ "clip_ratio/high_mean": 0.0,
930
+ "clip_ratio/low_mean": 0.3232421875,
931
+ "clip_ratio/low_min": 0.20703125,
932
+ "clip_ratio/region_mean": 0.3232421875,
933
+ "completions/clipped_ratio": 0.0,
934
+ "completions/max_length": 1791.0,
935
+ "completions/max_terminated_length": 1791.0,
936
+ "completions/mean_length": 827.90234375,
937
+ "completions/mean_terminated_length": 827.90234375,
938
+ "completions/min_length": 6.0,
939
+ "completions/min_terminated_length": 6.0,
940
+ "entropy": 3.5320966094732285,
941
+ "epoch": 1.0,
942
+ "frac_reward_zero_std": 0.0,
943
+ "grad_norm": 1.9927502870559692,
944
+ "learning_rate": 1e-05,
945
+ "loss": 0.0361,
946
+ "num_tokens": 74071696.0,
947
+ "reward": 3.5067601203918457,
948
+ "reward_std": 0.13380564749240875,
949
+ "rewards/ngram_repetition2/mean": 0.8853453397750854,
950
+ "rewards/ngram_repetition2/std": 0.022779002785682678,
951
+ "rewards/ngram_repetition3/mean": 0.9901328682899475,
952
+ "rewards/ngram_repetition3/std": 0.005021216347813606,
953
+ "rewards/symbolic_reward_accuracy/mean": 0.7734375,
954
+ "rewards/symbolic_reward_accuracy/std": 0.4187093675136566,
955
+ "rewards/symbolic_reward_partial_score/mean": 0.9352620840072632,
956
+ "rewards/symbolic_reward_partial_score/std": 0.1416569948196411,
957
+ "rewards/tag_count_reward/mean": 0.99609375,
958
+ "rewards/tag_count_reward/std": 0.04403195157647133,
959
+ "rewards/thinking_answer_ratio_reward/mean": 0.9774473905563354,
960
+ "rewards/thinking_answer_ratio_reward/std": 0.06120087578892708,
961
+ "sampling/importance_sampling_ratio/max": 2.0,
962
+ "sampling/importance_sampling_ratio/mean": 1.6360163688659668,
963
+ "sampling/importance_sampling_ratio/min": 1.9057076769968262e-06,
964
+ "sampling/sampling_logp_difference/max": 13.17065715789795,
965
+ "sampling/sampling_logp_difference/mean": 1.0849487781524658,
966
+ "step": 92
967
+ },
968
+ {
969
+ "epoch": 1.0,
970
+ "eval_clip_ratio/high_max": 0.0,
971
+ "eval_clip_ratio/high_mean": 0.0,
972
+ "eval_clip_ratio/low_mean": 0.0,
973
+ "eval_clip_ratio/low_min": 0.0,
974
+ "eval_clip_ratio/region_mean": 0.0,
975
+ "eval_completions/clipped_ratio": 0.0,
976
+ "eval_completions/max_length": 1317.4736842105262,
977
+ "eval_completions/max_terminated_length": 1317.4736842105262,
978
+ "eval_completions/mean_length": 848.6509046052631,
979
+ "eval_completions/mean_terminated_length": 848.6509046052631,
980
+ "eval_completions/min_length": 408.2105263157895,
981
+ "eval_completions/min_terminated_length": 408.2105263157895,
982
+ "eval_entropy": 3.616316644768966,
983
+ "eval_frac_reward_zero_std": 0.0,
984
+ "eval_loss": 0.008092080242931843,
985
+ "eval_num_tokens": 74071696.0,
986
+ "eval_reward": 3.544994504828202,
987
+ "eval_reward_std": 0.13573118780914228,
988
+ "eval_rewards/ngram_repetition2/mean": 0.8825263663342124,
989
+ "eval_rewards/ngram_repetition2/std": 0.021967738376636254,
990
+ "eval_rewards/ngram_repetition3/mean": 0.9900523204552499,
991
+ "eval_rewards/ngram_repetition3/std": 0.00497045870380182,
992
+ "eval_rewards/symbolic_reward_accuracy/mean": 0.7894736842105263,
993
+ "eval_rewards/symbolic_reward_accuracy/std": 0.36093331324426753,
994
+ "eval_rewards/symbolic_reward_partial_score/mean": 0.9397957826915541,
995
+ "eval_rewards/symbolic_reward_partial_score/std": 0.12393280491232872,
996
+ "eval_rewards/tag_count_reward/mean": 0.9977384868421053,
997
+ "eval_rewards/tag_count_reward/std": 0.022835058405211096,
998
+ "eval_rewards/thinking_answer_ratio_reward/mean": 0.9787100898592096,
999
+ "eval_rewards/thinking_answer_ratio_reward/std": 0.04720702974468862,
1000
+ "eval_runtime": 701.8881,
1001
+ "eval_samples_per_second": 0.214,
1002
+ "eval_sampling/importance_sampling_ratio/max": 2.0,
1003
+ "eval_sampling/importance_sampling_ratio/mean": 1.632221924631219,
1004
+ "eval_sampling/importance_sampling_ratio/min": 0.004045612331920941,
1005
+ "eval_sampling/sampling_logp_difference/max": 5.546152215254934,
1006
+ "eval_sampling/sampling_logp_difference/mean": 1.0775353092896311,
1007
+ "eval_steps_per_second": 0.003,
1008
+ "step": 92
1009
+ },
1010
+ {
1011
+ "clip_ratio/high_max": 0.0703125,
1012
+ "clip_ratio/high_mean": 0.02294921875,
1013
+ "clip_ratio/low_mean": 0.35302734375,
1014
+ "clip_ratio/low_min": 0.21875,
1015
+ "clip_ratio/region_mean": 0.3759765625,
1016
+ "completions/clipped_ratio": 0.0,
1017
+ "completions/max_length": 1500.0,
1018
+ "completions/max_terminated_length": 1500.0,
1019
+ "completions/mean_length": 857.35693359375,
1020
+ "completions/mean_terminated_length": 857.35693359375,
1021
+ "completions/min_length": 216.0,
1022
+ "completions/min_terminated_length": 216.0,
1023
+ "entropy": 3.729607880115509,
1024
+ "epoch": 1.0434782608695652,
1025
+ "frac_reward_zero_std": 0.0,
1026
+ "grad_norm": 0.33806663751602173,
1027
+ "learning_rate": 1e-05,
1028
+ "loss": 0.0364,
1029
+ "num_tokens": 77877483.0,
1030
+ "reward": 3.549053192138672,
1031
+ "reward_std": 0.09654223173856735,
1032
+ "rewards/ngram_repetition2/mean": 0.8818784952163696,
1033
+ "rewards/ngram_repetition2/std": 0.022474652156233788,
1034
+ "rewards/ngram_repetition3/mean": 0.9899985790252686,
1035
+ "rewards/ngram_repetition3/std": 0.005039301700890064,
1036
+ "rewards/symbolic_reward_accuracy/mean": 0.78662109375,
1037
+ "rewards/symbolic_reward_accuracy/std": 0.40979304909706116,
1038
+ "rewards/symbolic_reward_partial_score/mean": 0.94775390625,
1039
+ "rewards/symbolic_reward_partial_score/std": 0.11653388291597366,
1040
+ "rewards/tag_count_reward/mean": 0.99951171875,
1041
+ "rewards/tag_count_reward/std": 0.015621182508766651,
1042
+ "rewards/thinking_answer_ratio_reward/mean": 0.9826548099517822,
1043
+ "rewards/thinking_answer_ratio_reward/std": 0.024480855092406273,
1044
+ "sampling/importance_sampling_ratio/max": 2.0,
1045
+ "sampling/importance_sampling_ratio/mean": 1.6328657865524292,
1046
+ "sampling/importance_sampling_ratio/min": 0.0006916878628544509,
1047
+ "sampling/sampling_logp_difference/max": 7.276375770568848,
1048
+ "sampling/sampling_logp_difference/mean": 1.0774481296539307,
1049
+ "step": 96
1050
+ },
1051
+ {
1052
+ "clip_ratio/high_max": 0.00390625,
1053
+ "clip_ratio/high_mean": 0.00048828125,
1054
+ "clip_ratio/low_mean": 0.3291015625,
1055
+ "clip_ratio/low_min": 0.20703125,
1056
+ "clip_ratio/region_mean": 0.32958984375,
1057
+ "completions/clipped_ratio": 0.0,
1058
+ "completions/max_length": 1438.0,
1059
+ "completions/max_terminated_length": 1438.0,
1060
+ "completions/mean_length": 829.27978515625,
1061
+ "completions/mean_terminated_length": 829.27978515625,
1062
+ "completions/min_length": 286.0,
1063
+ "completions/min_terminated_length": 286.0,
1064
+ "entropy": 3.5110934525728226,
1065
+ "epoch": 1.0869565217391304,
1066
+ "frac_reward_zero_std": 0.0,
1067
+ "grad_norm": 2.883220672607422,
1068
+ "learning_rate": 1e-05,
1069
+ "loss": 0.0346,
1070
+ "num_tokens": 81641608.0,
1071
+ "reward": 3.5197534561157227,
1072
+ "reward_std": 0.09288936108350754,
1073
+ "rewards/ngram_repetition2/mean": 0.882154107093811,
1074
+ "rewards/ngram_repetition2/std": 0.02149037830531597,
1075
+ "rewards/ngram_repetition3/mean": 0.9907884001731873,
1076
+ "rewards/ngram_repetition3/std": 0.004652977455407381,
1077
+ "rewards/symbolic_reward_accuracy/mean": 0.77392578125,
1078
+ "rewards/symbolic_reward_accuracy/std": 0.4183899462223053,
1079
+ "rewards/symbolic_reward_partial_score/mean": 0.944091796875,
1080
+ "rewards/symbolic_reward_partial_score/std": 0.11925595253705978,
1081
+ "rewards/tag_count_reward/mean": 0.999267578125,
1082
+ "rewards/tag_count_reward/std": 0.019127286970615387,
1083
+ "rewards/thinking_answer_ratio_reward/mean": 0.9813138246536255,
1084
+ "rewards/thinking_answer_ratio_reward/std": 0.031017476692795753,
1085
+ "sampling/importance_sampling_ratio/max": 2.0,
1086
+ "sampling/importance_sampling_ratio/mean": 1.6330723762512207,
1087
+ "sampling/importance_sampling_ratio/min": 0.002558924490585923,
1088
+ "sampling/sampling_logp_difference/max": 5.968168258666992,
1089
+ "sampling/sampling_logp_difference/mean": 1.0812547206878662,
1090
+ "step": 100
1091
+ },
1092
+ {
1093
+ "clip_ratio/high_max": 0.0625,
1094
+ "clip_ratio/high_mean": 0.02294921875,
1095
+ "clip_ratio/low_mean": 0.36181640625,
1096
+ "clip_ratio/low_min": 0.21875,
1097
+ "clip_ratio/region_mean": 0.384765625,
1098
+ "completions/clipped_ratio": 0.0,
1099
+ "completions/max_length": 1466.0,
1100
+ "completions/max_terminated_length": 1466.0,
1101
+ "completions/mean_length": 860.47021484375,
1102
+ "completions/mean_terminated_length": 860.47021484375,
1103
+ "completions/min_length": 216.0,
1104
+ "completions/min_terminated_length": 216.0,
1105
+ "entropy": 3.9600134193897247,
1106
+ "epoch": 1.1304347826086956,
1107
+ "frac_reward_zero_std": 0.0,
1108
+ "grad_norm": 0.501100480556488,
1109
+ "learning_rate": 1e-05,
1110
+ "loss": 0.0304,
1111
+ "num_tokens": 85434763.0,
1112
+ "reward": 3.551880359649658,
1113
+ "reward_std": 0.08972637355327606,
1114
+ "rewards/ngram_repetition2/mean": 0.888477623462677,
1115
+ "rewards/ngram_repetition2/std": 0.0196506567299366,
1116
+ "rewards/ngram_repetition3/mean": 0.9930633306503296,
1117
+ "rewards/ngram_repetition3/std": 0.004272008314728737,
1118
+ "rewards/symbolic_reward_accuracy/mean": 0.7890625,
1119
+ "rewards/symbolic_reward_accuracy/std": 0.408073753118515,
1120
+ "rewards/symbolic_reward_partial_score/mean": 0.9453531503677368,
1121
+ "rewards/symbolic_reward_partial_score/std": 0.12117670476436615,
1122
+ "rewards/tag_count_reward/mean": 0.999755859375,
1123
+ "rewards/tag_count_reward/std": 0.011048543266952038,
1124
+ "rewards/thinking_answer_ratio_reward/mean": 0.9830958843231201,
1125
+ "rewards/thinking_answer_ratio_reward/std": 0.022135263308882713,
1126
+ "sampling/importance_sampling_ratio/max": 2.0,
1127
+ "sampling/importance_sampling_ratio/mean": 1.631166934967041,
1128
+ "sampling/importance_sampling_ratio/min": 0.0002176268317271024,
1129
+ "sampling/sampling_logp_difference/max": 8.43272876739502,
1130
+ "sampling/sampling_logp_difference/mean": 1.10259211063385,
1131
+ "step": 104
1132
+ },
1133
+ {
1134
+ "clip_ratio/high_max": 0.0,
1135
+ "clip_ratio/high_mean": 0.0,
1136
+ "clip_ratio/low_mean": 0.302734375,
1137
+ "clip_ratio/low_min": 0.1796875,
1138
+ "clip_ratio/region_mean": 0.302734375,
1139
+ "completions/clipped_ratio": 0.0,
1140
+ "completions/max_length": 1480.0,
1141
+ "completions/max_terminated_length": 1480.0,
1142
+ "completions/mean_length": 881.1708984375,
1143
+ "completions/mean_terminated_length": 881.1708984375,
1144
+ "completions/min_length": 5.0,
1145
+ "completions/min_terminated_length": 5.0,
1146
+ "entropy": 3.68235644698143,
1147
+ "epoch": 1.1739130434782608,
1148
+ "frac_reward_zero_std": 0.0,
1149
+ "grad_norm": 2.7818312644958496,
1150
+ "learning_rate": 1e-05,
1151
+ "loss": 0.0286,
1152
+ "num_tokens": 89267145.0,
1153
+ "reward": 3.489163398742676,
1154
+ "reward_std": 0.21473455429077148,
1155
+ "rewards/ngram_repetition2/mean": 0.8834545612335205,
1156
+ "rewards/ngram_repetition2/std": 0.02104972116649151,
1157
+ "rewards/ngram_repetition3/mean": 0.9921650886535645,
1158
+ "rewards/ngram_repetition3/std": 0.004745583515614271,
1159
+ "rewards/symbolic_reward_accuracy/mean": 0.77099609375,
1160
+ "rewards/symbolic_reward_accuracy/std": 0.4202943742275238,
1161
+ "rewards/symbolic_reward_partial_score/mean": 0.9245198965072632,
1162
+ "rewards/symbolic_reward_partial_score/std": 0.16520294547080994,
1163
+ "rewards/tag_count_reward/mean": 0.994140625,
1164
+ "rewards/tag_count_reward/std": 0.05382164567708969,
1165
+ "rewards/thinking_answer_ratio_reward/mean": 0.9754455089569092,
1166
+ "rewards/thinking_answer_ratio_reward/std": 0.08314761519432068,
1167
+ "sampling/importance_sampling_ratio/max": 2.0,
1168
+ "sampling/importance_sampling_ratio/mean": 1.6302025318145752,
1169
+ "sampling/importance_sampling_ratio/min": 0.0031186703126877546,
1170
+ "sampling/sampling_logp_difference/max": 5.77034854888916,
1171
+ "sampling/sampling_logp_difference/mean": 1.0910520553588867,
1172
+ "step": 108
1173
+ },
1174
+ {
1175
+ "clip_ratio/high_max": 0.0,
1176
+ "clip_ratio/high_mean": 0.0,
1177
+ "clip_ratio/low_mean": 0.32080078125,
1178
+ "clip_ratio/low_min": 0.16015625,
1179
+ "clip_ratio/region_mean": 0.32080078125,
1180
+ "completions/clipped_ratio": 0.0,
1181
+ "completions/max_length": 1600.0,
1182
+ "completions/max_terminated_length": 1600.0,
1183
+ "completions/mean_length": 1038.5830078125,
1184
+ "completions/mean_terminated_length": 1038.5830078125,
1185
+ "completions/min_length": 259.0,
1186
+ "completions/min_terminated_length": 259.0,
1187
+ "entropy": 3.8035393804311752,
1188
+ "epoch": 1.2173913043478262,
1189
+ "frac_reward_zero_std": 0.0,
1190
+ "grad_norm": 3.930414915084839,
1191
+ "learning_rate": 1e-05,
1192
+ "loss": 0.0317,
1193
+ "num_tokens": 93412403.0,
1194
+ "reward": 3.636284112930298,
1195
+ "reward_std": 0.20913578569889069,
1196
+ "rewards/ngram_repetition2/mean": 0.8412303924560547,
1197
+ "rewards/ngram_repetition2/std": 0.019966477528214455,
1198
+ "rewards/ngram_repetition3/mean": 0.9881943464279175,
1199
+ "rewards/ngram_repetition3/std": 0.005055025219917297,
1200
+ "rewards/symbolic_reward_accuracy/mean": 0.82958984375,
1201
+ "rewards/symbolic_reward_accuracy/std": 0.376084566116333,
1202
+ "rewards/symbolic_reward_partial_score/mean": 0.950439453125,
1203
+ "rewards/symbolic_reward_partial_score/std": 0.12963470816612244,
1204
+ "rewards/tag_count_reward/mean": 0.99853515625,
1205
+ "rewards/tag_count_reward/std": 0.02703022211790085,
1206
+ "rewards/thinking_answer_ratio_reward/mean": 0.9835522770881653,
1207
+ "rewards/thinking_answer_ratio_reward/std": 0.04572867602109909,
1208
+ "sampling/importance_sampling_ratio/max": 2.0,
1209
+ "sampling/importance_sampling_ratio/mean": 1.6397275924682617,
1210
+ "sampling/importance_sampling_ratio/min": 1.4277229638537392e-06,
1211
+ "sampling/sampling_logp_difference/max": 13.459429740905762,
1212
+ "sampling/sampling_logp_difference/mean": 1.105949878692627,
1213
+ "step": 112
1214
+ },
1215
+ {
1216
+ "clip_ratio/high_max": 0.02734375,
1217
+ "clip_ratio/high_mean": 0.00634765625,
1218
+ "clip_ratio/low_mean": 0.32861328125,
1219
+ "clip_ratio/low_min": 0.1953125,
1220
+ "clip_ratio/region_mean": 0.3349609375,
1221
+ "completions/clipped_ratio": 0.0,
1222
+ "completions/max_length": 1575.0,
1223
+ "completions/max_terminated_length": 1575.0,
1224
+ "completions/mean_length": 1060.87890625,
1225
+ "completions/mean_terminated_length": 1060.87890625,
1226
+ "completions/min_length": 495.0,
1227
+ "completions/min_terminated_length": 495.0,
1228
+ "entropy": 3.8799190521240234,
1229
+ "epoch": 1.2608695652173914,
1230
+ "frac_reward_zero_std": 0.0,
1231
+ "grad_norm": 1.9784842729568481,
1232
+ "learning_rate": 1e-05,
1233
+ "loss": 0.0206,
1234
+ "num_tokens": 97660347.0,
1235
+ "reward": 3.4817919731140137,
1236
+ "reward_std": 0.123238705098629,
1237
+ "rewards/ngram_repetition2/mean": 0.8279547691345215,
1238
+ "rewards/ngram_repetition2/std": 0.01901470310986042,
1239
+ "rewards/ngram_repetition3/mean": 0.9874847531318665,
1240
+ "rewards/ngram_repetition3/std": 0.004876126069575548,
1241
+ "rewards/symbolic_reward_accuracy/mean": 0.765625,
1242
+ "rewards/symbolic_reward_accuracy/std": 0.42371100187301636,
1243
+ "rewards/symbolic_reward_partial_score/mean": 0.9267171621322632,
1244
+ "rewards/symbolic_reward_partial_score/std": 0.16115300357341766,
1245
+ "rewards/tag_count_reward/mean": 0.995849609375,
1246
+ "rewards/tag_count_reward/std": 0.04537592828273773,
1247
+ "rewards/thinking_answer_ratio_reward/mean": 0.9820874333381653,
1248
+ "rewards/thinking_answer_ratio_reward/std": 0.05360926315188408,
1249
+ "sampling/importance_sampling_ratio/max": 2.0,
1250
+ "sampling/importance_sampling_ratio/mean": 1.6399784088134766,
1251
+ "sampling/importance_sampling_ratio/min": 0.00026858848286792636,
1252
+ "sampling/sampling_logp_difference/max": 8.222330093383789,
1253
+ "sampling/sampling_logp_difference/mean": 1.0847612619400024,
1254
+ "step": 116
1255
+ },
1256
+ {
1257
+ "clip_ratio/high_max": 0.0,
1258
+ "clip_ratio/high_mean": 0.0,
1259
+ "clip_ratio/low_mean": 0.34912109375,
1260
+ "clip_ratio/low_min": 0.22265625,
1261
+ "clip_ratio/region_mean": 0.34912109375,
1262
+ "completions/clipped_ratio": 0.0,
1263
+ "completions/max_length": 1566.0,
1264
+ "completions/max_terminated_length": 1566.0,
1265
+ "completions/mean_length": 1056.3486328125,
1266
+ "completions/mean_terminated_length": 1056.3486328125,
1267
+ "completions/min_length": 396.0,
1268
+ "completions/min_terminated_length": 396.0,
1269
+ "entropy": 4.022845968604088,
1270
+ "epoch": 1.3043478260869565,
1271
+ "frac_reward_zero_std": 0.0,
1272
+ "grad_norm": 3.9172134399414062,
1273
+ "learning_rate": 1e-05,
1274
+ "loss": 0.0336,
1275
+ "num_tokens": 101886341.0,
1276
+ "reward": 3.4570488929748535,
1277
+ "reward_std": 0.0775345116853714,
1278
+ "rewards/ngram_repetition2/mean": 0.8566447496414185,
1279
+ "rewards/ngram_repetition2/std": 0.017400307580828667,
1280
+ "rewards/ngram_repetition3/mean": 0.991142988204956,
1281
+ "rewards/ngram_repetition3/std": 0.004262521397322416,
1282
+ "rewards/symbolic_reward_accuracy/mean": 0.74951171875,
1283
+ "rewards/symbolic_reward_accuracy/std": 0.43340006470680237,
1284
+ "rewards/symbolic_reward_partial_score/mean": 0.929931640625,
1285
+ "rewards/symbolic_reward_partial_score/std": 0.146311417222023,
1286
+ "rewards/tag_count_reward/mean": 0.999755859375,
1287
+ "rewards/tag_count_reward/std": 0.011048543266952038,
1288
+ "rewards/thinking_answer_ratio_reward/mean": 0.985991895198822,
1289
+ "rewards/thinking_answer_ratio_reward/std": 0.007111483719199896,
1290
+ "sampling/importance_sampling_ratio/max": 2.0,
1291
+ "sampling/importance_sampling_ratio/mean": 1.6356934309005737,
1292
+ "sampling/importance_sampling_ratio/min": 0.0025200077798217535,
1293
+ "sampling/sampling_logp_difference/max": 5.983493328094482,
1294
+ "sampling/sampling_logp_difference/mean": 1.1201770305633545,
1295
+ "step": 120
1296
+ },
1297
+ {
1298
+ "clip_ratio/high_max": 0.015625,
1299
+ "clip_ratio/high_mean": 0.001953125,
1300
+ "clip_ratio/low_mean": 0.36181640625,
1301
+ "clip_ratio/low_min": 0.25,
1302
+ "clip_ratio/region_mean": 0.36376953125,
1303
+ "completions/clipped_ratio": 0.0,
1304
+ "completions/max_length": 1387.0,
1305
+ "completions/max_terminated_length": 1387.0,
1306
+ "completions/mean_length": 929.70458984375,
1307
+ "completions/mean_terminated_length": 929.70458984375,
1308
+ "completions/min_length": 29.0,
1309
+ "completions/min_terminated_length": 29.0,
1310
+ "entropy": 4.241550624370575,
1311
+ "epoch": 1.3478260869565217,
1312
+ "frac_reward_zero_std": 0.0,
1313
+ "grad_norm": 0.469667911529541,
1314
+ "learning_rate": 1e-05,
1315
+ "loss": 0.0194,
1316
+ "num_tokens": 105843464.0,
1317
+ "reward": 3.478210926055908,
1318
+ "reward_std": 0.06497098505496979,
1319
+ "rewards/ngram_repetition2/mean": 0.9079857468605042,
1320
+ "rewards/ngram_repetition2/std": 0.014988835901021957,
1321
+ "rewards/ngram_repetition3/mean": 0.995934009552002,
1322
+ "rewards/ngram_repetition3/std": 0.0035896108020097017,
1323
+ "rewards/symbolic_reward_accuracy/mean": 0.755859375,
1324
+ "rewards/symbolic_reward_accuracy/std": 0.4296814203262329,
1325
+ "rewards/symbolic_reward_partial_score/mean": 0.9385986328125,
1326
+ "rewards/symbolic_reward_partial_score/std": 0.1285245567560196,
1327
+ "rewards/tag_count_reward/mean": 0.9990234375,
1328
+ "rewards/tag_count_reward/std": 0.022080888971686363,
1329
+ "rewards/thinking_answer_ratio_reward/mean": 0.9830856323242188,
1330
+ "rewards/thinking_answer_ratio_reward/std": 0.04350229352712631,
1331
+ "sampling/importance_sampling_ratio/max": 2.0,
1332
+ "sampling/importance_sampling_ratio/mean": 1.624282956123352,
1333
+ "sampling/importance_sampling_ratio/min": 0.0017993793589994311,
1334
+ "sampling/sampling_logp_difference/max": 6.320313453674316,
1335
+ "sampling/sampling_logp_difference/mean": 1.111283779144287,
1336
+ "step": 124
1337
+ },
1338
+ {
1339
+ "clip_ratio/high_max": 0.0,
1340
+ "clip_ratio/high_mean": 0.0,
1341
+ "clip_ratio/low_mean": 0.36279296875,
1342
+ "clip_ratio/low_min": 0.234375,
1343
+ "clip_ratio/region_mean": 0.36279296875,
1344
+ "completions/clipped_ratio": 0.0,
1345
+ "completions/max_length": 1281.0,
1346
+ "completions/max_terminated_length": 1281.0,
1347
+ "completions/mean_length": 887.55078125,
1348
+ "completions/mean_terminated_length": 887.55078125,
1349
+ "completions/min_length": 222.0,
1350
+ "completions/min_terminated_length": 222.0,
1351
+ "entropy": 4.1487970650196075,
1352
+ "epoch": 1.391304347826087,
1353
+ "frac_reward_zero_std": 0.0,
1354
+ "grad_norm": 0.6331172585487366,
1355
+ "learning_rate": 1e-05,
1356
+ "loss": 0.018,
1357
+ "num_tokens": 109714256.0,
1358
+ "reward": 3.4955267906188965,
1359
+ "reward_std": 0.07705140858888626,
1360
+ "rewards/ngram_repetition2/mean": 0.9199254512786865,
1361
+ "rewards/ngram_repetition2/std": 0.01465023122727871,
1362
+ "rewards/ngram_repetition3/mean": 0.9966325163841248,
1363
+ "rewards/ngram_repetition3/std": 0.0036702074576169252,
1364
+ "rewards/symbolic_reward_accuracy/mean": 0.76416015625,
1365
+ "rewards/symbolic_reward_accuracy/std": 0.4246262311935425,
1366
+ "rewards/symbolic_reward_partial_score/mean": 0.940185546875,
1367
+ "rewards/symbolic_reward_partial_score/std": 0.1274607628583908,
1368
+ "rewards/tag_count_reward/mean": 0.998046875,
1369
+ "rewards/tag_count_reward/std": 0.03119652159512043,
1370
+ "rewards/thinking_answer_ratio_reward/mean": 0.9808384776115417,
1371
+ "rewards/thinking_answer_ratio_reward/std": 0.05348685011267662,
1372
+ "sampling/importance_sampling_ratio/max": 2.0,
1373
+ "sampling/importance_sampling_ratio/mean": 1.6253031492233276,
1374
+ "sampling/importance_sampling_ratio/min": 0.0035080641973763704,
1375
+ "sampling/sampling_logp_difference/max": 5.652690887451172,
1376
+ "sampling/sampling_logp_difference/mean": 1.1522209644317627,
1377
+ "step": 128
1378
+ },
1379
+ {
+ "clip_ratio/high_max": 0.0,
+ "clip_ratio/high_mean": 0.0,
+ "clip_ratio/low_mean": 0.32470703125,
+ "clip_ratio/low_min": 0.1953125,
+ "clip_ratio/region_mean": 0.32470703125,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 1373.0,
+ "completions/max_terminated_length": 1373.0,
+ "completions/mean_length": 918.24755859375,
+ "completions/mean_terminated_length": 918.24755859375,
+ "completions/min_length": 13.0,
+ "completions/min_terminated_length": 13.0,
+ "entropy": 4.297388672828674,
+ "epoch": 1.434782608695652,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 1.1936229467391968,
+ "learning_rate": 1e-05,
+ "loss": 0.0167,
+ "num_tokens": 113647915.0,
+ "reward": 3.6550018787384033,
+ "reward_std": 0.12648092210292816,
+ "rewards/ngram_repetition2/mean": 0.9229152798652649,
+ "rewards/ngram_repetition2/std": 0.014652576297521591,
+ "rewards/ngram_repetition3/mean": 0.9968780279159546,
+ "rewards/ngram_repetition3/std": 0.004642123356461525,
+ "rewards/symbolic_reward_accuracy/mean": 0.837890625,
+ "rewards/symbolic_reward_accuracy/std": 0.3686411380767822,
+ "rewards/symbolic_reward_partial_score/mean": 0.9538980722427368,
+ "rewards/symbolic_reward_partial_score/std": 0.12867037951946259,
+ "rewards/tag_count_reward/mean": 0.996337890625,
+ "rewards/tag_count_reward/std": 0.04264424368739128,
+ "rewards/thinking_answer_ratio_reward/mean": 0.9786805510520935,
+ "rewards/thinking_answer_ratio_reward/std": 0.07263471931219101,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.6224591732025146,
+ "sampling/importance_sampling_ratio/min": 0.00012925604823976755,
+ "sampling/sampling_logp_difference/max": 8.953715324401855,
+ "sampling/sampling_logp_difference/mean": 1.143388032913208,
+ "step": 132
+ },
+ {
+ "clip_ratio/high_max": 0.00390625,
+ "clip_ratio/high_mean": 0.00048828125,
+ "clip_ratio/low_mean": 0.33984375,
+ "clip_ratio/low_min": 0.22265625,
+ "clip_ratio/region_mean": 0.34033203125,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 1310.0,
+ "completions/max_terminated_length": 1310.0,
+ "completions/mean_length": 871.56787109375,
+ "completions/mean_terminated_length": 871.56787109375,
+ "completions/min_length": 269.0,
+ "completions/min_terminated_length": 269.0,
+ "entropy": 4.470572978258133,
+ "epoch": 1.4782608695652173,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 0.7518815398216248,
+ "learning_rate": 1e-05,
+ "loss": 0.0146,
+ "num_tokens": 117492310.0,
+ "reward": 3.562993049621582,
+ "reward_std": 0.1021745428442955,
+ "rewards/ngram_repetition2/mean": 0.9367643594741821,
+ "rewards/ngram_repetition2/std": 0.01393189001828432,
+ "rewards/ngram_repetition3/mean": 0.9970389604568481,
+ "rewards/ngram_repetition3/std": 0.0056902337819337845,
+ "rewards/symbolic_reward_accuracy/mean": 0.7998046875,
+ "rewards/symbolic_reward_accuracy/std": 0.4002441465854645,
+ "rewards/symbolic_reward_partial_score/mean": 0.9386392831802368,
+ "rewards/symbolic_reward_partial_score/std": 0.15098440647125244,
+ "rewards/tag_count_reward/mean": 0.99560546875,
+ "rewards/tag_count_reward/std": 0.04667995125055313,
+ "rewards/thinking_answer_ratio_reward/mean": 0.980078399181366,
+ "rewards/thinking_answer_ratio_reward/std": 0.04658709093928337,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.610716462135315,
+ "sampling/importance_sampling_ratio/min": 0.0008395969052799046,
+ "sampling/sampling_logp_difference/max": 7.0825886726379395,
+ "sampling/sampling_logp_difference/mean": 1.1626429557800293,
+ "step": 136
+ },
+ {
+ "clip_ratio/high_max": 0.00390625,
+ "clip_ratio/high_mean": 0.00048828125,
+ "clip_ratio/low_mean": 0.3232421875,
+ "clip_ratio/low_min": 0.19140625,
+ "clip_ratio/region_mean": 0.32373046875,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 1429.0,
+ "completions/max_terminated_length": 1429.0,
+ "completions/mean_length": 977.38232421875,
+ "completions/mean_terminated_length": 977.38232421875,
+ "completions/min_length": 74.0,
+ "completions/min_terminated_length": 74.0,
+ "entropy": 4.514361083507538,
+ "epoch": 1.5217391304347827,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 0.8003511428833008,
+ "learning_rate": 1e-05,
+ "loss": 0.0119,
+ "num_tokens": 121528069.0,
+ "reward": 3.4866585731506348,
+ "reward_std": 0.09917771071195602,
+ "rewards/ngram_repetition2/mean": 0.9362497925758362,
+ "rewards/ngram_repetition2/std": 0.013396705500781536,
+ "rewards/ngram_repetition3/mean": 0.9971665143966675,
+ "rewards/ngram_repetition3/std": 0.00501285633072257,
+ "rewards/symbolic_reward_accuracy/mean": 0.76416015625,
+ "rewards/symbolic_reward_accuracy/std": 0.4246262311935425,
+ "rewards/symbolic_reward_partial_score/mean": 0.933837890625,
+ "rewards/symbolic_reward_partial_score/std": 0.15093302726745605,
+ "rewards/tag_count_reward/mean": 0.995361328125,
+ "rewards/tag_count_reward/std": 0.04794727638363838,
+ "rewards/thinking_answer_ratio_reward/mean": 0.9805018901824951,
+ "rewards/thinking_answer_ratio_reward/std": 0.06314614415168762,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.6080012321472168,
+ "sampling/importance_sampling_ratio/min": 0.00030363252153620124,
+ "sampling/sampling_logp_difference/max": 8.099692344665527,
+ "sampling/sampling_logp_difference/mean": 1.1383968591690063,
+ "step": 140
+ },
+ {
+ "clip_ratio/high_max": 0.03125,
+ "clip_ratio/high_mean": 0.00830078125,
+ "clip_ratio/low_mean": 0.2998046875,
+ "clip_ratio/low_min": 0.16796875,
+ "clip_ratio/region_mean": 0.30810546875,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 1363.0,
+ "completions/max_terminated_length": 1363.0,
+ "completions/mean_length": 898.19921875,
+ "completions/mean_terminated_length": 898.19921875,
+ "completions/min_length": 324.0,
+ "completions/min_terminated_length": 324.0,
+ "entropy": 4.418534815311432,
+ "epoch": 1.5652173913043477,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 0.44075483083724976,
+ "learning_rate": 1e-05,
+ "loss": 0.013,
+ "num_tokens": 125398493.0,
+ "reward": 3.593691825866699,
+ "reward_std": 0.1295282244682312,
+ "rewards/ngram_repetition2/mean": 0.9471422433853149,
+ "rewards/ngram_repetition2/std": 0.012752486392855644,
+ "rewards/ngram_repetition3/mean": 0.9974905252456665,
+ "rewards/ngram_repetition3/std": 0.005424594972282648,
+ "rewards/symbolic_reward_accuracy/mean": 0.81396484375,
+ "rewards/symbolic_reward_accuracy/std": 0.38923007249832153,
+ "rewards/symbolic_reward_partial_score/mean": 0.94189453125,
+ "rewards/symbolic_reward_partial_score/std": 0.1490928679704666,
+ "rewards/tag_count_reward/mean": 0.99462890625,
+ "rewards/tag_count_reward/std": 0.051555756479501724,
+ "rewards/thinking_answer_ratio_reward/mean": 0.9792462587356567,
+ "rewards/thinking_answer_ratio_reward/std": 0.05640830844640732,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.607062816619873,
+ "sampling/importance_sampling_ratio/min": 0.0016754436073824763,
+ "sampling/sampling_logp_difference/max": 6.391677379608154,
+ "sampling/sampling_logp_difference/mean": 1.144271731376648,
+ "step": 144
+ },
+ {
+ "clip_ratio/high_max": 0.09375,
+ "clip_ratio/high_mean": 0.0263671875,
+ "clip_ratio/low_mean": 0.30810546875,
+ "clip_ratio/low_min": 0.16015625,
+ "clip_ratio/region_mean": 0.33447265625,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 1594.0,
+ "completions/max_terminated_length": 1594.0,
+ "completions/mean_length": 1067.33642578125,
+ "completions/mean_terminated_length": 1067.33642578125,
+ "completions/min_length": 39.0,
+ "completions/min_terminated_length": 39.0,
+ "entropy": 4.551530241966248,
+ "epoch": 1.608695652173913,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 0.3275977373123169,
+ "learning_rate": 1e-05,
+ "loss": 0.0142,
+ "num_tokens": 129669166.0,
+ "reward": 3.5267133712768555,
+ "reward_std": 0.0681791827082634,
+ "rewards/ngram_repetition2/mean": 0.9426812529563904,
+ "rewards/ngram_repetition2/std": 0.01187061332166195,
+ "rewards/ngram_repetition3/mean": 0.9978224039077759,
+ "rewards/ngram_repetition3/std": 0.00368481851182878,
+ "rewards/symbolic_reward_accuracy/mean": 0.77880859375,
+ "rewards/symbolic_reward_accuracy/std": 0.4151504933834076,
+ "rewards/symbolic_reward_partial_score/mean": 0.9430338144302368,
+ "rewards/symbolic_reward_partial_score/std": 0.1309022754430771,
+ "rewards/tag_count_reward/mean": 0.996826171875,
+ "rewards/tag_count_reward/std": 0.03971915319561958,
+ "rewards/thinking_answer_ratio_reward/mean": 0.9831109046936035,
+ "rewards/thinking_answer_ratio_reward/std": 0.05386970564723015,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.6074790954589844,
+ "sampling/importance_sampling_ratio/min": 9.599540499038994e-05,
+ "sampling/sampling_logp_difference/max": 9.25121021270752,
+ "sampling/sampling_logp_difference/mean": 1.1549028158187866,
+ "step": 148
+ },
+ {
+ "clip_ratio/high_max": 0.0859375,
+ "clip_ratio/high_mean": 0.029296875,
+ "clip_ratio/low_mean": 0.32666015625,
+ "clip_ratio/low_min": 0.1953125,
+ "clip_ratio/region_mean": 0.35595703125,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 1579.0,
+ "completions/max_terminated_length": 1579.0,
+ "completions/mean_length": 1040.93359375,
+ "completions/mean_terminated_length": 1040.93359375,
+ "completions/min_length": 565.0,
+ "completions/min_terminated_length": 565.0,
+ "entropy": 4.479687809944153,
+ "epoch": 1.6521739130434783,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 0.3054217994213104,
+ "learning_rate": 1e-05,
+ "loss": 0.0146,
+ "num_tokens": 133847750.0,
+ "reward": 3.668701171875,
+ "reward_std": 0.0505342110991478,
+ "rewards/ngram_repetition2/mean": 0.9501175880432129,
+ "rewards/ngram_repetition2/std": 0.010720828548073769,
+ "rewards/ngram_repetition3/mean": 0.998192548751831,
+ "rewards/ngram_repetition3/std": 0.002623009728267789,
+ "rewards/symbolic_reward_accuracy/mean": 0.8369140625,
+ "rewards/symbolic_reward_accuracy/std": 0.36953428387641907,
+ "rewards/symbolic_reward_partial_score/mean": 0.9660237431526184,
+ "rewards/symbolic_reward_partial_score/std": 0.0860089510679245,
+ "rewards/tag_count_reward/mean": 0.99951171875,
+ "rewards/tag_count_reward/std": 0.015621182508766651,
+ "rewards/thinking_answer_ratio_reward/mean": 0.9854624271392822,
+ "rewards/thinking_answer_ratio_reward/std": 0.02365659922361374,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.607342004776001,
+ "sampling/importance_sampling_ratio/min": 2.2357276975526474e-05,
+ "sampling/sampling_logp_difference/max": 10.708358764648438,
+ "sampling/sampling_logp_difference/mean": 1.1617491245269775,
+ "step": 152
+ },
+ {
+ "clip_ratio/high_max": 0.1484375,
+ "clip_ratio/high_mean": 0.0595703125,
+ "clip_ratio/low_mean": 0.2744140625,
+ "clip_ratio/low_min": 0.16015625,
+ "clip_ratio/region_mean": 0.333984375,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 1557.0,
+ "completions/max_terminated_length": 1557.0,
+ "completions/mean_length": 1038.66357421875,
+ "completions/mean_terminated_length": 1038.66357421875,
+ "completions/min_length": 41.0,
+ "completions/min_terminated_length": 41.0,
+ "entropy": 4.511291533708572,
+ "epoch": 1.6956521739130435,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 0.3336785137653351,
+ "learning_rate": 1e-05,
+ "loss": 0.0107,
+ "num_tokens": 138043861.0,
+ "reward": 3.3625810146331787,
+ "reward_std": 0.14225080609321594,
+ "rewards/ngram_repetition2/mean": 0.9543116092681885,
+ "rewards/ngram_repetition2/std": 0.01117774099111557,
+ "rewards/ngram_repetition3/mean": 0.998005211353302,
+ "rewards/ngram_repetition3/std": 0.004470388405025005,
+ "rewards/symbolic_reward_accuracy/mean": 0.7109375,
+ "rewards/symbolic_reward_accuracy/std": 0.4534377157688141,
+ "rewards/symbolic_reward_partial_score/mean": 0.916015625,
+ "rewards/symbolic_reward_partial_score/std": 0.16625778377056122,
+ "rewards/tag_count_reward/mean": 0.995361328125,
+ "rewards/tag_count_reward/std": 0.04794727638363838,
+ "rewards/thinking_answer_ratio_reward/mean": 0.9806008338928223,
+ "rewards/thinking_answer_ratio_reward/std": 0.06250394135713577,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.6025564670562744,
+ "sampling/importance_sampling_ratio/min": 5.173232784727588e-05,
+ "sampling/sampling_logp_difference/max": 9.869427680969238,
+ "sampling/sampling_logp_difference/mean": 1.1629631519317627,
+ "step": 156
+ },
+ {
+ "clip_ratio/high_max": 0.109375,
+ "clip_ratio/high_mean": 0.04296875,
+ "clip_ratio/low_mean": 0.2861328125,
+ "clip_ratio/low_min": 0.14453125,
+ "clip_ratio/region_mean": 0.3291015625,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 1716.0,
+ "completions/max_terminated_length": 1716.0,
+ "completions/mean_length": 1049.35009765625,
+ "completions/mean_terminated_length": 1049.35009765625,
+ "completions/min_length": 10.0,
+ "completions/min_terminated_length": 10.0,
+ "entropy": 4.4311443865299225,
+ "epoch": 1.7391304347826086,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 0.22241854667663574,
+ "learning_rate": 1e-05,
+ "loss": 0.012,
+ "num_tokens": 142239682.0,
+ "reward": 3.6017355918884277,
+ "reward_std": 0.12942886352539062,
+ "rewards/ngram_repetition2/mean": 0.9581420421600342,
+ "rewards/ngram_repetition2/std": 0.01057825330644846,
+ "rewards/ngram_repetition3/mean": 0.9981793165206909,
+ "rewards/ngram_repetition3/std": 0.004097526893019676,
+ "rewards/symbolic_reward_accuracy/mean": 0.81884765625,
+ "rewards/symbolic_reward_accuracy/std": 0.3852384090423584,
+ "rewards/symbolic_reward_partial_score/mean": 0.9380696415901184,
+ "rewards/symbolic_reward_partial_score/std": 0.16267931461334229,
+ "rewards/tag_count_reward/mean": 0.99658203125,
+ "rewards/tag_count_reward/std": 0.04120838642120361,
+ "rewards/thinking_answer_ratio_reward/mean": 0.9825441837310791,
+ "rewards/thinking_answer_ratio_reward/std": 0.04960091784596443,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.6059880256652832,
+ "sampling/importance_sampling_ratio/min": 0.0016915490850806236,
+ "sampling/sampling_logp_difference/max": 6.382110595703125,
+ "sampling/sampling_logp_difference/mean": 1.1938023567199707,
+ "step": 160
+ },
+ {
+ "clip_ratio/high_max": 0.10546875,
+ "clip_ratio/high_mean": 0.046875,
+ "clip_ratio/low_mean": 0.2822265625,
+ "clip_ratio/low_min": 0.15234375,
+ "clip_ratio/region_mean": 0.3291015625,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 1622.0,
+ "completions/max_terminated_length": 1622.0,
+ "completions/mean_length": 1033.77587890625,
+ "completions/mean_terminated_length": 1033.77587890625,
+ "completions/min_length": 250.0,
+ "completions/min_terminated_length": 250.0,
+ "entropy": 4.365045428276062,
+ "epoch": 1.7826086956521738,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 0.21216052770614624,
+ "learning_rate": 1e-05,
+ "loss": 0.0114,
+ "num_tokens": 146419447.0,
+ "reward": 3.5902209281921387,
+ "reward_std": 0.1201181709766388,
+ "rewards/ngram_repetition2/mean": 0.9616591930389404,
+ "rewards/ngram_repetition2/std": 0.010357243940234184,
+ "rewards/ngram_repetition3/mean": 0.9983977675437927,
+ "rewards/ngram_repetition3/std": 0.004474774468690157,
+ "rewards/symbolic_reward_accuracy/mean": 0.8076171875,
+ "rewards/symbolic_reward_accuracy/std": 0.3942683935165405,
+ "rewards/symbolic_reward_partial_score/mean": 0.948486328125,
+ "rewards/symbolic_reward_partial_score/std": 0.12491386383771896,
+ "rewards/tag_count_reward/mean": 0.9970703125,
+ "rewards/tag_count_reward/std": 0.03817030414938927,
+ "rewards/thinking_answer_ratio_reward/mean": 0.9829385876655579,
+ "rewards/thinking_answer_ratio_reward/std": 0.04094744101166725,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.609969139099121,
+ "sampling/importance_sampling_ratio/min": 3.6088283650315134e-06,
+ "sampling/sampling_logp_difference/max": 12.532127380371094,
+ "sampling/sampling_logp_difference/mean": 1.2227305173873901,
+ "step": 164
+ },
+ {
+ "clip_ratio/high_max": 0.09765625,
+ "clip_ratio/high_mean": 0.0400390625,
+ "clip_ratio/low_mean": 0.26953125,
+ "clip_ratio/low_min": 0.15625,
+ "clip_ratio/region_mean": 0.3095703125,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 1699.0,
+ "completions/max_terminated_length": 1699.0,
+ "completions/mean_length": 1053.5283203125,
+ "completions/mean_terminated_length": 1053.5283203125,
+ "completions/min_length": 42.0,
+ "completions/min_terminated_length": 42.0,
+ "entropy": 4.248802453279495,
+ "epoch": 1.8260869565217392,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 0.31685301661491394,
+ "learning_rate": 1e-05,
+ "loss": 0.0072,
+ "num_tokens": 150655505.0,
+ "reward": 3.371926784515381,
+ "reward_std": 0.20888543128967285,
+ "rewards/ngram_repetition2/mean": 0.9610307216644287,
+ "rewards/ngram_repetition2/std": 0.011032468639314175,
+ "rewards/ngram_repetition3/mean": 0.9981548190116882,
+ "rewards/ngram_repetition3/std": 0.005917286965996027,
+ "rewards/symbolic_reward_accuracy/mean": 0.71337890625,
+ "rewards/symbolic_reward_accuracy/std": 0.45229339599609375,
+ "rewards/symbolic_reward_partial_score/mean": 0.919189453125,
+ "rewards/symbolic_reward_partial_score/std": 0.157909095287323,
+ "rewards/tag_count_reward/mean": 0.99658203125,
+ "rewards/tag_count_reward/std": 0.04120838642120361,
+ "rewards/thinking_answer_ratio_reward/mean": 0.9805506467819214,
+ "rewards/thinking_answer_ratio_reward/std": 0.06105503812432289,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.6129553318023682,
+ "sampling/importance_sampling_ratio/min": 5.888082341698464e-07,
+ "sampling/sampling_logp_difference/max": 14.345165252685547,
+ "sampling/sampling_logp_difference/mean": 1.2391889095306396,
+ "step": 168
+ },
+ {
+ "clip_ratio/high_max": 0.0625,
+ "clip_ratio/high_mean": 0.02001953125,
+ "clip_ratio/low_mean": 0.32177734375,
+ "clip_ratio/low_min": 0.19140625,
+ "clip_ratio/region_mean": 0.341796875,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 1696.0,
+ "completions/max_terminated_length": 1696.0,
+ "completions/mean_length": 1089.537109375,
+ "completions/mean_terminated_length": 1089.537109375,
+ "completions/min_length": 465.0,
+ "completions/min_terminated_length": 465.0,
+ "entropy": 4.472538262605667,
+ "epoch": 1.8695652173913042,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 0.8991731405258179,
+ "learning_rate": 1e-05,
+ "loss": 0.009,
+ "num_tokens": 154917789.0,
+ "reward": 3.514044761657715,
+ "reward_std": 0.12172482162714005,
+ "rewards/ngram_repetition2/mean": 0.9645071029663086,
+ "rewards/ngram_repetition2/std": 0.009678427129983902,
+ "rewards/ngram_repetition3/mean": 0.9983615875244141,
+ "rewards/ngram_repetition3/std": 0.004190252162516117,
+ "rewards/symbolic_reward_accuracy/mean": 0.77392578125,
+ "rewards/symbolic_reward_accuracy/std": 0.4183899462223053,
+ "rewards/symbolic_reward_partial_score/mean": 0.9379475712776184,
+ "rewards/symbolic_reward_partial_score/std": 0.13337303698062897,
+ "rewards/tag_count_reward/mean": 0.998779296875,
+ "rewards/tag_count_reward/std": 0.02468114346265793,
+ "rewards/thinking_answer_ratio_reward/mean": 0.9837693572044373,
+ "rewards/thinking_answer_ratio_reward/std": 0.0436534620821476,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.6039378643035889,
+ "sampling/importance_sampling_ratio/min": 0.00011818981874966994,
+ "sampling/sampling_logp_difference/max": 9.043218612670898,
+ "sampling/sampling_logp_difference/mean": 1.231788158416748,
+ "step": 172
+ },
+ {
+ "clip_ratio/high_max": 0.0,
+ "clip_ratio/high_mean": 0.0,
+ "clip_ratio/low_mean": 0.3046875,
+ "clip_ratio/low_min": 0.1875,
+ "clip_ratio/region_mean": 0.3046875,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 1795.0,
+ "completions/max_terminated_length": 1795.0,
+ "completions/mean_length": 1130.46533203125,
+ "completions/mean_terminated_length": 1130.46533203125,
+ "completions/min_length": 240.0,
+ "completions/min_terminated_length": 240.0,
+ "entropy": 4.910979479551315,
+ "epoch": 1.9130434782608696,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 3.6889185905456543,
+ "learning_rate": 1e-05,
+ "loss": 0.0104,
+ "num_tokens": 159301910.0,
+ "reward": 3.603275775909424,
+ "reward_std": 0.09857556223869324,
+ "rewards/ngram_repetition2/mean": 0.9671369791030884,
+ "rewards/ngram_repetition2/std": 0.009682528674602509,
+ "rewards/ngram_repetition3/mean": 0.9982260465621948,
+ "rewards/ngram_repetition3/std": 0.005538651719689369,
+ "rewards/symbolic_reward_accuracy/mean": 0.8125,
+ "rewards/symbolic_reward_accuracy/std": 0.3904076814651489,
+ "rewards/symbolic_reward_partial_score/mean": 0.9497476816177368,
+ "rewards/symbolic_reward_partial_score/std": 0.12718616425991058,
+ "rewards/tag_count_reward/mean": 0.9990234375,
+ "rewards/tag_count_reward/std": 0.022080888971686363,
+ "rewards/thinking_answer_ratio_reward/mean": 0.9851157665252686,
+ "rewards/thinking_answer_ratio_reward/std": 0.04367439076304436,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.588046669960022,
+ "sampling/importance_sampling_ratio/min": 0.0005490838666446507,
+ "sampling/sampling_logp_difference/max": 7.507259368896484,
+ "sampling/sampling_logp_difference/mean": 1.1903247833251953,
+ "step": 176
+ },
+ {
+ "clip_ratio/high_max": 0.03125,
+ "clip_ratio/high_mean": 0.0068359375,
+ "clip_ratio/low_mean": 0.32373046875,
+ "clip_ratio/low_min": 0.203125,
+ "clip_ratio/region_mean": 0.33056640625,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 1933.0,
+ "completions/max_terminated_length": 1933.0,
+ "completions/mean_length": 1252.52880859375,
+ "completions/mean_terminated_length": 1252.52880859375,
+ "completions/min_length": 297.0,
+ "completions/min_terminated_length": 297.0,
+ "entropy": 4.105740413069725,
+ "epoch": 1.9565217391304348,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 1.827364206314087,
+ "learning_rate": 1e-05,
+ "loss": 0.019,
+ "num_tokens": 163936017.0,
+ "reward": 3.5113019943237305,
+ "reward_std": 0.056809887290000916,
+ "rewards/ngram_repetition2/mean": 0.9607850313186646,
+ "rewards/ngram_repetition2/std": 0.010354345664381981,
+ "rewards/ngram_repetition3/mean": 0.9982134699821472,
+ "rewards/ngram_repetition3/std": 0.004524180665612221,
+ "rewards/symbolic_reward_accuracy/mean": 0.76708984375,
+ "rewards/symbolic_reward_accuracy/std": 0.42278870940208435,
+ "rewards/symbolic_reward_partial_score/mean": 0.9481608271598816,
+ "rewards/symbolic_reward_partial_score/std": 0.1097652018070221,
+ "rewards/tag_count_reward/mean": 0.99951171875,
+ "rewards/tag_count_reward/std": 0.015621182508766651,
+ "rewards/thinking_answer_ratio_reward/mean": 0.9859845638275146,
+ "rewards/thinking_answer_ratio_reward/std": 0.025435911491513252,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.619266390800476,
+ "sampling/importance_sampling_ratio/min": 0.00031076392042450607,
+ "sampling/sampling_logp_difference/max": 8.07647705078125,
+ "sampling/sampling_logp_difference/mean": 1.2865667343139648,
+ "step": 180
+ },
+ {
+ "clip_ratio/high_max": 0.03125,
+ "clip_ratio/high_mean": 0.00830078125,
+ "clip_ratio/low_mean": 0.33154296875,
+ "clip_ratio/low_min": 0.19921875,
+ "clip_ratio/region_mean": 0.33984375,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 2023.0,
+ "completions/max_terminated_length": 2023.0,
+ "completions/mean_length": 1276.42138671875,
+ "completions/mean_terminated_length": 1276.42138671875,
+ "completions/min_length": 565.0,
+ "completions/min_terminated_length": 565.0,
+ "entropy": 4.40667536854744,
+ "epoch": 2.0,
+ "frac_reward_zero_std": 0.0,
+ "grad_norm": 1.1742569208145142,
+ "learning_rate": 1e-05,
+ "loss": 0.007,
+ "num_tokens": 168609552.0,
+ "reward": 3.480891227722168,
+ "reward_std": 0.0965752899646759,
+ "rewards/ngram_repetition2/mean": 0.9636451005935669,
+ "rewards/ngram_repetition2/std": 0.009043324738740921,
+ "rewards/ngram_repetition3/mean": 0.9984027147293091,
+ "rewards/ngram_repetition3/std": 0.003917085938155651,
+ "rewards/symbolic_reward_accuracy/mean": 0.76318359375,
+ "rewards/symbolic_reward_accuracy/std": 0.42523249983787537,
+ "rewards/symbolic_reward_partial_score/mean": 0.926025390625,
+ "rewards/symbolic_reward_partial_score/std": 0.15876662731170654,
+ "rewards/tag_count_reward/mean": 0.9990234375,
+ "rewards/tag_count_reward/std": 0.022080888971686363,
+ "rewards/thinking_answer_ratio_reward/mean": 0.9854665398597717,
+ "rewards/thinking_answer_ratio_reward/std": 0.04371679201722145,
+ "sampling/importance_sampling_ratio/max": 2.0,
+ "sampling/importance_sampling_ratio/mean": 1.607491374015808,
+ "sampling/importance_sampling_ratio/min": 1.9882392621184408e-07,
+ "sampling/sampling_logp_difference/max": 15.430846214294434,
+ "sampling/sampling_logp_difference/mean": 1.2338640689849854,
+ "step": 184
+ },
+ {
+ "epoch": 2.0,
+ "eval_clip_ratio/high_max": 0.0,
+ "eval_clip_ratio/high_mean": 0.0,
+ "eval_clip_ratio/low_mean": 0.0,
+ "eval_clip_ratio/low_min": 0.0,
+ "eval_clip_ratio/region_mean": 0.0,
+ "eval_completions/clipped_ratio": 0.0,
+ "eval_completions/max_length": 2041.3684210526317,
+ "eval_completions/max_terminated_length": 2041.3684210526317,
+ "eval_completions/mean_length": 1445.561677631579,
+ "eval_completions/mean_terminated_length": 1445.561677631579,
+ "eval_completions/min_length": 811.421052631579,
+ "eval_completions/min_terminated_length": 811.421052631579,
+ "eval_entropy": 4.193256829914294,
+ "eval_frac_reward_zero_std": 0.0,
+ "eval_loss": 0.003358551999554038,
+ "eval_num_tokens": 168609552.0,
+ "eval_reward": 3.548053026199341,
+ "eval_reward_std": 0.11297402592754224,
+ "eval_rewards/ngram_repetition2/mean": 0.9634329080581665,
+ "eval_rewards/ngram_repetition2/std": 0.008954386226832867,
+ "eval_rewards/ngram_repetition3/mean": 0.9986425355861062,
+ "eval_rewards/ngram_repetition3/std": 0.0030909376218914986,
+ "eval_rewards/symbolic_reward_accuracy/mean": 0.790296052631579,
+ "eval_rewards/symbolic_reward_accuracy/std": 0.33185332699825887,
+ "eval_rewards/symbolic_reward_partial_score/mean": 0.9396244507086905,
+ "eval_rewards/symbolic_reward_partial_score/std": 0.10662586594882764,
+ "eval_rewards/tag_count_reward/mean": 0.9983552631578947,
+ "eval_rewards/tag_count_reward/std": 0.015857031078715073,
+ "eval_rewards/thinking_answer_ratio_reward/mean": 0.986048914884266,
+ "eval_rewards/thinking_answer_ratio_reward/std": 0.025907071926140862,
+ "eval_runtime": 885.1922,
+ "eval_samples_per_second": 0.169,
+ "eval_sampling/importance_sampling_ratio/max": 2.0,
+ "eval_sampling/importance_sampling_ratio/mean": 1.6087917842363055,
+ "eval_sampling/importance_sampling_ratio/min": 0.0033183323022584724,
+ "eval_sampling/sampling_logp_difference/max": 6.073739528656006,
+ "eval_sampling/sampling_logp_difference/mean": 1.240292894212823,
+ "eval_steps_per_second": 0.002,
+ "step": 184
+ },
+ {
+ "epoch": 2.0,
+ "step": 184,
+ "total_flos": 0.0,
+ "train_loss": 0.0,
+ "train_runtime": 2.3434,
+ "train_samples_per_second": 2560.359,
+ "train_steps_per_second": 78.518
+ }
+ ],
+ "logging_steps": 4,
2006
+ "max_steps": 184,
2007
+ "num_input_tokens_seen": 168609552,
2008
+ "num_train_epochs": 2,
2009
+ "save_steps": 500,
2010
+ "stateful_callbacks": {
2011
+ "TrainerControl": {
2012
+ "args": {
2013
+ "should_epoch_stop": false,
2014
+ "should_evaluate": false,
2015
+ "should_log": false,
2016
+ "should_save": true,
2017
+ "should_training_stop": true
2018
+ },
2019
+ "attributes": {}
2020
+ }
2021
+ },
2022
+ "total_flos": 0.0,
2023
+ "train_batch_size": 16,
2024
+ "trial_name": null,
2025
+ "trial_params": null
2026
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71f5640fe4ef5d58c11a6b709b1604b2ee41366c015db1bbc7c9a5c47f6ce303
+ size 11665