Upload folder using huggingface_hub
- .gitattributes +1 -0
- added_tokens.json +24 -0
- all_results.json +8 -0
- config.json +30 -0
- generation_config.json +6 -0
- merges.txt +0 -0
- model-00001-of-00004.safetensors +3 -0
- model-00002-of-00004.safetensors +3 -0
- model-00003-of-00004.safetensors +3 -0
- model-00004-of-00004.safetensors +3 -0
- model.safetensors.index.json +346 -0
- special_tokens_map.json +31 -0
- tokenizer.json +3 -0
- tokenizer_config.json +208 -0
- train_results.json +8 -0
- trainer_state.json +1162 -0
- training_args.bin +3 -0
- vocab.json +0 -0
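The commit title indicates the files below were pushed with huggingface_hub. A minimal sketch of how such a commit is typically produced follows; the local folder path and repo id are placeholders, not values recorded in this diff.

from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./output",                    # local training output directory (assumed)
    repo_id="your-username/your-model-repo",   # target model repository (placeholder)
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)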
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "</tool_call>": 151658,
+  "<tool_call>": 151657,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
all_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "total_flos": 0.0,
+  "train_loss": 0.10659663751721382,
+  "train_runtime": 145281.9904,
+  "train_samples": 631,
+  "train_samples_per_second": 0.087,
+  "train_steps_per_second": 0.001
+}
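As a quick sanity check (a sketch only, using the final epoch count of ~19.81 reported in trainer_state.json further down), the logged throughput is consistent with the runtime:

# Cross-check the throughput reported in all_results.json / train_results.json.
train_runtime = 145281.9904   # seconds, from the file above
train_samples = 631           # from the file above
epochs = 19.810126582278482   # final epoch from trainer_state.json below

print(train_samples * epochs / train_runtime)  # ~0.086 samples/s, matching the logged 0.087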
config.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "_name_or_path": "/work/home/liuweichu/Qwen2.5-Math-7B",
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "eos_token_id": 151643,
+  "hidden_act": "silu",
+  "hidden_size": 3584,
+  "initializer_range": 0.02,
+  "intermediate_size": 18944,
+  "max_position_embeddings": 4096,
+  "max_window_layers": 28,
+  "model_type": "qwen2",
+  "num_attention_heads": 28,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 4,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000,
+  "sliding_window": 4096,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.49.0",
+  "use_cache": true,
+  "use_mrope": false,
+  "use_sliding_window": false,
+  "vocab_size": 152064
+}
generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token_id": 151643,
+  "eos_token_id": 151643,
+  "max_new_tokens": 2048,
+  "transformers_version": "4.49.0"
+}
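Taken together, config.json and generation_config.json describe a bfloat16 Qwen2ForCausalLM checkpoint that stops on token 151643 and generates up to 2048 new tokens by default. A minimal loading sketch follows; the repo id is a placeholder, since the repository name is not part of this diff.

# Sketch only: loading this checkpoint with transformers. The repo id is hypothetical.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-username/your-model-repo"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    device_map="auto",
)

inputs = tokenizer("What is 12 * 13?", return_tensors="pt").to(model.device)
out = model.generate(**inputs)  # picks up max_new_tokens=2048 and eos_token_id=151643 from generation_config.json
print(tokenizer.decode(out[0], skip_special_tokens=True))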
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50f97bfc1069218cd448e227bccfe082d45630af674d68df8d555374ac742156
+size 4877660776
model-00002-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e2c644185c03b21bbcd9aaafdfb2345e83b5948cd5997992fdc97c5f0123689
+size 4932751008
model-00003-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65434840cbdc19add497f8260a95fcf19d46ed88df25a2f647f717da80956422
+size 4330865200
model-00004-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5d61644122f0f277b1cc88e292b052cb750506cd75abde97b08e2c59d92c8f8
+size 1089994880
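The four entries above are Git LFS pointer files (spec v1) rather than the weights themselves; the oid and size identify the real shard contents. A small sketch for verifying a downloaded shard against its pointer (the local filename is assumed):

# Verify a downloaded shard against its LFS pointer (sketch; file assumed to exist locally).
import hashlib
from pathlib import Path

shard = Path("model-00001-of-00004.safetensors")
expected_oid = "50f97bfc1069218cd448e227bccfe082d45630af674d68df8d555374ac742156"
expected_size = 4877660776

h = hashlib.sha256()
with shard.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert shard.stat().st_size == expected_size, "size mismatch with the LFS pointer"
assert h.hexdigest() == expected_oid, "sha256 mismatch with the LFS pointer"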
model.safetensors.index.json
ADDED
@@ -0,0 +1,346 @@
| 1 |
+
{
|
| 2 |
+
"metadata": {
|
| 3 |
+
"total_size": 15231233024
|
| 4 |
+
},
|
| 5 |
+
"weight_map": {
|
| 6 |
+
"lm_head.weight": "model-00004-of-00004.safetensors",
|
| 7 |
+
"model.embed_tokens.weight": "model-00001-of-00004.safetensors",
|
| 8 |
+
"model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 9 |
+
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 10 |
+
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 11 |
+
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 12 |
+
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 13 |
+
"model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 14 |
+
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 15 |
+
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 16 |
+
"model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 17 |
+
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 18 |
+
"model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 19 |
+
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 20 |
+
"model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 21 |
+
"model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 22 |
+
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 23 |
+
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 24 |
+
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 25 |
+
"model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 26 |
+
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 27 |
+
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 28 |
+
"model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 29 |
+
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 30 |
+
"model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 31 |
+
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 32 |
+
"model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 33 |
+
"model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 34 |
+
"model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 35 |
+
"model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 36 |
+
"model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 37 |
+
"model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 38 |
+
"model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 39 |
+
"model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 40 |
+
"model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 41 |
+
"model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 42 |
+
"model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 43 |
+
"model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 44 |
+
"model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 45 |
+
"model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 46 |
+
"model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 47 |
+
"model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 48 |
+
"model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 49 |
+
"model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 50 |
+
"model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 51 |
+
"model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 52 |
+
"model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 53 |
+
"model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 54 |
+
"model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 55 |
+
"model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 56 |
+
"model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 57 |
+
"model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 58 |
+
"model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 59 |
+
"model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 60 |
+
"model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 61 |
+
"model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 62 |
+
"model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 63 |
+
"model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 64 |
+
"model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 65 |
+
"model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 66 |
+
"model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 67 |
+
"model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 68 |
+
"model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 69 |
+
"model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 70 |
+
"model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 71 |
+
"model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 72 |
+
"model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 73 |
+
"model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 74 |
+
"model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 75 |
+
"model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 76 |
+
"model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 77 |
+
"model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 78 |
+
"model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 79 |
+
"model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 80 |
+
"model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 81 |
+
"model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 82 |
+
"model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 83 |
+
"model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 84 |
+
"model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 85 |
+
"model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 86 |
+
"model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 87 |
+
"model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 88 |
+
"model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 89 |
+
"model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 90 |
+
"model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 91 |
+
"model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 92 |
+
"model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 93 |
+
"model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 94 |
+
"model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 95 |
+
"model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 96 |
+
"model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 97 |
+
"model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 98 |
+
"model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 99 |
+
"model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 100 |
+
"model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 101 |
+
"model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 102 |
+
"model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 103 |
+
"model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 104 |
+
"model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 105 |
+
"model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 106 |
+
"model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 107 |
+
"model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 108 |
+
"model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 109 |
+
"model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 110 |
+
"model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 111 |
+
"model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 112 |
+
"model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 113 |
+
"model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 114 |
+
"model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 115 |
+
"model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 116 |
+
"model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 117 |
+
"model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 118 |
+
"model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 119 |
+
"model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 120 |
+
"model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 121 |
+
"model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 122 |
+
"model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 123 |
+
"model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 124 |
+
"model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 125 |
+
"model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 126 |
+
"model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 127 |
+
"model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 128 |
+
"model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 129 |
+
"model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 130 |
+
"model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 131 |
+
"model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 132 |
+
"model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 133 |
+
"model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 134 |
+
"model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 135 |
+
"model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 136 |
+
"model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 137 |
+
"model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 138 |
+
"model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 139 |
+
"model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 140 |
+
"model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 141 |
+
"model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 142 |
+
"model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 143 |
+
"model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 144 |
+
"model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 145 |
+
"model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 146 |
+
"model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 147 |
+
"model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 148 |
+
"model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 149 |
+
"model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 150 |
+
"model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 151 |
+
"model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 152 |
+
"model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 153 |
+
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 154 |
+
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 155 |
+
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 156 |
+
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 157 |
+
"model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 158 |
+
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 159 |
+
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 160 |
+
"model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 161 |
+
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 162 |
+
"model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 163 |
+
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 164 |
+
"model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 165 |
+
"model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 166 |
+
"model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 167 |
+
"model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 168 |
+
"model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 169 |
+
"model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 170 |
+
"model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 171 |
+
"model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 172 |
+
"model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 173 |
+
"model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 174 |
+
"model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 175 |
+
"model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 176 |
+
"model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 177 |
+
"model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 178 |
+
"model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 179 |
+
"model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 180 |
+
"model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 181 |
+
"model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 182 |
+
"model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 183 |
+
"model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 184 |
+
"model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 185 |
+
"model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 186 |
+
"model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 187 |
+
"model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 188 |
+
"model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 189 |
+
"model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 190 |
+
"model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 191 |
+
"model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 192 |
+
"model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 193 |
+
"model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 194 |
+
"model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 195 |
+
"model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 196 |
+
"model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 197 |
+
"model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 198 |
+
"model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 199 |
+
"model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 200 |
+
"model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 201 |
+
"model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 202 |
+
"model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 203 |
+
"model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 204 |
+
"model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 205 |
+
"model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 206 |
+
"model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 207 |
+
"model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 208 |
+
"model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 209 |
+
"model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 210 |
+
"model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 211 |
+
"model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 212 |
+
"model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 213 |
+
"model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 214 |
+
"model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 215 |
+
"model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 216 |
+
"model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 217 |
+
"model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 218 |
+
"model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 219 |
+
"model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 220 |
+
"model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 221 |
+
"model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 222 |
+
"model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 223 |
+
"model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 224 |
+
"model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 225 |
+
"model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 226 |
+
"model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 227 |
+
"model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 228 |
+
"model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 229 |
+
"model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 230 |
+
"model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 231 |
+
"model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 232 |
+
"model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 233 |
+
"model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 234 |
+
"model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 235 |
+
"model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 236 |
+
"model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 237 |
+
"model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 238 |
+
"model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 239 |
+
"model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 240 |
+
"model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 241 |
+
"model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 242 |
+
"model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 243 |
+
"model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 244 |
+
"model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 245 |
+
"model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 246 |
+
"model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 247 |
+
"model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 248 |
+
"model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 249 |
+
"model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 250 |
+
"model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 251 |
+
"model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 252 |
+
"model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 253 |
+
"model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 254 |
+
"model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 255 |
+
"model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 256 |
+
"model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 257 |
+
"model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 258 |
+
"model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 259 |
+
"model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 260 |
+
"model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 261 |
+
"model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 262 |
+
"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 263 |
+
"model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 264 |
+
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 265 |
+
"model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 266 |
+
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 267 |
+
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 268 |
+
"model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 269 |
+
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 270 |
+
"model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 271 |
+
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 272 |
+
"model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 273 |
+
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 274 |
+
"model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 275 |
+
"model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 276 |
+
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 277 |
+
"model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 278 |
+
"model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 279 |
+
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 280 |
+
"model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 281 |
+
"model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 282 |
+
"model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 283 |
+
"model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 284 |
+
"model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 285 |
+
"model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 286 |
+
"model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 287 |
+
"model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 288 |
+
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 289 |
+
"model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 290 |
+
"model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 291 |
+
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 292 |
+
"model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 293 |
+
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 294 |
+
"model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 295 |
+
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 296 |
+
"model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 297 |
+
"model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 298 |
+
"model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 299 |
+
"model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 300 |
+
"model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 301 |
+
"model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 302 |
+
"model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 303 |
+
"model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 304 |
+
"model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 305 |
+
"model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 306 |
+
"model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 307 |
+
"model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 308 |
+
"model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 309 |
+
"model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 310 |
+
"model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 311 |
+
"model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 312 |
+
"model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 313 |
+
"model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 314 |
+
"model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 315 |
+
"model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 316 |
+
"model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 317 |
+
"model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 318 |
+
"model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 319 |
+
"model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 320 |
+
"model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 321 |
+
"model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 322 |
+
"model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 323 |
+
"model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 324 |
+
"model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 325 |
+
"model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 326 |
+
"model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 327 |
+
"model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 328 |
+
"model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 329 |
+
"model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 330 |
+
"model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 331 |
+
"model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 332 |
+
"model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 333 |
+
"model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 334 |
+
"model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 335 |
+
"model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 336 |
+
"model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 337 |
+
"model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 338 |
+
"model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 339 |
+
"model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 340 |
+
"model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 341 |
+
"model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 342 |
+
"model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 343 |
+
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 344 |
+
"model.norm.weight": "model-00003-of-00004.safetensors"
|
| 345 |
+
}
|
| 346 |
+
}
|
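model.safetensors.index.json maps each tensor name to the shard that stores it (total_size 15231233024 bytes across the four files above). A sketch of resolving and loading a single tensor from the index, assuming the files are present locally:

# Sketch: resolve a tensor to its shard via the index and load it (files assumed local).
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "lm_head.weight"
shard = index["weight_map"][name]  # e.g. "model-00004-of-00004.safetensors"
with safe_open(shard, framework="pt") as sf:
    tensor = sf.get_tensor(name)
print(shard, tuple(tensor.shape))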
special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5eee858c5123a4279c3e1f7b81247343f356ac767940b2692a928ad929543214
+size 11422063
tokenizer_config.json
ADDED
@@ -0,0 +1,208 @@
| 1 |
+
{
|
| 2 |
+
"add_bos_token": false,
|
| 3 |
+
"add_prefix_space": false,
|
| 4 |
+
"added_tokens_decoder": {
|
| 5 |
+
"151643": {
|
| 6 |
+
"content": "<|endoftext|>",
|
| 7 |
+
"lstrip": false,
|
| 8 |
+
"normalized": false,
|
| 9 |
+
"rstrip": false,
|
| 10 |
+
"single_word": false,
|
| 11 |
+
"special": true
|
| 12 |
+
},
|
| 13 |
+
"151644": {
|
| 14 |
+
"content": "<|im_start|>",
|
| 15 |
+
"lstrip": false,
|
| 16 |
+
"normalized": false,
|
| 17 |
+
"rstrip": false,
|
| 18 |
+
"single_word": false,
|
| 19 |
+
"special": true
|
| 20 |
+
},
|
| 21 |
+
"151645": {
|
| 22 |
+
"content": "<|im_end|>",
|
| 23 |
+
"lstrip": false,
|
| 24 |
+
"normalized": false,
|
| 25 |
+
"rstrip": false,
|
| 26 |
+
"single_word": false,
|
| 27 |
+
"special": true
|
| 28 |
+
},
|
| 29 |
+
"151646": {
|
| 30 |
+
"content": "<|object_ref_start|>",
|
| 31 |
+
"lstrip": false,
|
| 32 |
+
"normalized": false,
|
| 33 |
+
"rstrip": false,
|
| 34 |
+
"single_word": false,
|
| 35 |
+
"special": true
|
| 36 |
+
},
|
| 37 |
+
"151647": {
|
| 38 |
+
"content": "<|object_ref_end|>",
|
| 39 |
+
"lstrip": false,
|
| 40 |
+
"normalized": false,
|
| 41 |
+
"rstrip": false,
|
| 42 |
+
"single_word": false,
|
| 43 |
+
"special": true
|
| 44 |
+
},
|
| 45 |
+
"151648": {
|
| 46 |
+
"content": "<|box_start|>",
|
| 47 |
+
"lstrip": false,
|
| 48 |
+
"normalized": false,
|
| 49 |
+
"rstrip": false,
|
| 50 |
+
"single_word": false,
|
| 51 |
+
"special": true
|
| 52 |
+
},
|
| 53 |
+
"151649": {
|
| 54 |
+
"content": "<|box_end|>",
|
| 55 |
+
"lstrip": false,
|
| 56 |
+
"normalized": false,
|
| 57 |
+
"rstrip": false,
|
| 58 |
+
"single_word": false,
|
| 59 |
+
"special": true
|
| 60 |
+
},
|
| 61 |
+
"151650": {
|
| 62 |
+
"content": "<|quad_start|>",
|
| 63 |
+
"lstrip": false,
|
| 64 |
+
"normalized": false,
|
| 65 |
+
"rstrip": false,
|
| 66 |
+
"single_word": false,
|
| 67 |
+
"special": true
|
| 68 |
+
},
|
| 69 |
+
"151651": {
|
| 70 |
+
"content": "<|quad_end|>",
|
| 71 |
+
"lstrip": false,
|
| 72 |
+
"normalized": false,
|
| 73 |
+
"rstrip": false,
|
| 74 |
+
"single_word": false,
|
| 75 |
+
"special": true
|
| 76 |
+
},
|
| 77 |
+
"151652": {
|
| 78 |
+
"content": "<|vision_start|>",
|
| 79 |
+
"lstrip": false,
|
| 80 |
+
"normalized": false,
|
| 81 |
+
"rstrip": false,
|
| 82 |
+
"single_word": false,
|
| 83 |
+
"special": true
|
| 84 |
+
},
|
| 85 |
+
"151653": {
|
| 86 |
+
"content": "<|vision_end|>",
|
| 87 |
+
"lstrip": false,
|
| 88 |
+
"normalized": false,
|
| 89 |
+
"rstrip": false,
|
| 90 |
+
"single_word": false,
|
| 91 |
+
"special": true
|
| 92 |
+
},
|
| 93 |
+
"151654": {
|
| 94 |
+
"content": "<|vision_pad|>",
|
| 95 |
+
"lstrip": false,
|
| 96 |
+
"normalized": false,
|
| 97 |
+
"rstrip": false,
|
| 98 |
+
"single_word": false,
|
| 99 |
+
"special": true
|
| 100 |
+
},
|
| 101 |
+
"151655": {
|
| 102 |
+
"content": "<|image_pad|>",
|
| 103 |
+
"lstrip": false,
|
| 104 |
+
"normalized": false,
|
| 105 |
+
"rstrip": false,
|
| 106 |
+
"single_word": false,
|
| 107 |
+
"special": true
|
| 108 |
+
},
|
| 109 |
+
"151656": {
|
| 110 |
+
"content": "<|video_pad|>",
|
| 111 |
+
"lstrip": false,
|
| 112 |
+
"normalized": false,
|
| 113 |
+
"rstrip": false,
|
| 114 |
+
"single_word": false,
|
| 115 |
+
"special": true
|
| 116 |
+
},
|
| 117 |
+
"151657": {
|
| 118 |
+
"content": "<tool_call>",
|
| 119 |
+
"lstrip": false,
|
| 120 |
+
"normalized": false,
|
| 121 |
+
"rstrip": false,
|
| 122 |
+
"single_word": false,
|
| 123 |
+
"special": false
|
| 124 |
+
},
|
| 125 |
+
"151658": {
|
| 126 |
+
"content": "</tool_call>",
|
| 127 |
+
"lstrip": false,
|
| 128 |
+
"normalized": false,
|
| 129 |
+
"rstrip": false,
|
| 130 |
+
"single_word": false,
|
| 131 |
+
"special": false
|
| 132 |
+
},
|
| 133 |
+
"151659": {
|
| 134 |
+
"content": "<|fim_prefix|>",
|
| 135 |
+
"lstrip": false,
|
| 136 |
+
"normalized": false,
|
| 137 |
+
"rstrip": false,
|
| 138 |
+
"single_word": false,
|
| 139 |
+
"special": false
|
| 140 |
+
},
|
| 141 |
+
"151660": {
|
| 142 |
+
"content": "<|fim_middle|>",
|
| 143 |
+
"lstrip": false,
|
| 144 |
+
"normalized": false,
|
| 145 |
+
"rstrip": false,
|
| 146 |
+
"single_word": false,
|
| 147 |
+
"special": false
|
| 148 |
+
},
|
| 149 |
+
"151661": {
|
| 150 |
+
"content": "<|fim_suffix|>",
|
| 151 |
+
"lstrip": false,
|
| 152 |
+
"normalized": false,
|
| 153 |
+
"rstrip": false,
|
| 154 |
+
"single_word": false,
|
| 155 |
+
"special": false
|
| 156 |
+
},
|
| 157 |
+
"151662": {
|
| 158 |
+
"content": "<|fim_pad|>",
|
| 159 |
+
"lstrip": false,
|
| 160 |
+
"normalized": false,
|
| 161 |
+
"rstrip": false,
|
| 162 |
+
"single_word": false,
|
| 163 |
+
"special": false
|
| 164 |
+
},
|
| 165 |
+
"151663": {
|
| 166 |
+
"content": "<|repo_name|>",
|
| 167 |
+
"lstrip": false,
|
| 168 |
+
"normalized": false,
|
| 169 |
+
"rstrip": false,
|
| 170 |
+
"single_word": false,
|
| 171 |
+
"special": false
|
| 172 |
+
},
|
| 173 |
+
"151664": {
|
| 174 |
+
"content": "<|file_sep|>",
|
| 175 |
+
"lstrip": false,
|
| 176 |
+
"normalized": false,
|
| 177 |
+
"rstrip": false,
|
| 178 |
+
"single_word": false,
|
| 179 |
+
"special": false
|
| 180 |
+
}
|
| 181 |
+
},
|
| 182 |
+
"additional_special_tokens": [
|
| 183 |
+
"<|im_start|>",
|
| 184 |
+
"<|im_end|>",
|
| 185 |
+
"<|object_ref_start|>",
|
| 186 |
+
"<|object_ref_end|>",
|
| 187 |
+
"<|box_start|>",
|
| 188 |
+
"<|box_end|>",
|
| 189 |
+
"<|quad_start|>",
|
| 190 |
+
"<|quad_end|>",
|
| 191 |
+
"<|vision_start|>",
|
| 192 |
+
"<|vision_end|>",
|
| 193 |
+
"<|vision_pad|>",
|
| 194 |
+
"<|image_pad|>",
|
| 195 |
+
"<|video_pad|>"
|
| 196 |
+
],
|
| 197 |
+
"bos_token": null,
|
| 198 |
+
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'Please reason step by step, and put your final answer within \\\\boxed{}.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nPlease reason step by step, and put your final answer within \\\\boxed{}.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
|
| 199 |
+
"clean_up_tokenization_spaces": false,
|
| 200 |
+
"eos_token": "<|endoftext|>",
|
| 201 |
+
"errors": "replace",
|
| 202 |
+
"extra_special_tokens": {},
|
| 203 |
+
"model_max_length": 131072,
|
| 204 |
+
"pad_token": "<|endoftext|>",
|
| 205 |
+
"split_special_tokens": false,
|
| 206 |
+
"tokenizer_class": "Qwen2Tokenizer",
|
| 207 |
+
"unk_token": null
|
| 208 |
+
}
|
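The chat_template embedded in tokenizer_config.json injects a default system prompt ("Please reason step by step, and put your final answer within \boxed{}.") whenever no system message is supplied. A short sketch of rendering a prompt with it (the repo id is again a placeholder):

# Sketch: render a prompt with the bundled chat template (repo id is hypothetical).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-username/your-model-repo")
messages = [{"role": "user", "content": "Solve x^2 - 5x + 6 = 0."}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # begins with "<|im_start|>system\nPlease reason step by step, ..." per the template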
train_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "total_flos": 0.0,
+  "train_loss": 0.10659663751721382,
+  "train_runtime": 145281.9904,
+  "train_samples": 631,
+  "train_samples_per_second": 0.087,
+  "train_steps_per_second": 0.001
+}
trainer_state.json
ADDED
@@ -0,0 +1,1162 @@
| 1 |
+
{
|
| 2 |
+
"best_metric": null,
|
| 3 |
+
"best_model_checkpoint": null,
|
| 4 |
+
"epoch": 19.810126582278482,
|
| 5 |
+
"eval_steps": 500,
|
| 6 |
+
"global_step": 80,
|
| 7 |
+
"is_hyper_param_search": false,
|
| 8 |
+
"is_local_process_zero": true,
|
| 9 |
+
"is_world_process_zero": true,
|
| 10 |
+
"log_history": [
|
| 11 |
+
{
|
| 12 |
+
"clip_ratio": 0.0,
|
| 13 |
+
"completion_length": 1317.9442443847656,
|
| 14 |
+
"epoch": 0.20253164556962025,
|
| 15 |
+
"grad_norm": 0.15780125558376312,
|
| 16 |
+
"kl": 0.0,
|
| 17 |
+
"learning_rate": 3.75e-07,
|
| 18 |
+
"loss": 0.0571,
|
| 19 |
+
"reward": 0.24441965110599995,
|
| 20 |
+
"reward_std": 0.26795641146600246,
|
| 21 |
+
"rewards/accuracy_reward": 0.20312500791624188,
|
| 22 |
+
"rewards/format_reward": 0.04129464481957257,
|
| 23 |
+
"step": 1
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"clip_ratio": 0.0,
|
| 27 |
+
"completion_length": 1309.1719398498535,
|
| 28 |
+
"epoch": 0.4050632911392405,
|
| 29 |
+
"grad_norm": 0.14830459654331207,
|
| 30 |
+
"kl": 0.0,
|
| 31 |
+
"learning_rate": 7.5e-07,
|
| 32 |
+
"loss": 0.0497,
|
| 33 |
+
"reward": 0.20312500861473382,
|
| 34 |
+
"reward_std": 0.2503739036619663,
|
| 35 |
+
"rewards/accuracy_reward": 0.1551339355064556,
|
| 36 |
+
"rewards/format_reward": 0.047991073690354824,
|
| 37 |
+
"step": 2
|
| 38 |
+
},
|
| 39 |
+
{
|
| 40 |
+
"clip_ratio": 0.0,
|
| 41 |
+
"completion_length": 1356.9543018341064,
|
| 42 |
+
"epoch": 0.6075949367088608,
|
| 43 |
+
"grad_norm": 0.12031414359807968,
|
| 44 |
+
"kl": 0.00023540854454040527,
|
| 45 |
+
"learning_rate": 1.125e-06,
|
| 46 |
+
"loss": 0.0485,
|
| 47 |
+
"reward": 0.19642858079168946,
|
| 48 |
+
"reward_std": 0.25526259164325893,
|
| 49 |
+
"rewards/accuracy_reward": 0.15736607962753624,
|
| 50 |
+
"rewards/format_reward": 0.03906250209547579,
|
| 51 |
+
"step": 3
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"clip_ratio": 0.0,
|
| 55 |
+
"completion_length": 1246.4500675201416,
|
| 56 |
+
"epoch": 0.810126582278481,
|
| 57 |
+
"grad_norm": 0.1266031116247177,
|
| 58 |
+
"kl": 0.0002713203430175781,
|
| 59 |
+
"learning_rate": 1.5e-06,
|
| 60 |
+
"loss": 0.0401,
|
| 61 |
+
"reward": 0.20424107799772173,
|
| 62 |
+
"reward_std": 0.24340283521451056,
|
| 63 |
+
"rewards/accuracy_reward": 0.16294643690343946,
|
| 64 |
+
"rewards/format_reward": 0.041294645285233855,
|
| 65 |
+
"step": 4
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"clip_ratio": 0.0,
|
| 69 |
+
"completion_length": 1280.3237266540527,
|
| 70 |
+
"epoch": 1.2025316455696202,
|
| 71 |
+
"grad_norm": 0.1469860076904297,
|
| 72 |
+
"kl": 0.00030854344367980957,
|
| 73 |
+
"learning_rate": 1.875e-06,
|
| 74 |
+
"loss": 0.0877,
|
| 75 |
+
"reward": 0.21316965215373784,
|
| 76 |
+
"reward_std": 0.2447465395089239,
|
| 77 |
+
"rewards/accuracy_reward": 0.17745536426082253,
|
| 78 |
+
"rewards/format_reward": 0.03571428754366934,
|
| 79 |
+
"step": 5
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"clip_ratio": 0.0,
|
| 83 |
+
"completion_length": 1362.0257396697998,
|
| 84 |
+
"epoch": 1.4050632911392404,
|
| 85 |
+
"grad_norm": 0.10963964462280273,
|
| 86 |
+
"kl": 0.0003065764904022217,
|
| 87 |
+
"learning_rate": 2.25e-06,
|
| 88 |
+
"loss": 0.0574,
|
| 89 |
+
"reward": 0.21651786682195961,
|
| 90 |
+
"reward_std": 0.2525859682355076,
|
| 91 |
+
"rewards/accuracy_reward": 0.18191965203732252,
|
| 92 |
+
"rewards/format_reward": 0.03459821629803628,
|
| 93 |
+
"step": 6
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"clip_ratio": 0.0,
|
| 97 |
+
"completion_length": 1365.2232837677002,
|
| 98 |
+
"epoch": 1.6075949367088609,
|
| 99 |
+
"grad_norm": 0.143161341547966,
|
| 100 |
+
"kl": 0.0009369254112243652,
|
| 101 |
+
"learning_rate": 2.6250000000000003e-06,
|
| 102 |
+
"loss": 0.0556,
|
| 103 |
+
"reward": 0.22321429511066526,
|
| 104 |
+
"reward_std": 0.26517220702953637,
|
| 105 |
+
"rewards/accuracy_reward": 0.1696428646100685,
|
| 106 |
+
"rewards/format_reward": 0.05357143119908869,
|
| 107 |
+
"step": 7
|
| 108 |
+
},
|
| 109 |
+
{
|
| 110 |
+
"clip_ratio": 0.0,
|
| 111 |
+
"completion_length": 1320.9459075927734,
|
| 112 |
+
"epoch": 1.810126582278481,
|
| 113 |
+
"grad_norm": 0.0994948148727417,
|
| 114 |
+
"kl": 0.002080678939819336,
|
| 115 |
+
"learning_rate": 3e-06,
|
| 116 |
+
"loss": 0.0839,
|
| 117 |
+
"reward": 0.26227679871954024,
|
| 118 |
+
"reward_std": 0.28934473474510014,
|
| 119 |
+
"rewards/accuracy_reward": 0.2087053683353588,
|
| 120 |
+
"rewards/format_reward": 0.053571431897580624,
|
| 121 |
+
"step": 8
|
| 122 |
+
},
|
| 123 |
+
{
|
| 124 |
+
"clip_ratio": 0.0,
|
| 125 |
+
"completion_length": 1262.8661212921143,
|
| 126 |
+
"epoch": 2.2025316455696204,
|
| 127 |
+
"grad_norm": 0.8203178644180298,
|
| 128 |
+
"kl": 0.0046384334564208984,
|
| 129 |
+
"learning_rate": 2.998572332372787e-06,
|
| 130 |
+
"loss": 0.0457,
|
| 131 |
+
"reward": 0.2868303719442338,
|
| 132 |
+
"reward_std": 0.3498251587152481,
|
| 133 |
+
"rewards/accuracy_reward": 0.18861608114093542,
|
| 134 |
+
"rewards/format_reward": 0.09821428963914514,
|
| 135 |
+
"step": 9
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"clip_ratio": 0.0,
|
| 139 |
+
"completion_length": 1360.1105499267578,
|
| 140 |
+
"epoch": 2.4050632911392404,
|
| 141 |
+
"grad_norm": 0.1783958524465561,
|
| 142 |
+
"kl": 0.013251304626464844,
|
| 143 |
+
"learning_rate": 2.994292047137618e-06,
|
| 144 |
+
"loss": 0.0292,
|
| 145 |
+
"reward": 0.26227679825387895,
|
| 146 |
+
"reward_std": 0.30967882834374905,
|
| 147 |
+
"rewards/accuracy_reward": 0.15736607799772173,
|
| 148 |
+
"rewards/format_reward": 0.10491071920841932,
|
| 149 |
+
"step": 10
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"clip_ratio": 0.0,
|
| 153 |
+
"completion_length": 1258.4922485351562,
|
| 154 |
+
"epoch": 2.607594936708861,
|
| 155 |
+
"grad_norm": 0.14482943713665009,
|
| 156 |
+
"kl": 0.01790475845336914,
|
| 157 |
+
"learning_rate": 2.9871672920607156e-06,
|
| 158 |
+
"loss": 0.0542,
|
| 159 |
+
"reward": 0.2879464461002499,
|
| 160 |
+
"reward_std": 0.36507281148806214,
|
| 161 |
+
"rewards/accuracy_reward": 0.12723214749712497,
|
| 162 |
+
"rewards/format_reward": 0.16071429359726608,
|
| 163 |
+
"step": 11
|
| 164 |
+
},
|
| 165 |
+
{
|
| 166 |
+
"clip_ratio": 0.0,
|
| 167 |
+
"completion_length": 1224.5542240142822,
|
| 168 |
+
"epoch": 2.810126582278481,
|
| 169 |
+
"grad_norm": 0.18995879590511322,
|
| 170 |
+
"kl": 0.015253305435180664,
|
| 171 |
+
"learning_rate": 2.9772116295183124e-06,
|
| 172 |
+
"loss": 0.0481,
|
| 173 |
+
"reward": 0.40066965902224183,
|
| 174 |
+
"reward_std": 0.44980036094784737,
|
| 175 |
+
"rewards/accuracy_reward": 0.1986607233993709,
|
| 176 |
+
"rewards/format_reward": 0.20200893806759268,
|
| 177 |
+
"step": 12
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"clip_ratio": 0.0,
|
| 181 |
+
"completion_length": 1317.55810546875,
|
| 182 |
+
"epoch": 3.2025316455696204,
|
| 183 |
+
"grad_norm": 0.12641476094722748,
|
| 184 |
+
"kl": 0.028325557708740234,
|
| 185 |
+
"learning_rate": 2.9644440106799e-06,
|
| 186 |
+
"loss": 0.083,
|
| 187 |
+
"reward": 0.5022321622818708,
|
| 188 |
+
"reward_std": 0.48967235907912254,
|
| 189 |
+
"rewards/accuracy_reward": 0.21093750931322575,
|
| 190 |
+
"rewards/format_reward": 0.29129465599544346,
|
| 191 |
+
"step": 13
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"clip_ratio": 0.0,
|
| 195 |
+
"completion_length": 1256.8672466278076,
|
| 196 |
+
"epoch": 3.4050632911392404,
|
| 197 |
+
"grad_norm": 9.634344100952148,
|
| 198 |
+
"kl": 0.02230978012084961,
|
| 199 |
+
"learning_rate": 2.9488887394336023e-06,
|
| 200 |
+
"loss": 0.0578,
|
| 201 |
+
"reward": 0.5457589533179998,
|
| 202 |
+
"reward_std": 0.5024354420602322,
|
| 203 |
+
"rewards/accuracy_reward": 0.16406250861473382,
|
| 204 |
+
"rewards/format_reward": 0.3816964467987418,
|
| 205 |
+
"step": 14
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"clip_ratio": 0.0,
|
| 209 |
+
"completion_length": 1307.5703716278076,
|
| 210 |
+
"epoch": 3.607594936708861,
|
| 211 |
+
"grad_norm": 0.38609325885772705,
|
| 212 |
+
"kl": 0.02093505859375,
|
| 213 |
+
"learning_rate": 2.9305754261223403e-06,
|
| 214 |
+
"loss": 0.0582,
|
| 215 |
+
"reward": 0.6060268161818385,
|
| 216 |
+
"reward_std": 0.48538014432415366,
|
| 217 |
+
"rewards/accuracy_reward": 0.17410715005826205,
|
| 218 |
+
"rewards/format_reward": 0.4319196632131934,
|
| 219 |
+
"step": 15
|
| 220 |
+
},
|
| 221 |
+
{
|
| 222 |
+
"clip_ratio": 0.0,
|
| 223 |
+
"completion_length": 1322.5646591186523,
|
| 224 |
+
"epoch": 3.810126582278481,
|
| 225 |
+
"grad_norm": 0.29984602332115173,
|
| 226 |
+
"kl": 0.025579452514648438,
|
| 227 |
+
"learning_rate": 2.9095389311788626e-06,
|
| 228 |
+
"loss": 0.0647,
|
| 229 |
+
"reward": 0.6071428805589676,
|
| 230 |
+
"reward_std": 0.48221714748069644,
|
| 231 |
+
"rewards/accuracy_reward": 0.12723214947618544,
|
| 232 |
+
"rewards/format_reward": 0.47991073597222567,
|
| 233 |
+
"step": 16
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"clip_ratio": 0.0,
|
| 237 |
+
"completion_length": 1265.1797485351562,
|
| 238 |
+
"epoch": 4.2025316455696204,
|
| 239 |
+
"grad_norm": 0.22652213275432587,
|
| 240 |
+
"kl": 0.047135353088378906,
|
| 241 |
+
"learning_rate": 2.88581929876693e-06,
|
| 242 |
+
"loss": 0.032,
|
| 243 |
+
"reward": 0.728794677183032,
|
| 244 |
+
"reward_std": 0.5240897228941321,
|
| 245 |
+
"rewards/accuracy_reward": 0.17522322153672576,
|
| 246 |
+
"rewards/format_reward": 0.5535714561119676,
|
| 247 |
+
"step": 17
|
| 248 |
+
},
|
| 249 |
+
{
|
| 250 |
+
"clip_ratio": 0.0,
|
| 251 |
+
"completion_length": 1309.780195236206,
|
| 252 |
+
"epoch": 4.405063291139241,
|
| 253 |
+
"grad_norm": 0.16519580781459808,
|
| 254 |
+
"kl": 0.013474464416503906,
|
| 255 |
+
"learning_rate": 2.859461680554975e-06,
|
| 256 |
+
"loss": 0.0598,
|
| 257 |
+
"reward": 0.7276786109432578,
|
| 258 |
+
"reward_std": 0.5046426299959421,
|
| 259 |
+
"rewards/accuracy_reward": 0.1540178640279919,
|
| 260 |
+
"rewards/format_reward": 0.5736607434228063,
|
| 261 |
+
"step": 18
|
| 262 |
+
},
|
| 263 |
+
{
|
| 264 |
+
"clip_ratio": 0.0,
|
| 265 |
+
"completion_length": 1320.1898040771484,
|
| 266 |
+
"epoch": 4.6075949367088604,
|
| 267 |
+
"grad_norm": 0.13712696731090546,
|
| 268 |
+
"kl": 0.01743030548095703,
|
| 269 |
+
"learning_rate": 2.8305162497673325e-06,
|
| 270 |
+
"loss": 0.074,
|
| 271 |
+
"reward": 0.7343750400468707,
|
| 272 |
+
"reward_std": 0.5123187024146318,
|
| 273 |
+
"rewards/accuracy_reward": 0.15513393573928624,
|
| 274 |
+
"rewards/format_reward": 0.5792410988360643,
|
| 275 |
+
"step": 19
|
| 276 |
+
},
|
| 277 |
+
{
|
| 278 |
+
"clip_ratio": 0.0,
|
| 279 |
+
"completion_length": 1219.2167301177979,
|
| 280 |
+
"epoch": 4.810126582278481,
|
| 281 |
+
"grad_norm": 0.13668957352638245,
|
| 282 |
+
"kl": 0.014723777770996094,
|
| 283 |
+
"learning_rate": 2.7990381056766585e-06,
|
| 284 |
+
"loss": 0.0726,
|
| 285 |
+
"reward": 0.8549107480794191,
|
| 286 |
+
"reward_std": 0.45913853123784065,
|
| 287 |
+
"rewards/accuracy_reward": 0.18303572200238705,
|
| 288 |
+
"rewards/format_reward": 0.6718750316649675,
|
| 289 |
+
"step": 20
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"clip_ratio": 0.0,
|
| 293 |
+
"completion_length": 1226.5603199005127,
|
| 294 |
+
"epoch": 5.2025316455696204,
|
| 295 |
+
"grad_norm": 0.13830004632472992,
|
| 296 |
+
"kl": 0.0236053466796875,
|
| 297 |
+
"learning_rate": 2.765087168719329e-06,
|
| 298 |
+
"loss": 0.021,
|
| 299 |
+
"reward": 0.8816964756697416,
|
| 300 |
+
"reward_std": 0.5056763701140881,
|
| 301 |
+
"rewards/accuracy_reward": 0.20424108230508864,
|
| 302 |
+
"rewards/format_reward": 0.6774553898721933,
|
| 303 |
+
"step": 21
|
| 304 |
+
},
|
| 305 |
+
{
|
| 306 |
+
"clip_ratio": 0.0,
|
| 307 |
+
"completion_length": 1145.9241542816162,
|
| 308 |
+
"epoch": 5.405063291139241,
|
| 309 |
+
"grad_norm": 0.21640558540821075,
|
| 310 |
+
"kl": 0.021097183227539062,
|
| 311 |
+
"learning_rate": 2.728728066433488e-06,
|
| 312 |
+
"loss": 0.0532,
|
| 313 |
+
"reward": 0.8839286137372255,
|
| 314 |
+
"reward_std": 0.43036860041320324,
|
| 315 |
+
"rewards/accuracy_reward": 0.18861608009319752,
|
| 316 |
+
"rewards/format_reward": 0.6953125363215804,
|
| 317 |
+
"step": 22
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"clip_ratio": 0.0,
|
| 321 |
+
"completion_length": 1197.8103218078613,
|
| 322 |
+
"epoch": 5.6075949367088604,
|
| 323 |
+
"grad_norm": 0.1460823118686676,
|
| 324 |
+
"kl": 0.0402984619140625,
|
| 325 |
+
"learning_rate": 2.690030010436853e-06,
|
| 326 |
+
"loss": 0.0818,
|
| 327 |
+
"reward": 0.8895089738070965,
|
| 328 |
+
"reward_std": 0.4404608942568302,
|
| 329 |
+
"rewards/accuracy_reward": 0.14732143667060882,
|
| 330 |
+
"rewards/format_reward": 0.7421875428408384,
|
| 331 |
+
"step": 23
|
| 332 |
+
},
|
| 333 |
+
{
|
| 334 |
+
"clip_ratio": 0.0,
|
| 335 |
+
"completion_length": 1357.7104873657227,
|
| 336 |
+
"epoch": 5.810126582278481,
|
| 337 |
+
"grad_norm": 0.27858996391296387,
|
| 338 |
+
"kl": 0.045017242431640625,
|
| 339 |
+
"learning_rate": 2.649066664678467e-06,
|
| 340 |
+
"loss": 0.0808,
|
| 341 |
+
"reward": 0.8839286100119352,
|
| 342 |
+
"reward_std": 0.4558837213553488,
|
| 343 |
+
"rewards/accuracy_reward": 0.16183036426082253,
|
| 344 |
+
"rewards/format_reward": 0.7220982499420643,
|
| 345 |
+
"step": 24
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"clip_ratio": 0.0,
|
| 349 |
+
"completion_length": 1194.8192462921143,
|
| 350 |
+
"epoch": 6.2025316455696204,
|
| 351 |
+
"grad_norm": 0.14784154295921326,
|
| 352 |
+
"kl": 0.016931533813476562,
|
| 353 |
+
"learning_rate": 2.605916005215186e-06,
|
| 354 |
+
"loss": 0.0616,
|
| 355 |
+
"reward": 0.9006696790456772,
|
| 356 |
+
"reward_std": 0.436885382514447,
|
| 357 |
+
"rewards/accuracy_reward": 0.15290179336443543,
|
| 358 |
+
"rewards/format_reward": 0.7477678973227739,
|
| 359 |
+
"step": 25
|
| 360 |
+
},
|
| 361 |
+
{
|
| 362 |
+
"clip_ratio": 0.0,
|
| 363 |
+
"completion_length": 1149.721025466919,
|
| 364 |
+
"epoch": 6.405063291139241,
|
| 365 |
+
"grad_norm": 0.12463140487670898,
|
| 366 |
+
"kl": 0.01778697967529297,
|
| 367 |
+
"learning_rate": 2.5606601717798212e-06,
|
| 368 |
+
"loss": 0.0871,
|
| 369 |
+
"reward": 0.9709821846336126,
|
| 370 |
+
"reward_std": 0.43305877037346363,
|
| 371 |
+
"rewards/accuracy_reward": 0.16294643736910075,
|
| 372 |
+
"rewards/format_reward": 0.8080357536673546,
|
| 373 |
+
"step": 26
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"clip_ratio": 0.0,
|
| 377 |
+
"completion_length": 1091.338228225708,
|
| 378 |
+
"epoch": 6.6075949367088604,
|
| 379 |
+
"grad_norm": 0.16402533650398254,
|
| 380 |
+
"kl": 0.028328895568847656,
|
| 381 |
+
"learning_rate": 2.5133853114234908e-06,
|
| 382 |
+
"loss": 0.0804,
|
| 383 |
+
"reward": 1.0167411249130964,
|
| 384 |
+
"reward_std": 0.4595717927441001,
|
| 385 |
+
"rewards/accuracy_reward": 0.23772322584409267,
|
| 386 |
+
"rewards/format_reward": 0.7790178991854191,
|
| 387 |
+
"step": 27
|
| 388 |
+
},
|
| 389 |
+
{
|
| 390 |
+
"clip_ratio": 0.0,
|
| 391 |
+
"completion_length": 1120.704236984253,
|
| 392 |
+
"epoch": 6.810126582278481,
|
| 393 |
+
"grad_norm": 0.152034193277359,
|
| 394 |
+
"kl": 0.01720428466796875,
|
| 395 |
+
"learning_rate": 2.464181414529809e-06,
|
| 396 |
+
"loss": 0.0649,
|
| 397 |
+
"reward": 0.9787946827709675,
|
| 398 |
+
"reward_std": 0.40247320383787155,
|
| 399 |
+
"rewards/accuracy_reward": 0.17299107660073787,
|
| 400 |
+
"rewards/format_reward": 0.8058036062866449,
|
| 401 |
+
"step": 28
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"clip_ratio": 0.0,
|
| 405 |
+
"completion_length": 1154.484426498413,
|
| 406 |
+
"epoch": 7.2025316455696204,
|
| 407 |
+
"grad_norm": 0.13842816650867462,
|
| 408 |
+
"kl": 0.03877449035644531,
|
| 409 |
+
"learning_rate": 2.4131421435130812e-06,
|
| 410 |
+
"loss": 0.0955,
|
| 411 |
+
"reward": 0.9866071864962578,
|
| 412 |
+
"reward_std": 0.47181375650689006,
|
| 413 |
+
"rewards/accuracy_reward": 0.19642858125735074,
|
| 414 |
+
"rewards/format_reward": 0.7901786081492901,
|
| 415 |
+
"step": 29
|
| 416 |
+
},
|
| 417 |
+
{
|
| 418 |
+
"clip_ratio": 0.0,
|
| 419 |
+
"completion_length": 1082.405179977417,
|
| 420 |
+
"epoch": 7.405063291139241,
|
| 421 |
+
"grad_norm": 0.14722459018230438,
|
| 422 |
+
"kl": 0.04826545715332031,
|
| 423 |
+
"learning_rate": 2.3603646545265692e-06,
|
| 424 |
+
"loss": 0.0519,
|
| 425 |
+
"reward": 1.0546875447034836,
|
| 426 |
+
"reward_std": 0.4271851107478142,
|
| 427 |
+
"rewards/accuracy_reward": 0.21428572619333863,
|
| 428 |
+
"rewards/format_reward": 0.8404018338769674,
|
| 429 |
+
"step": 30
|
| 430 |
+
},
|
| 431 |
+
{
|
| 432 |
+
"clip_ratio": 0.0,
|
| 433 |
+
"completion_length": 1093.8527278900146,
|
| 434 |
+
"epoch": 7.6075949367088604,
|
| 435 |
+
"grad_norm": 0.1317053586244583,
|
| 436 |
+
"kl": 0.01566791534423828,
|
| 437 |
+
"learning_rate": 2.305949412520236e-06,
|
| 438 |
+
"loss": 0.0803,
|
| 439 |
+
"reward": 1.0401786137372255,
|
| 440 |
+
"reward_std": 0.3743963772431016,
|
| 441 |
+
"rewards/accuracy_reward": 0.19196429441217333,
|
| 442 |
+
"rewards/format_reward": 0.8482143301516771,
|
| 443 |
+
"step": 31
|
| 444 |
+
},
|
| 445 |
+
{
|
| 446 |
+
"clip_ratio": 0.0,
|
| 447 |
+
"completion_length": 1086.0333862304688,
|
| 448 |
+
"epoch": 7.810126582278481,
|
| 449 |
+
"grad_norm": 0.11714313179254532,
|
| 450 |
+
"kl": 0.020026206970214844,
|
| 451 |
+
"learning_rate": 2.25e-06,
|
| 452 |
+
"loss": 0.0793,
|
| 453 |
+
"reward": 1.056919701397419,
|
| 454 |
+
"reward_std": 0.37524981633760035,
|
| 455 |
+
"rewards/accuracy_reward": 0.2187500090803951,
|
| 456 |
+
"rewards/format_reward": 0.8381696902215481,
|
| 457 |
+
"step": 32
|
| 458 |
+
},
|
| 459 |
+
{
|
| 460 |
+
"clip_ratio": 0.0,
|
| 461 |
+
"completion_length": 1113.4855403900146,
|
| 462 |
+
"epoch": 8.20253164556962,
|
| 463 |
+
"grad_norm": 0.13244719803333282,
|
| 464 |
+
"kl": 0.017362594604492188,
|
| 465 |
+
"learning_rate": 2.192622919852551e-06,
|
| 466 |
+
"loss": 0.0644,
|
| 467 |
+
"reward": 1.0312500484287739,
|
| 468 |
+
"reward_std": 0.36712014535441995,
|
| 469 |
+
"rewards/accuracy_reward": 0.1886160805588588,
|
| 470 |
+
"rewards/format_reward": 0.8426339719444513,
|
| 471 |
+
"step": 33
|
| 472 |
+
},
|
| 473 |
+
{
|
| 474 |
+
"clip_ratio": 0.0,
|
| 475 |
+
"completion_length": 1150.787992477417,
|
| 476 |
+
"epoch": 8.405063291139241,
|
| 477 |
+
"grad_norm": 0.11223872005939484,
|
| 478 |
+
"kl": 0.02086639404296875,
|
| 479 |
+
"learning_rate": 2.1339273926110494e-06,
|
| 480 |
+
"loss": 0.0693,
|
| 481 |
+
"reward": 1.0625000447034836,
|
| 482 |
+
"reward_std": 0.3789821336977184,
|
| 483 |
+
"rewards/accuracy_reward": 0.21651786437723786,
|
| 484 |
+
"rewards/format_reward": 0.8459821864962578,
|
| 485 |
+
"step": 34
|
| 486 |
+
},
|
| 487 |
+
{
|
| 488 |
+
"clip_ratio": 0.0,
|
| 489 |
+
"completion_length": 1145.8158931732178,
|
| 490 |
+
"epoch": 8.60759493670886,
|
| 491 |
+
"grad_norm": 0.18010753393173218,
|
| 492 |
+
"kl": 0.021386146545410156,
|
| 493 |
+
"learning_rate": 2.074025148547635e-06,
|
| 494 |
+
"loss": 0.0767,
|
| 495 |
+
"reward": 1.0558036286383867,
|
| 496 |
+
"reward_std": 0.38298405637033284,
|
| 497 |
+
"rewards/accuracy_reward": 0.1941964360885322,
|
| 498 |
+
"rewards/format_reward": 0.8616071809083223,
|
| 499 |
+
"step": 35
|
| 500 |
+
},
|
| 501 |
+
{
|
| 502 |
+
"clip_ratio": 0.0,
|
| 503 |
+
"completion_length": 1208.060474395752,
|
| 504 |
+
"epoch": 8.810126582278482,
|
| 505 |
+
"grad_norm": 0.18328261375427246,
|
| 506 |
+
"kl": 0.01535797119140625,
|
| 507 |
+
"learning_rate": 2.0130302149885033e-06,
|
| 508 |
+
"loss": 0.0796,
|
| 509 |
+
"reward": 1.027901828289032,
|
| 510 |
+
"reward_std": 0.3823082959279418,
|
| 511 |
+
"rewards/accuracy_reward": 0.17410715273581445,
|
| 512 |
+
"rewards/format_reward": 0.8537946827709675,
|
| 513 |
+
"step": 36
|
| 514 |
+
},
|
| 515 |
+
{
|
| 516 |
+
"clip_ratio": 0.0,
|
| 517 |
+
"completion_length": 1159.372817993164,
|
| 518 |
+
"epoch": 9.20253164556962,
|
| 519 |
+
"grad_norm": 0.12609297037124634,
|
| 520 |
+
"kl": 0.013571739196777344,
|
| 521 |
+
"learning_rate": 1.9510586992564096e-06,
|
| 522 |
+
"loss": 0.0778,
|
| 523 |
+
"reward": 1.0636161137372255,
|
| 524 |
+
"reward_std": 0.3645266597159207,
|
| 525 |
+
"rewards/accuracy_reward": 0.19308036612346768,
|
| 526 |
+
"rewards/format_reward": 0.8705357536673546,
|
| 527 |
+
"step": 37
|
| 528 |
+
},
|
| 529 |
+
{
|
| 530 |
+
"clip_ratio": 0.0,
|
| 531 |
+
"completion_length": 1254.4621124267578,
|
| 532 |
+
"epoch": 9.405063291139241,
|
| 533 |
+
"grad_norm": 0.14299969375133514,
|
| 534 |
+
"kl": 0.013241767883300781,
|
| 535 |
+
"learning_rate": 1.888228567653781e-06,
|
| 536 |
+
"loss": 0.0773,
|
| 537 |
+
"reward": 1.053571479395032,
|
| 538 |
+
"reward_std": 0.37696896330453455,
|
| 539 |
+
"rewards/accuracy_reward": 0.1975446525029838,
|
| 540 |
+
"rewards/format_reward": 0.856026828289032,
|
| 541 |
+
"step": 38
|
| 542 |
+
},
|
| 543 |
+
{
|
| 544 |
+
"clip_ratio": 0.0,
|
| 545 |
+
"completion_length": 1183.1540699005127,
|
| 546 |
+
"epoch": 9.60759493670886,
|
| 547 |
+
"grad_norm": 0.11042484641075134,
|
| 548 |
+
"kl": 0.01685810089111328,
|
| 549 |
+
"learning_rate": 1.8246594209071543e-06,
|
| 550 |
+
"loss": 0.0612,
|
| 551 |
+
"reward": 1.068080399185419,
|
| 552 |
+
"reward_std": 0.375900273444131,
|
| 553 |
+
"rewards/accuracy_reward": 0.19419643899891526,
|
| 554 |
+
"rewards/format_reward": 0.873883968219161,
|
| 555 |
+
"step": 39
|
| 556 |
+
},
|
| 557 |
+
{
|
| 558 |
+
"clip_ratio": 0.0,
|
| 559 |
+
"completion_length": 1185.3604793548584,
|
| 560 |
+
"epoch": 9.810126582278482,
|
| 561 |
+
"grad_norm": 0.11164165288209915,
|
| 562 |
+
"kl": 0.01548004150390625,
|
| 563 |
+
"learning_rate": 1.7604722665003958e-06,
|
| 564 |
+
"loss": 0.0371,
|
| 565 |
+
"reward": 1.061383979395032,
|
| 566 |
+
"reward_std": 0.34133189218118787,
|
| 567 |
+
"rewards/accuracy_reward": 0.17299107869621366,
|
| 568 |
+
"rewards/format_reward": 0.8883928935974836,
|
| 569 |
+
"step": 40
|
| 570 |
+
},
|
| 571 |
+
{
|
| 572 |
+
"clip_ratio": 0.0,
|
| 573 |
+
"completion_length": 1076.3594131469727,
|
| 574 |
+
"epoch": 10.20253164556962,
|
| 575 |
+
"grad_norm": 0.16031011939048767,
|
| 576 |
+
"kl": 0.01823902130126953,
|
| 577 |
+
"learning_rate": 1.6957892883300778e-06,
|
| 578 |
+
"loss": 0.0584,
|
| 579 |
+
"reward": 1.1026786230504513,
|
| 580 |
+
"reward_std": 0.3611738483887166,
|
| 581 |
+
"rewards/accuracy_reward": 0.2131696519209072,
|
| 582 |
+
"rewards/format_reward": 0.8895089663565159,
|
| 583 |
+
"step": 41
|
| 584 |
+
},
|
| 585 |
+
{
|
| 586 |
+
"clip_ratio": 0.0,
|
| 587 |
+
"completion_length": 1139.5413455963135,
|
| 588 |
+
"epoch": 10.405063291139241,
|
| 589 |
+
"grad_norm": 0.1290842443704605,
|
| 590 |
+
"kl": 0.015422821044921875,
|
| 591 |
+
"learning_rate": 1.6307336141214877e-06,
|
| 592 |
+
"loss": 0.0619,
|
| 593 |
+
"reward": 1.0714286230504513,
|
| 594 |
+
"reward_std": 0.308660123962909,
|
| 595 |
+
"rewards/accuracy_reward": 0.18191965122241527,
|
| 596 |
+
"rewards/format_reward": 0.889508968219161,
|
| 597 |
+
"step": 42
|
| 598 |
+
},
|
| 599 |
+
{
|
| 600 |
+
"clip_ratio": 0.0,
|
| 601 |
+
"completion_length": 1171.5971488952637,
|
| 602 |
+
"epoch": 10.60759493670886,
|
| 603 |
+
"grad_norm": 0.18351688981056213,
|
| 604 |
+
"kl": 0.016489028930664062,
|
| 605 |
+
"learning_rate": 1.5654290810480041e-06,
|
| 606 |
+
"loss": 0.0774,
|
| 607 |
+
"reward": 1.1361607685685158,
|
| 608 |
+
"reward_std": 0.3446261656936258,
|
| 609 |
+
"rewards/accuracy_reward": 0.23325893853325397,
|
| 610 |
+
"rewards/format_reward": 0.9029018245637417,
|
| 611 |
+
"step": 43
|
| 612 |
+
},
|
| 613 |
+
{
|
| 614 |
+
"clip_ratio": 0.0,
|
| 615 |
+
"completion_length": 1160.4833908081055,
|
| 616 |
+
"epoch": 10.810126582278482,
|
| 617 |
+
"grad_norm": 0.19561266899108887,
|
| 618 |
+
"kl": 0.013916015625,
|
| 619 |
+
"learning_rate": 1.5e-06,
|
| 620 |
+
"loss": 0.059,
|
| 621 |
+
"reward": 1.0591518338769674,
|
| 622 |
+
"reward_std": 0.3170809505973011,
|
| 623 |
+
"rewards/accuracy_reward": 0.16071429196745157,
|
| 624 |
+
"rewards/format_reward": 0.8984375353902578,
|
| 625 |
+
"step": 44
|
| 626 |
+
},
|
| 627 |
+
{
|
| 628 |
+
"clip_ratio": 0.0,
|
| 629 |
+
"completion_length": 1271.4141178131104,
|
| 630 |
+
"epoch": 11.20253164556962,
|
| 631 |
+
"grad_norm": 0.12841616570949554,
|
| 632 |
+
"kl": 0.016252517700195312,
|
| 633 |
+
"learning_rate": 1.4345709189519962e-06,
|
| 634 |
+
"loss": 0.0853,
|
| 635 |
+
"reward": 1.0993304047733545,
|
| 636 |
+
"reward_std": 0.3096196516416967,
|
| 637 |
+
"rewards/accuracy_reward": 0.20535715261939913,
|
| 638 |
+
"rewards/format_reward": 0.8939732499420643,
|
| 639 |
+
"step": 45
|
| 640 |
+
},
|
| 641 |
+
{
|
| 642 |
+
"clip_ratio": 0.0,
|
| 643 |
+
"completion_length": 1125.301383972168,
|
| 644 |
+
"epoch": 11.405063291139241,
|
| 645 |
+
"grad_norm": 0.4378478229045868,
|
| 646 |
+
"kl": 0.020122528076171875,
|
| 647 |
+
"learning_rate": 1.3692663858785126e-06,
|
| 648 |
+
"loss": 0.0546,
|
| 649 |
+
"reward": 1.0714286211878061,
|
| 650 |
+
"reward_std": 0.34814802813343704,
|
| 651 |
+
"rewards/accuracy_reward": 0.18191965098958462,
|
| 652 |
+
"rewards/format_reward": 0.889508968219161,
|
| 653 |
+
"step": 46
|
| 654 |
+
},
|
| 655 |
+
{
|
| 656 |
+
"clip_ratio": 0.0,
|
| 657 |
+
"completion_length": 1158.8650093078613,
|
| 658 |
+
"epoch": 11.60759493670886,
|
| 659 |
+
"grad_norm": 0.10889195650815964,
|
| 660 |
+
"kl": 0.016626358032226562,
|
| 661 |
+
"learning_rate": 1.304210711669923e-06,
|
| 662 |
+
"loss": 0.0547,
|
| 663 |
+
"reward": 1.0825893394649029,
|
| 664 |
+
"reward_std": 0.3138458870816976,
|
| 665 |
+
"rewards/accuracy_reward": 0.16629465005826205,
|
| 666 |
+
"rewards/format_reward": 0.916294677183032,
|
| 667 |
+
"step": 47
|
| 668 |
+
},
|
| 669 |
+
{
|
| 670 |
+
"clip_ratio": 0.0,
|
| 671 |
+
"completion_length": 1108.9167308807373,
|
| 672 |
+
"epoch": 11.810126582278482,
|
| 673 |
+
"grad_norm": 0.14371342957019806,
|
| 674 |
+
"kl": 0.015249252319335938,
|
| 675 |
+
"learning_rate": 1.2395277334996047e-06,
|
| 676 |
+
"loss": 0.0269,
|
| 677 |
+
"reward": 1.1517857667058706,
|
| 678 |
+
"reward_std": 0.3139377860352397,
|
| 679 |
+
"rewards/accuracy_reward": 0.22433036961592734,
|
| 680 |
+
"rewards/format_reward": 0.9274553880095482,
|
| 681 |
+
"step": 48
|
| 682 |
+
},
|
| 683 |
+
{
|
| 684 |
+
"clip_ratio": 0.0,
|
| 685 |
+
"completion_length": 1162.2154502868652,
|
| 686 |
+
"epoch": 12.20253164556962,
|
| 687 |
+
"grad_norm": 0.1115531250834465,
|
| 688 |
+
"kl": 0.013177871704101562,
|
| 689 |
+
"learning_rate": 1.1753405790928457e-06,
|
| 690 |
+
"loss": 0.0628,
|
| 691 |
+
"reward": 1.1294643320143223,
|
| 692 |
+
"reward_std": 0.2925685364753008,
|
| 693 |
+
"rewards/accuracy_reward": 0.18191964994184673,
|
| 694 |
+
"rewards/format_reward": 0.9475446660071611,
|
| 695 |
+
"step": 49
|
| 696 |
+
},
|
| 697 |
+
{
|
| 698 |
+
"clip_ratio": 0.0,
|
| 699 |
+
"completion_length": 1243.0078659057617,
|
| 700 |
+
"epoch": 12.405063291139241,
|
| 701 |
+
"grad_norm": 0.10767732560634613,
|
| 702 |
+
"kl": 0.013563156127929688,
|
| 703 |
+
"learning_rate": 1.1117714323462188e-06,
|
| 704 |
+
"loss": 0.0732,
|
| 705 |
+
"reward": 1.110491119325161,
|
| 706 |
+
"reward_std": 0.3003906770609319,
|
| 707 |
+
"rewards/accuracy_reward": 0.18415179511066526,
|
| 708 |
+
"rewards/format_reward": 0.9263393171131611,
|
| 709 |
+
"step": 50
|
| 710 |
+
},
|
| 711 |
+
{
|
| 712 |
+
"clip_ratio": 0.0,
|
| 713 |
+
"completion_length": 1244.2746181488037,
|
| 714 |
+
"epoch": 12.60759493670886,
|
| 715 |
+
"grad_norm": 0.09185539186000824,
|
| 716 |
+
"kl": 0.03409385681152344,
|
| 717 |
+
"learning_rate": 1.0489413007435905e-06,
|
| 718 |
+
"loss": 0.0717,
|
| 719 |
+
"reward": 1.11049110814929,
|
| 720 |
+
"reward_std": 0.308702711481601,
|
| 721 |
+
"rewards/accuracy_reward": 0.18973215168807656,
|
| 722 |
+
"rewards/format_reward": 0.9207589607685804,
|
| 723 |
+
"step": 51
|
| 724 |
+
},
|
| 725 |
+
{
|
| 726 |
+
"clip_ratio": 0.0,
|
| 727 |
+
"completion_length": 1158.5021438598633,
|
| 728 |
+
"epoch": 12.810126582278482,
|
| 729 |
+
"grad_norm": 0.18302814662456512,
|
| 730 |
+
"kl": 0.02344512939453125,
|
| 731 |
+
"learning_rate": 9.86969785011497e-07,
|
| 732 |
+
"loss": 0.0387,
|
| 733 |
+
"reward": 1.1562500428408384,
|
| 734 |
+
"reward_std": 0.3003106499090791,
|
| 735 |
+
"rewards/accuracy_reward": 0.22767858230508864,
|
| 736 |
+
"rewards/format_reward": 0.9285714570432901,
|
| 737 |
+
"step": 52
|
| 738 |
+
},
|
| 739 |
+
{
|
| 740 |
+
"clip_ratio": 0.0,
|
| 741 |
+
"completion_length": 1237.9732666015625,
|
| 742 |
+
"epoch": 13.20253164556962,
|
| 743 |
+
"grad_norm": 0.11227720230817795,
|
| 744 |
+
"kl": 0.016330718994140625,
|
| 745 |
+
"learning_rate": 9.259748514523654e-07,
|
| 746 |
+
"loss": 0.0601,
|
| 747 |
+
"reward": 1.1171875558793545,
|
| 748 |
+
"reward_std": 0.359636815963313,
|
| 749 |
+
"rewards/accuracy_reward": 0.20758929557632655,
|
| 750 |
+
"rewards/format_reward": 0.9095982518047094,
|
| 751 |
+
"step": 53
|
| 752 |
+
},
|
| 753 |
+
{
|
| 754 |
+
"clip_ratio": 0.0,
|
| 755 |
+
"completion_length": 1117.2076396942139,
|
| 756 |
+
"epoch": 13.405063291139241,
|
| 757 |
+
"grad_norm": 0.1859021931886673,
|
| 758 |
+
"kl": 0.0164947509765625,
|
| 759 |
+
"learning_rate": 8.660726073889511e-07,
|
| 760 |
+
"loss": 0.0416,
|
| 761 |
+
"reward": 1.1428571790456772,
|
| 762 |
+
"reward_std": 0.2875336476135999,
|
| 763 |
+
"rewards/accuracy_reward": 0.20424108090810478,
|
| 764 |
+
"rewards/format_reward": 0.9386161006987095,
|
| 765 |
+
"step": 54
|
| 766 |
+
},
|
| 767 |
+
{
|
| 768 |
+
"clip_ratio": 0.0,
|
| 769 |
+
"completion_length": 1236.578176498413,
|
| 770 |
+
"epoch": 13.60759493670886,
|
| 771 |
+
"grad_norm": 0.3497838079929352,
|
| 772 |
+
"kl": 0.01940155029296875,
|
| 773 |
+
"learning_rate": 8.073770801474494e-07,
|
| 774 |
+
"loss": 0.0805,
|
| 775 |
+
"reward": 1.1495536267757416,
|
| 776 |
+
"reward_std": 0.3257214743643999,
|
| 777 |
+
"rewards/accuracy_reward": 0.22433036705479026,
|
| 778 |
+
"rewards/format_reward": 0.9252232424914837,
|
| 779 |
+
"step": 55
|
| 780 |
+
},
|
| 781 |
+
{
|
| 782 |
+
"clip_ratio": 0.0,
|
| 783 |
+
"completion_length": 1211.5646495819092,
|
| 784 |
+
"epoch": 13.810126582278482,
|
| 785 |
+
"grad_norm": 0.19044233858585358,
|
| 786 |
+
"kl": 0.01702880859375,
|
| 787 |
+
"learning_rate": 7.500000000000003e-07,
|
| 788 |
+
"loss": 0.0655,
|
| 789 |
+
"reward": 1.1183036211878061,
|
| 790 |
+
"reward_std": 0.298118500970304,
|
| 791 |
+
"rewards/accuracy_reward": 0.19196429604198784,
|
| 792 |
+
"rewards/format_reward": 0.9263393171131611,
|
| 793 |
+
"step": 56
|
| 794 |
+
},
|
| 795 |
+
{
|
| 796 |
+
"clip_ratio": 0.0,
|
| 797 |
+
"completion_length": 1143.214334487915,
|
| 798 |
+
"epoch": 14.20253164556962,
|
| 799 |
+
"grad_norm": 1.5419952869415283,
|
| 800 |
+
"kl": 0.035312652587890625,
|
| 801 |
+
"learning_rate": 6.94050587479764e-07,
|
| 802 |
+
"loss": 0.0659,
|
| 803 |
+
"reward": 1.1439732648432255,
|
| 804 |
+
"reward_std": 0.3306191167794168,
|
| 805 |
+
"rewards/accuracy_reward": 0.22879465378355235,
|
| 806 |
+
"rewards/format_reward": 0.9151786025613546,
|
| 807 |
+
"step": 57
|
| 808 |
+
},
|
| 809 |
+
{
|
| 810 |
+
"clip_ratio": 0.0,
|
| 811 |
+
"completion_length": 1238.2567501068115,
|
| 812 |
+
"epoch": 14.405063291139241,
|
| 813 |
+
"grad_norm": 0.15846788883209229,
|
| 814 |
+
"kl": 0.016237258911132812,
|
| 815 |
+
"learning_rate": 6.396353454734313e-07,
|
| 816 |
+
"loss": 0.0613,
|
| 817 |
+
"reward": 1.1305804066359997,
|
| 818 |
+
"reward_std": 0.2859719139523804,
|
| 819 |
+
"rewards/accuracy_reward": 0.1941964365541935,
|
| 820 |
+
"rewards/format_reward": 0.9363839570432901,
|
| 821 |
+
"step": 58
|
| 822 |
+
},
|
| 823 |
+
{
|
| 824 |
+
"clip_ratio": 0.0,
|
| 825 |
+
"completion_length": 1179.9319820404053,
|
| 826 |
+
"epoch": 14.60759493670886,
|
| 827 |
+
"grad_norm": 0.2593388855457306,
|
| 828 |
+
"kl": 0.014705657958984375,
|
| 829 |
+
"learning_rate": 5.868578564869191e-07,
|
| 830 |
+
"loss": 0.0796,
|
| 831 |
+
"reward": 1.1138393357396126,
|
| 832 |
+
"reward_std": 0.30559679167345166,
|
| 833 |
+
"rewards/accuracy_reward": 0.17075893632136285,
|
| 834 |
+
"rewards/format_reward": 0.9430803842842579,
|
| 835 |
+
"step": 59
|
| 836 |
+
},
|
| 837 |
+
{
|
| 838 |
+
"clip_ratio": 0.0,
|
| 839 |
+
"completion_length": 1276.8438034057617,
|
| 840 |
+
"epoch": 14.810126582278482,
|
| 841 |
+
"grad_norm": 134161.59375,
|
| 842 |
+
"kl": 46.516122817993164,
|
| 843 |
+
"learning_rate": 5.358185854701909e-07,
|
| 844 |
+
"loss": 3.7375,
|
| 845 |
+
"reward": 1.1573661230504513,
|
| 846 |
+
"reward_std": 0.30025100195780396,
|
| 847 |
+
"rewards/accuracy_reward": 0.21316965157166123,
|
| 848 |
+
"rewards/format_reward": 0.944196455180645,
|
| 849 |
+
"step": 60
|
| 850 |
+
},
|
| 851 |
+
{
|
| 852 |
+
"clip_ratio": 0.0,
|
| 853 |
+
"completion_length": 1248.2243881225586,
|
| 854 |
+
"epoch": 15.20253164556962,
|
| 855 |
+
"grad_norm": 0.1083560436964035,
|
| 856 |
+
"kl": 0.022684097290039062,
|
| 857 |
+
"learning_rate": 4.866146885765096e-07,
|
| 858 |
+
"loss": 0.0465,
|
| 859 |
+
"reward": 1.081473272293806,
|
| 860 |
+
"reward_std": 0.2627506146673113,
|
| 861 |
+
"rewards/accuracy_reward": 0.15401786530856043,
|
| 862 |
+
"rewards/format_reward": 0.9274553898721933,
|
| 863 |
+
"step": 61
|
| 864 |
+
},
|
| 865 |
+
{
|
| 866 |
+
"clip_ratio": 0.0,
|
| 867 |
+
"completion_length": 1141.5882091522217,
|
| 868 |
+
"epoch": 15.405063291139241,
|
| 869 |
+
"grad_norm": 0.13774190843105316,
|
| 870 |
+
"kl": 0.023365020751953125,
|
| 871 |
+
"learning_rate": 4.3933982822017883e-07,
|
| 872 |
+
"loss": 0.0737,
|
| 873 |
+
"reward": 1.1640625558793545,
|
| 874 |
+
"reward_std": 0.30663056182675064,
|
| 875 |
+
"rewards/accuracy_reward": 0.22544643806759268,
|
| 876 |
+
"rewards/format_reward": 0.9386161006987095,
|
| 877 |
+
"step": 62
|
| 878 |
+
},
|
| 879 |
+
{
|
| 880 |
+
"clip_ratio": 0.0,
|
| 881 |
+
"completion_length": 1172.7221584320068,
|
| 882 |
+
"epoch": 15.60759493670886,
|
| 883 |
+
"grad_norm": 0.17014561593532562,
|
| 884 |
+
"kl": 0.018375396728515625,
|
| 885 |
+
"learning_rate": 3.9408399478481406e-07,
|
| 886 |
+
"loss": 0.0651,
|
| 887 |
+
"reward": 1.1718750577419996,
|
| 888 |
+
"reward_std": 0.31751804961822927,
|
| 889 |
+
"rewards/accuracy_reward": 0.23549108125735074,
|
| 890 |
+
"rewards/format_reward": 0.9363839570432901,
|
| 891 |
+
"step": 63
|
| 892 |
+
},
|
| 893 |
+
{
|
| 894 |
+
"clip_ratio": 0.0,
|
| 895 |
+
"completion_length": 1135.0229759216309,
|
| 896 |
+
"epoch": 15.810126582278482,
|
| 897 |
+
"grad_norm": 0.15105918049812317,
|
| 898 |
+
"kl": 0.018392562866210938,
|
| 899 |
+
"learning_rate": 3.5093333532153313e-07,
|
| 900 |
+
"loss": 0.0429,
|
| 901 |
+
"reward": 1.1841518245637417,
|
| 902 |
+
"reward_std": 0.29690629336982965,
|
| 903 |
+
"rewards/accuracy_reward": 0.26004465436562896,
|
| 904 |
+
"rewards/format_reward": 0.9241071715950966,
|
| 905 |
+
"step": 64
|
| 906 |
+
},
|
| 907 |
+
{
|
| 908 |
+
"clip_ratio": 0.0,
|
| 909 |
+
"completion_length": 1078.2868738174438,
|
| 910 |
+
"epoch": 16.20253164556962,
|
| 911 |
+
"grad_norm": 0.13650694489479065,
|
| 912 |
+
"kl": 0.018194198608398438,
|
| 913 |
+
"learning_rate": 3.0996998956314745e-07,
|
| 914 |
+
"loss": 0.0484,
|
| 915 |
+
"reward": 1.1852679140865803,
|
| 916 |
+
"reward_std": 0.2528684106655419,
|
| 917 |
+
"rewards/accuracy_reward": 0.22656250791624188,
|
| 918 |
+
"rewards/format_reward": 0.9587053749710321,
|
| 919 |
+
"step": 65
|
| 920 |
+
},
|
| 921 |
+
{
|
| 922 |
+
"clip_ratio": 0.0,
|
| 923 |
+
"completion_length": 1191.1630020141602,
|
| 924 |
+
"epoch": 16.40506329113924,
|
| 925 |
+
"grad_norm": 0.15124773979187012,
|
| 926 |
+
"kl": 0.01834869384765625,
|
| 927 |
+
"learning_rate": 2.7127193356651214e-07,
|
| 928 |
+
"loss": 0.0597,
|
| 929 |
+
"reward": 1.1328125484287739,
|
| 930 |
+
"reward_std": 0.2856026024091989,
|
| 931 |
+
"rewards/accuracy_reward": 0.18526786810252815,
|
| 932 |
+
"rewards/format_reward": 0.9475446678698063,
|
| 933 |
+
"step": 66
|
| 934 |
+
},
|
| 935 |
+
{
|
| 936 |
+
"clip_ratio": 0.0,
|
| 937 |
+
"completion_length": 1234.5659103393555,
|
| 938 |
+
"epoch": 16.60759493670886,
|
| 939 |
+
"grad_norm": 0.139739990234375,
|
| 940 |
+
"kl": 0.019105911254882812,
|
| 941 |
+
"learning_rate": 2.3491283128067176e-07,
|
| 942 |
+
"loss": 0.0696,
|
| 943 |
+
"reward": 1.1473214849829674,
|
| 944 |
+
"reward_std": 0.2781486874446273,
|
| 945 |
+
"rewards/accuracy_reward": 0.2075892968568951,
|
| 946 |
+
"rewards/format_reward": 0.9397321715950966,
|
| 947 |
+
"step": 67
|
| 948 |
+
},
|
| 949 |
+
{
|
| 950 |
+
"clip_ratio": 0.0,
|
| 951 |
+
"completion_length": 1205.6500625610352,
|
| 952 |
+
"epoch": 16.810126582278482,
|
| 953 |
+
"grad_norm": 0.14179465174674988,
|
| 954 |
+
"kl": 0.018032073974609375,
|
| 955 |
+
"learning_rate": 2.0096189432334195e-07,
|
| 956 |
+
"loss": 0.0426,
|
| 957 |
+
"reward": 1.1506696976721287,
|
| 958 |
+
"reward_std": 0.29133763001300395,
|
| 959 |
+
"rewards/accuracy_reward": 0.21651786845177412,
|
| 960 |
+
"rewards/format_reward": 0.9341518152505159,
|
| 961 |
+
"step": 68
|
| 962 |
+
},
|
| 963 |
+
{
|
| 964 |
+
"clip_ratio": 0.0,
|
| 965 |
+
"completion_length": 1186.745590209961,
|
| 966 |
+
"epoch": 17.20253164556962,
|
| 967 |
+
"grad_norm": 0.13754098117351532,
|
| 968 |
+
"kl": 0.016582489013671875,
|
| 969 |
+
"learning_rate": 1.6948375023266743e-07,
|
| 970 |
+
"loss": 0.0482,
|
| 971 |
+
"reward": 1.1607143357396126,
|
| 972 |
+
"reward_std": 0.2897793191950768,
|
| 973 |
+
"rewards/accuracy_reward": 0.20982143899891526,
|
| 974 |
+
"rewards/format_reward": 0.9508928786963224,
|
| 975 |
+
"step": 69
|
| 976 |
+
},
|
| 977 |
+
{
|
| 978 |
+
"clip_ratio": 0.0,
|
| 979 |
+
"completion_length": 1208.3125495910645,
|
| 980 |
+
"epoch": 17.40506329113924,
|
| 981 |
+
"grad_norm": 0.12631621956825256,
|
| 982 |
+
"kl": 0.01808929443359375,
|
| 983 |
+
"learning_rate": 1.405383194450251e-07,
|
| 984 |
+
"loss": 0.0554,
|
| 985 |
+
"reward": 1.1183036155998707,
|
| 986 |
+
"reward_std": 0.26985309086740017,
|
| 987 |
+
"rewards/accuracy_reward": 0.18191965017467737,
|
| 988 |
+
"rewards/format_reward": 0.9363839570432901,
|
| 989 |
+
"step": 70
|
| 990 |
+
},
|
| 991 |
+
{
|
| 992 |
+
"clip_ratio": 0.0,
|
| 993 |
+
"completion_length": 1204.0859870910645,
|
| 994 |
+
"epoch": 17.60759493670886,
|
| 995 |
+
"grad_norm": 0.12439820170402527,
|
| 996 |
+
"kl": 0.01674652099609375,
|
| 997 |
+
"learning_rate": 1.141807012330699e-07,
|
| 998 |
+
"loss": 0.061,
|
| 999 |
+
"reward": 1.10267860814929,
|
| 1000 |
+
"reward_std": 0.30230870423838496,
|
| 1001 |
+
"rewards/accuracy_reward": 0.16406250756699592,
|
| 1002 |
+
"rewards/format_reward": 0.9386160988360643,
|
| 1003 |
+
"step": 71
|
| 1004 |
+
},
|
| 1005 |
+
{
|
| 1006 |
+
"clip_ratio": 0.0,
|
| 1007 |
+
"completion_length": 1168.6833896636963,
|
| 1008 |
+
"epoch": 17.810126582278482,
|
| 1009 |
+
"grad_norm": 0.11665742844343185,
|
| 1010 |
+
"kl": 0.025030136108398438,
|
| 1011 |
+
"learning_rate": 9.046106882113752e-08,
|
| 1012 |
+
"loss": 0.051,
|
| 1013 |
+
"reward": 1.1763393394649029,
|
| 1014 |
+
"reward_std": 0.32615134259685874,
|
| 1015 |
+
"rewards/accuracy_reward": 0.23995536949951202,
|
| 1016 |
+
"rewards/format_reward": 0.9363839589059353,
|
| 1017 |
+
"step": 72
|
| 1018 |
+
},
|
| 1019 |
+
{
|
| 1020 |
+
"clip_ratio": 0.0,
|
| 1021 |
+
"completion_length": 1177.4955806732178,
|
| 1022 |
+
"epoch": 18.20253164556962,
|
| 1023 |
+
"grad_norm": 0.1363748013973236,
|
| 1024 |
+
"kl": 0.018270492553710938,
|
| 1025 |
+
"learning_rate": 6.942457387765977e-08,
|
| 1026 |
+
"loss": 0.0674,
|
| 1027 |
+
"reward": 1.1439732611179352,
|
| 1028 |
+
"reward_std": 0.2613161935005337,
|
| 1029 |
+
"rewards/accuracy_reward": 0.19754465110599995,
|
| 1030 |
+
"rewards/format_reward": 0.946428595110774,
|
| 1031 |
+
"step": 73
|
| 1032 |
+
},
|
| 1033 |
+
{
|
| 1034 |
+
"clip_ratio": 0.0,
|
| 1035 |
+
"completion_length": 1190.054738998413,
|
| 1036 |
+
"epoch": 18.40506329113924,
|
| 1037 |
+
"grad_norm": 0.12824372947216034,
|
| 1038 |
+
"kl": 0.018024444580078125,
|
| 1039 |
+
"learning_rate": 5.11112605663977e-08,
|
| 1040 |
+
"loss": 0.0457,
|
| 1041 |
+
"reward": 1.1283482611179352,
|
| 1042 |
+
"reward_std": 0.2721500853076577,
|
| 1043 |
+
"rewards/accuracy_reward": 0.1919642947614193,
|
| 1044 |
+
"rewards/format_reward": 0.9363839570432901,
|
| 1045 |
+
"step": 74
|
| 1046 |
+
},
|
| 1047 |
+
{
|
| 1048 |
+
"clip_ratio": 0.0,
|
| 1049 |
+
"completion_length": 1171.8717079162598,
|
| 1050 |
+
"epoch": 18.60759493670886,
|
| 1051 |
+
"grad_norm": 0.9167880415916443,
|
| 1052 |
+
"kl": 0.0291290283203125,
|
| 1053 |
+
"learning_rate": 3.5555989320099955e-08,
|
| 1054 |
+
"loss": 0.0824,
|
| 1055 |
+
"reward": 1.1261161230504513,
|
| 1056 |
+
"reward_std": 0.2760429719928652,
|
| 1057 |
+
"rewards/accuracy_reward": 0.1953125084983185,
|
| 1058 |
+
"rewards/format_reward": 0.9308036025613546,
|
| 1059 |
+
"step": 75
|
| 1060 |
+
},
|
| 1061 |
+
{
|
| 1062 |
+
"clip_ratio": 0.0,
|
| 1063 |
+
"completion_length": 1209.302146911621,
|
| 1064 |
+
"epoch": 18.810126582278482,
|
| 1065 |
+
"grad_norm": 0.09688866138458252,
|
| 1066 |
+
"kl": 0.017160415649414062,
|
| 1067 |
+
"learning_rate": 2.278837048168797e-08,
|
| 1068 |
+
"loss": 0.0286,
|
| 1069 |
+
"reward": 1.1729911025613546,
|
| 1070 |
+
"reward_std": 0.2933972212485969,
|
| 1071 |
+
"rewards/accuracy_reward": 0.23102679941803217,
|
| 1072 |
+
"rewards/format_reward": 0.9419643096625805,
|
| 1073 |
+
"step": 76
|
| 1074 |
+
},
|
| 1075 |
+
{
|
| 1076 |
+
"clip_ratio": 0.0,
|
| 1077 |
+
"completion_length": 1140.1205825805664,
|
| 1078 |
+
"epoch": 19.20253164556962,
|
| 1079 |
+
"grad_norm": 0.12310791015625,
|
| 1080 |
+
"kl": 0.024255752563476562,
|
| 1081 |
+
"learning_rate": 1.2832707939284426e-08,
|
| 1082 |
+
"loss": 0.0368,
|
| 1083 |
+
"reward": 1.1975446920841932,
|
| 1084 |
+
"reward_std": 0.29775712732225657,
|
| 1085 |
+
"rewards/accuracy_reward": 0.2533482244471088,
|
| 1086 |
+
"rewards/format_reward": 0.9441964514553547,
|
| 1087 |
+
"step": 77
|
| 1088 |
+
},
|
| 1089 |
+
{
|
| 1090 |
+
"clip_ratio": 0.0,
|
| 1091 |
+
"completion_length": 1153.5368843078613,
|
| 1092 |
+
"epoch": 19.40506329113924,
|
| 1093 |
+
"grad_norm": 0.15291966497898102,
|
| 1094 |
+
"kl": 0.01901531219482422,
|
| 1095 |
+
"learning_rate": 5.707952862381682e-09,
|
| 1096 |
+
"loss": 0.054,
|
| 1097 |
+
"reward": 1.099330399185419,
|
| 1098 |
+
"reward_std": 0.2274289857596159,
|
| 1099 |
+
"rewards/accuracy_reward": 0.14285714621655643,
|
| 1100 |
+
"rewards/format_reward": 0.9564732350409031,
|
| 1101 |
+
"step": 78
|
| 1102 |
+
},
|
| 1103 |
+
{
|
| 1104 |
+
"clip_ratio": 0.0,
|
| 1105 |
+
"completion_length": 1215.3181285858154,
|
| 1106 |
+
"epoch": 19.60759493670886,
|
| 1107 |
+
"grad_norm": 0.11192745715379715,
|
| 1108 |
+
"kl": 0.017858505249023438,
|
| 1109 |
+
"learning_rate": 1.4276676272133026e-09,
|
| 1110 |
+
"loss": 0.0329,
|
| 1111 |
+
"reward": 1.1216518245637417,
|
| 1112 |
+
"reward_std": 0.2780349128879607,
|
| 1113 |
+
"rewards/accuracy_reward": 0.19308036274742335,
|
| 1114 |
+
"rewards/format_reward": 0.9285714607685804,
|
| 1115 |
+
"step": 79
|
| 1116 |
+
},
|
| 1117 |
+
{
|
| 1118 |
+
"clip_ratio": 0.0,
|
| 1119 |
+
"completion_length": 1280.5479927062988,
|
| 1120 |
+
"epoch": 19.810126582278482,
|
| 1121 |
+
"grad_norm": 0.12247645109891891,
|
| 1122 |
+
"kl": 0.018152236938476562,
|
| 1123 |
+
"learning_rate": 0.0,
|
| 1124 |
+
"loss": 0.0587,
|
| 1125 |
+
"reward": 1.1674107685685158,
|
| 1126 |
+
"reward_std": 0.30244723055511713,
|
| 1127 |
+
"rewards/accuracy_reward": 0.23549108253791928,
|
| 1128 |
+
"rewards/format_reward": 0.9319196753203869,
|
| 1129 |
+
"step": 80
|
| 1130 |
+
},
|
| 1131 |
+
{
|
| 1132 |
+
"epoch": 19.810126582278482,
|
| 1133 |
+
"step": 80,
|
| 1134 |
+
"total_flos": 0.0,
|
| 1135 |
+
"train_loss": 0.10659663751721382,
|
| 1136 |
+
"train_runtime": 145281.9904,
|
| 1137 |
+
"train_samples_per_second": 0.087,
|
| 1138 |
+
"train_steps_per_second": 0.001
|
| 1139 |
+
}
|
| 1140 |
+
],
|
| 1141 |
+
"logging_steps": 1,
|
| 1142 |
+
"max_steps": 80,
|
| 1143 |
+
"num_input_tokens_seen": 0,
|
| 1144 |
+
"num_train_epochs": 20,
|
| 1145 |
+
"save_steps": 10,
|
| 1146 |
+
"stateful_callbacks": {
|
| 1147 |
+
"TrainerControl": {
|
| 1148 |
+
"args": {
|
| 1149 |
+
"should_epoch_stop": false,
|
| 1150 |
+
"should_evaluate": false,
|
| 1151 |
+
"should_log": false,
|
| 1152 |
+
"should_save": true,
|
| 1153 |
+
"should_training_stop": true
|
| 1154 |
+
},
|
| 1155 |
+
"attributes": {}
|
| 1156 |
+
}
|
| 1157 |
+
},
|
| 1158 |
+
"total_flos": 0.0,
|
| 1159 |
+
"train_batch_size": 4,
|
| 1160 |
+
"trial_name": null,
|
| 1161 |
+
"trial_params": null
|
| 1162 |
+
}
|
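The log_history above records per-step GRPO training metrics (loss, kl, reward, rewards/accuracy_reward, rewards/format_reward). A minimal sketch for inspecting those entries once trainer_state.json has been downloaded locally (the local file path and the printed fields are assumptions for illustration, not part of this commit):

import json

# Load the trainer state shown in the diff above (assumed downloaded locally).
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step entries carry a "reward" key; the final summary entry does not.
logs = [entry for entry in state["log_history"] if "reward" in entry]

for entry in logs:
    print(
        f"step {entry['step']:3d}  "
        f"reward {entry['reward']:.3f}  "
        f"accuracy {entry['rewards/accuracy_reward']:.3f}  "
        f"format {entry['rewards/format_reward']:.3f}"
    )
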
training_args.bin
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1eac3eeaaff024929fd363cc09cd0ded01b0b96ceb7501cec487c056c068ae18
|
| 3 |
+
size 8248
|
vocab.json
ADDED
|
The diff for this file is too large to render.
|