davidanugraha committed (verified)
Commit f6461dc · 1 Parent(s): 8c34c3d

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ library_name: transformers
+ license: other
+ base_model: davidanugraha/Qwen3-4B-Concise-SimPO-StrategyB-Stage1
+ tags:
+ - llama-factory
+ - full
+ - generated_from_trainer
+ model-index:
+ - name: concise_phase2_short_qwen3_4b_config1_new
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # concise_phase2_short_qwen3_4b_config1_new
+
+ This model is a fine-tuned version of [davidanugraha/Qwen3-4B-Concise-SimPO-StrategyB-Stage1](https://huggingface.co/davidanugraha/Qwen3-4B-Concise-SimPO-StrategyB-Stage1) on the dpo_concise_phase2_short dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-06
+ - train_batch_size: 1
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 32
+ - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 2.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.52.4
+ - Pytorch 2.7.1
+ - Datasets 3.6.0
+ - Tokenizers 0.21.1
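
A minimal loading sketch for the files in this commit. `REPO_ID` is an assumption (the commit page does not name the repository); everything else is the stock `transformers` API pinned under Framework versions above.

```python
# Minimal sketch: load the checkpoint uploaded in this commit.
# REPO_ID is hypothetical -- substitute the actual repository path.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

REPO_ID = "davidanugraha/concise_phase2_short_qwen3_4b_config1_new"  # assumption

tokenizer = AutoTokenizer.from_pretrained(REPO_ID)
model = AutoModelForCausalLM.from_pretrained(
    REPO_ID,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    device_map="auto",
)
```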
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "</think>": 151668,
+   "</tool_call>": 151658,
+   "</tool_response>": 151666,
+   "<think>": 151667,
+   "<tool_call>": 151657,
+   "<tool_response>": 151665,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
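
The map above pins the chat, tool-call, and reasoning markers to fixed ids at the top of the vocabulary. A quick sanity check against the tokenizer, reusing the hypothetical `REPO_ID` from the sketch above:

```python
# Verify a few added-token ids against added_tokens.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(REPO_ID)  # REPO_ID as assumed above
for token, expected in [("<think>", 151667), ("</think>", 151668),
                        ("<tool_call>", 151657), ("<|im_end|>", 151645)]:
    assert tokenizer.convert_tokens_to_ids(token) == expected, token
```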
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 2.0,
+   "total_flos": 44833908572160.0,
+   "train_loss": 0.9574610433157753,
+   "train_runtime": 3050.2608,
+   "train_samples_per_second": 2.849,
+   "train_steps_per_second": 0.045
+ }
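
These numbers are consistent with the README hyperparameters: at an effective batch of 64 (1 per device × 4 devices × 16 accumulation steps), the logged throughput implies the 136 optimizer steps recorded in trainer_log.jsonl below. The arithmetic:

```python
# Cross-check all_results.json against the README hyperparameters.
train_runtime = 3050.2608       # seconds, from all_results.json
samples_per_sec = 2.849         # from all_results.json
effective_batch = 1 * 4 * 16    # per-device batch * devices * grad accumulation

total_samples = train_runtime * samples_per_sec   # ~8690 over 2 epochs
print(round(total_samples / effective_batch))     # ~136, the total_steps in trainer_log.jsonl
```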
chat_template.jinja ADDED
@@ -0,0 +1,89 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {{- messages[0].content + '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- if message.content is string %}
+ {%- set content = message.content %}
+ {%- else %}
+ {%- set content = '' %}
+ {%- endif %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {%- set reasoning_content = '' %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in content %}
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if loop.index0 > ns.last_query_index %}
+ {%- if loop.last or (not loop.last and reasoning_content) %}
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if (loop.first and content) or (not loop.first) %}
+ {{- '\n' }}
+ {%- endif %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {%- if tool_call.arguments is string %}
+ {{- tool_call.arguments }}
+ {%- else %}
+ {{- tool_call.arguments | tojson }}
+ {%- endif %}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- if enable_thinking is defined and enable_thinking is false %}
+ {{- '<think>\n\n</think>\n\n' }}
+ {%- endif %}
+ {%- endif %}
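
The template is applied through `tokenizer.apply_chat_template`, which forwards extra keyword arguments into the Jinja context; `enable_thinking=False` triggers the final branch above, emitting an empty `<think>` block so the model answers directly. A sketch, with `REPO_ID` hypothetical as before:

```python
# Render a prompt through the chat template in this commit.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(REPO_ID)  # assumption, as above
messages = [{"role": "user", "content": "Summarize SimPO in one line."}]
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=False,  # forwarded into the template's last branch
)
print(prompt)  # ends with: <|im_start|>assistant\n<think>\n\n</think>\n\n
```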
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "architectures": [
+     "Qwen3ForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 2560,
+   "initializer_range": 0.02,
+   "intermediate_size": 9728,
+   "max_position_embeddings": 40960,
+   "max_window_layers": 36,
+   "model_type": "qwen3",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 36,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000,
+   "sliding_window": null,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.52.4",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
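
These shapes account for the checkpoint size: with GQA (32 query heads, 8 KV heads, head_dim 128), tied embeddings, and 36 layers, the model comes to roughly 4.02B parameters, i.e. about 8.04 GB in bf16, matching the "total_size" of 8,044,936,192 bytes in model.safetensors.index.json below (the small residual is the RMSNorm and q/k-norm vectors). The arithmetic:

```python
# Parameter count from config.json (norm vectors omitted; they add ~0.2M).
h, inter, layers, vocab = 2560, 9728, 36, 151936
q_heads, kv_heads, head_dim = 32, 8, 128

attn = h * q_heads * head_dim            # q_proj
attn += 2 * h * kv_heads * head_dim      # k_proj + v_proj
attn += q_heads * head_dim * h           # o_proj
mlp = 3 * h * inter                      # gate_proj + up_proj + down_proj
embed = vocab * h                        # tied with lm_head, counted once

total = layers * (attn + mlp) + embed
print(total, total * 2)                  # ~4.02e9 params, ~8.04e9 bytes in bf16
```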
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "temperature": 0.6,
+   "top_k": 20,
+   "top_p": 0.95,
+   "transformers_version": "4.52.4"
+ }
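
These sampling defaults (temperature 0.6, top-k 20, top-p 0.95) are picked up automatically by `model.generate` when the model is loaded via `from_pretrained`. A sketch reusing the objects from the earlier snippets:

```python
# Generate with the repo's default sampling settings; no sampling arguments
# are needed because generation_config.json is loaded automatically.
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(output_ids[0][inputs["input_ids"].shape[-1]:],
                       skip_special_tokens=True))
```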
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3f9e02b6844e86d9bf2897f244cf0f7838636b5063ea03a9cd9221091211092
+ size 4967215360
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0fca0d52800ac075944f4858b6159a7f2852807778b0dd28de257b0b7c78b8b
+ size 3077766632
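
The two entries above are Git LFS pointers; the actual shards live on the Hub. Since the commit message says the folder was uploaded with `huggingface_hub`, the matching download sketch (again with the hypothetical `REPO_ID`) is:

```python
# Fetch the full snapshot (shards, tokenizer, configs) to a local directory.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(REPO_ID)  # REPO_ID as assumed earlier
print(local_dir)
```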
model.safetensors.index.json ADDED
@@ -0,0 +1,405 @@
+ {
+   "metadata": {
+     "total_size": 8044936192
+   },
+   "weight_map": {
+     "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.20.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.norm.weight": "model-00002-of-00002.safetensors"
+   }
+ }
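
`transformers` resolves this index transparently, but it can also be followed by hand: each tensor name maps to the shard that stores it (note that layer 20 straddles the shard boundary, with its gate/up projections still in shard 1). A sketch with the `safetensors` library, assuming a local snapshot such as the `local_dir` returned by the download sketch above:

```python
# Load one tensor by following model.safetensors.index.json manually.
import json, os
from safetensors import safe_open

local_dir = "./snapshot"  # assumption: a local copy of this repo
with open(os.path.join(local_dir, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "model.layers.20.mlp.gate_proj.weight"
shard = index["weight_map"][name]  # -> model-00001-of-00002.safetensors
with safe_open(os.path.join(local_dir, shard), framework="pt") as f:
    print(shard, tuple(f.get_tensor(name).shape))  # expect (9728, 2560)
```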
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
+ size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,240 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151665": {
+       "content": "<tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151666": {
+       "content": "</tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151667": {
+       "content": "<think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151668": {
+       "content": "</think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 2.0,
+   "total_flos": 44833908572160.0,
+   "train_loss": 0.9574610433157753,
+   "train_runtime": 3050.2608,
+   "train_samples_per_second": 2.849,
+   "train_steps_per_second": 0.045
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,137 @@
+ {"current_steps": 1, "total_steps": 136, "loss": 1.0051, "accuracy": 0.4375, "lr": 0.0, "epoch": 0.014719411223551058, "percentage": 0.74, "elapsed_time": "0:00:27", "remaining_time": "1:02:57"}
+ {"current_steps": 2, "total_steps": 136, "loss": 1.0176, "accuracy": 0.359375, "lr": 7.142857142857142e-08, "epoch": 0.029438822447102116, "percentage": 1.47, "elapsed_time": "0:00:52", "remaining_time": "0:58:56"}
+ {"current_steps": 3, "total_steps": 136, "loss": 0.9677, "accuracy": 0.4375, "lr": 1.4285714285714285e-07, "epoch": 0.04415823367065318, "percentage": 2.21, "elapsed_time": "0:01:15", "remaining_time": "0:55:49"}
+ {"current_steps": 4, "total_steps": 136, "loss": 1.0147, "accuracy": 0.5, "lr": 2.1428571428571426e-07, "epoch": 0.05887764489420423, "percentage": 2.94, "elapsed_time": "0:01:37", "remaining_time": "0:53:43"}
+ {"current_steps": 5, "total_steps": 136, "loss": 1.0084, "accuracy": 0.453125, "lr": 2.857142857142857e-07, "epoch": 0.07359705611775529, "percentage": 3.68, "elapsed_time": "0:01:59", "remaining_time": "0:52:19"}
+ {"current_steps": 6, "total_steps": 136, "loss": 1.0025, "accuracy": 0.359375, "lr": 3.5714285714285716e-07, "epoch": 0.08831646734130635, "percentage": 4.41, "elapsed_time": "0:02:22", "remaining_time": "0:51:27"}
+ {"current_steps": 7, "total_steps": 136, "loss": 1.0125, "accuracy": 0.453125, "lr": 4.285714285714285e-07, "epoch": 0.10303587856485741, "percentage": 5.15, "elapsed_time": "0:02:44", "remaining_time": "0:50:31"}
+ {"current_steps": 8, "total_steps": 136, "loss": 0.9824, "accuracy": 0.421875, "lr": 5e-07, "epoch": 0.11775528978840846, "percentage": 5.88, "elapsed_time": "0:03:06", "remaining_time": "0:49:49"}
+ {"current_steps": 9, "total_steps": 136, "loss": 0.9864, "accuracy": 0.40625, "lr": 5.714285714285714e-07, "epoch": 0.13247470101195952, "percentage": 6.62, "elapsed_time": "0:03:27", "remaining_time": "0:48:51"}
+ {"current_steps": 10, "total_steps": 136, "loss": 0.9971, "accuracy": 0.46875, "lr": 6.428571428571429e-07, "epoch": 0.14719411223551057, "percentage": 7.35, "elapsed_time": "0:03:49", "remaining_time": "0:48:11"}
+ {"current_steps": 11, "total_steps": 136, "loss": 1.0003, "accuracy": 0.453125, "lr": 7.142857142857143e-07, "epoch": 0.16191352345906163, "percentage": 8.09, "elapsed_time": "0:04:10", "remaining_time": "0:47:21"}
+ {"current_steps": 12, "total_steps": 136, "loss": 1.0228, "accuracy": 0.390625, "lr": 7.857142857142856e-07, "epoch": 0.1766329346826127, "percentage": 8.82, "elapsed_time": "0:04:32", "remaining_time": "0:47:00"}
+ {"current_steps": 13, "total_steps": 136, "loss": 1.0224, "accuracy": 0.4375, "lr": 8.57142857142857e-07, "epoch": 0.19135234590616376, "percentage": 9.56, "elapsed_time": "0:04:56", "remaining_time": "0:46:47"}
+ {"current_steps": 14, "total_steps": 136, "loss": 1.0276, "accuracy": 0.28125, "lr": 9.285714285714285e-07, "epoch": 0.20607175712971482, "percentage": 10.29, "elapsed_time": "0:05:17", "remaining_time": "0:46:04"}
+ {"current_steps": 15, "total_steps": 136, "loss": 1.0038, "accuracy": 0.375, "lr": 1e-06, "epoch": 0.22079116835326587, "percentage": 11.03, "elapsed_time": "0:05:38", "remaining_time": "0:45:31"}
+ {"current_steps": 16, "total_steps": 136, "loss": 0.9802, "accuracy": 0.59375, "lr": 9.918032786885245e-07, "epoch": 0.23551057957681693, "percentage": 11.76, "elapsed_time": "0:06:00", "remaining_time": "0:45:03"}
+ {"current_steps": 17, "total_steps": 136, "loss": 1.0029, "accuracy": 0.546875, "lr": 9.83606557377049e-07, "epoch": 0.250229990800368, "percentage": 12.5, "elapsed_time": "0:06:23", "remaining_time": "0:44:43"}
+ {"current_steps": 18, "total_steps": 136, "loss": 1.0136, "accuracy": 0.453125, "lr": 9.754098360655736e-07, "epoch": 0.26494940202391903, "percentage": 13.24, "elapsed_time": "0:06:46", "remaining_time": "0:44:25"}
+ {"current_steps": 19, "total_steps": 136, "loss": 0.9901, "accuracy": 0.40625, "lr": 9.672131147540984e-07, "epoch": 0.2796688132474701, "percentage": 13.97, "elapsed_time": "0:07:08", "remaining_time": "0:43:59"}
+ {"current_steps": 20, "total_steps": 136, "loss": 0.9931, "accuracy": 0.484375, "lr": 9.59016393442623e-07, "epoch": 0.29438822447102114, "percentage": 14.71, "elapsed_time": "0:07:30", "remaining_time": "0:43:31"}
+ {"current_steps": 21, "total_steps": 136, "loss": 0.9931, "accuracy": 0.453125, "lr": 9.508196721311474e-07, "epoch": 0.3091076356945722, "percentage": 15.44, "elapsed_time": "0:07:51", "remaining_time": "0:43:00"}
+ {"current_steps": 22, "total_steps": 136, "loss": 1.0038, "accuracy": 0.421875, "lr": 9.426229508196721e-07, "epoch": 0.32382704691812325, "percentage": 16.18, "elapsed_time": "0:08:11", "remaining_time": "0:42:29"}
+ {"current_steps": 23, "total_steps": 136, "loss": 0.9865, "accuracy": 0.484375, "lr": 9.344262295081968e-07, "epoch": 0.33854645814167433, "percentage": 16.91, "elapsed_time": "0:08:33", "remaining_time": "0:42:02"}
+ {"current_steps": 24, "total_steps": 136, "loss": 0.9727, "accuracy": 0.53125, "lr": 9.262295081967213e-07, "epoch": 0.3532658693652254, "percentage": 17.65, "elapsed_time": "0:08:54", "remaining_time": "0:41:36"}
+ {"current_steps": 25, "total_steps": 136, "loss": 1.0008, "accuracy": 0.4375, "lr": 9.180327868852458e-07, "epoch": 0.36798528058877644, "percentage": 18.38, "elapsed_time": "0:09:18", "remaining_time": "0:41:17"}
+ {"current_steps": 26, "total_steps": 136, "loss": 0.9675, "accuracy": 0.421875, "lr": 9.098360655737705e-07, "epoch": 0.3827046918123275, "percentage": 19.12, "elapsed_time": "0:09:41", "remaining_time": "0:40:58"}
+ {"current_steps": 27, "total_steps": 136, "loss": 0.9707, "accuracy": 0.515625, "lr": 9.01639344262295e-07, "epoch": 0.39742410303587855, "percentage": 19.85, "elapsed_time": "0:10:04", "remaining_time": "0:40:38"}
+ {"current_steps": 28, "total_steps": 136, "loss": 0.9673, "accuracy": 0.5625, "lr": 8.934426229508196e-07, "epoch": 0.41214351425942963, "percentage": 20.59, "elapsed_time": "0:10:26", "remaining_time": "0:40:15"}
+ {"current_steps": 29, "total_steps": 136, "loss": 0.9737, "accuracy": 0.53125, "lr": 8.852459016393443e-07, "epoch": 0.42686292548298066, "percentage": 21.32, "elapsed_time": "0:10:48", "remaining_time": "0:39:54"}
+ {"current_steps": 30, "total_steps": 136, "loss": 0.9833, "accuracy": 0.515625, "lr": 8.770491803278688e-07, "epoch": 0.44158233670653174, "percentage": 22.06, "elapsed_time": "0:11:10", "remaining_time": "0:39:29"}
+ {"current_steps": 31, "total_steps": 136, "loss": 0.9906, "accuracy": 0.46875, "lr": 8.688524590163933e-07, "epoch": 0.4563017479300828, "percentage": 22.79, "elapsed_time": "0:11:31", "remaining_time": "0:39:02"}
+ {"current_steps": 32, "total_steps": 136, "loss": 0.9748, "accuracy": 0.453125, "lr": 8.60655737704918e-07, "epoch": 0.47102115915363385, "percentage": 23.53, "elapsed_time": "0:11:53", "remaining_time": "0:38:38"}
+ {"current_steps": 33, "total_steps": 136, "loss": 0.9736, "accuracy": 0.53125, "lr": 8.524590163934425e-07, "epoch": 0.48574057037718493, "percentage": 24.26, "elapsed_time": "0:12:15", "remaining_time": "0:38:16"}
+ {"current_steps": 34, "total_steps": 136, "loss": 0.9562, "accuracy": 0.5625, "lr": 8.442622950819672e-07, "epoch": 0.500459981600736, "percentage": 25.0, "elapsed_time": "0:12:38", "remaining_time": "0:37:54"}
+ {"current_steps": 35, "total_steps": 136, "loss": 0.965, "accuracy": 0.625, "lr": 8.360655737704919e-07, "epoch": 0.515179392824287, "percentage": 25.74, "elapsed_time": "0:13:00", "remaining_time": "0:37:32"}
+ {"current_steps": 36, "total_steps": 136, "loss": 0.9815, "accuracy": 0.453125, "lr": 8.278688524590164e-07, "epoch": 0.5298988040478381, "percentage": 26.47, "elapsed_time": "0:13:22", "remaining_time": "0:37:08"}
+ {"current_steps": 37, "total_steps": 136, "loss": 0.9836, "accuracy": 0.453125, "lr": 8.196721311475409e-07, "epoch": 0.5446182152713891, "percentage": 27.21, "elapsed_time": "0:13:45", "remaining_time": "0:36:47"}
+ {"current_steps": 38, "total_steps": 136, "loss": 0.9685, "accuracy": 0.515625, "lr": 8.114754098360656e-07, "epoch": 0.5593376264949402, "percentage": 27.94, "elapsed_time": "0:14:06", "remaining_time": "0:36:22"}
+ {"current_steps": 39, "total_steps": 136, "loss": 0.9656, "accuracy": 0.53125, "lr": 8.032786885245901e-07, "epoch": 0.5740570377184913, "percentage": 28.68, "elapsed_time": "0:14:28", "remaining_time": "0:36:00"}
+ {"current_steps": 40, "total_steps": 136, "loss": 0.9814, "accuracy": 0.46875, "lr": 7.950819672131147e-07, "epoch": 0.5887764489420423, "percentage": 29.41, "elapsed_time": "0:14:49", "remaining_time": "0:35:33"}
+ {"current_steps": 41, "total_steps": 136, "loss": 1.008, "accuracy": 0.40625, "lr": 7.868852459016393e-07, "epoch": 0.6034958601655934, "percentage": 30.15, "elapsed_time": "0:15:12", "remaining_time": "0:35:15"}
+ {"current_steps": 42, "total_steps": 136, "loss": 0.9866, "accuracy": 0.484375, "lr": 7.786885245901639e-07, "epoch": 0.6182152713891444, "percentage": 30.88, "elapsed_time": "0:15:35", "remaining_time": "0:34:54"}
43
+ {"current_steps": 43, "total_steps": 136, "loss": 0.9796, "accuracy": 0.484375, "lr": 7.704918032786884e-07, "epoch": 0.6329346826126955, "percentage": 31.62, "elapsed_time": "0:15:57", "remaining_time": "0:34:30"}
44
+ {"current_steps": 44, "total_steps": 136, "loss": 1.001, "accuracy": 0.4375, "lr": 7.62295081967213e-07, "epoch": 0.6476540938362465, "percentage": 32.35, "elapsed_time": "0:16:19", "remaining_time": "0:34:07"}
45
+ {"current_steps": 45, "total_steps": 136, "loss": 0.9713, "accuracy": 0.5, "lr": 7.540983606557376e-07, "epoch": 0.6623735050597976, "percentage": 33.09, "elapsed_time": "0:16:40", "remaining_time": "0:33:43"}
46
+ {"current_steps": 46, "total_steps": 136, "loss": 0.9792, "accuracy": 0.546875, "lr": 7.459016393442623e-07, "epoch": 0.6770929162833487, "percentage": 33.82, "elapsed_time": "0:17:05", "remaining_time": "0:33:26"}
47
+ {"current_steps": 47, "total_steps": 136, "loss": 0.9677, "accuracy": 0.59375, "lr": 7.377049180327869e-07, "epoch": 0.6918123275068997, "percentage": 34.56, "elapsed_time": "0:17:27", "remaining_time": "0:33:03"}
48
+ {"current_steps": 48, "total_steps": 136, "loss": 0.9541, "accuracy": 0.640625, "lr": 7.295081967213115e-07, "epoch": 0.7065317387304508, "percentage": 35.29, "elapsed_time": "0:17:47", "remaining_time": "0:32:36"}
49
+ {"current_steps": 49, "total_steps": 136, "loss": 0.9716, "accuracy": 0.53125, "lr": 7.21311475409836e-07, "epoch": 0.7212511499540019, "percentage": 36.03, "elapsed_time": "0:18:07", "remaining_time": "0:32:10"}
50
+ {"current_steps": 50, "total_steps": 136, "loss": 0.9855, "accuracy": 0.484375, "lr": 7.131147540983606e-07, "epoch": 0.7359705611775529, "percentage": 36.76, "elapsed_time": "0:18:28", "remaining_time": "0:31:45"}
51
+ {"current_steps": 51, "total_steps": 136, "loss": 0.9597, "accuracy": 0.5625, "lr": 7.049180327868852e-07, "epoch": 0.7506899724011039, "percentage": 37.5, "elapsed_time": "0:18:51", "remaining_time": "0:31:26"}
52
+ {"current_steps": 52, "total_steps": 136, "loss": 0.9818, "accuracy": 0.59375, "lr": 6.967213114754098e-07, "epoch": 0.765409383624655, "percentage": 38.24, "elapsed_time": "0:19:12", "remaining_time": "0:31:01"}
53
+ {"current_steps": 53, "total_steps": 136, "loss": 0.9674, "accuracy": 0.53125, "lr": 6.885245901639343e-07, "epoch": 0.7801287948482061, "percentage": 38.97, "elapsed_time": "0:19:33", "remaining_time": "0:30:38"}
54
+ {"current_steps": 54, "total_steps": 136, "loss": 0.9664, "accuracy": 0.640625, "lr": 6.80327868852459e-07, "epoch": 0.7948482060717571, "percentage": 39.71, "elapsed_time": "0:19:55", "remaining_time": "0:30:15"}
55
+ {"current_steps": 55, "total_steps": 136, "loss": 0.9809, "accuracy": 0.484375, "lr": 6.721311475409835e-07, "epoch": 0.8095676172953082, "percentage": 40.44, "elapsed_time": "0:20:17", "remaining_time": "0:29:52"}
56
+ {"current_steps": 56, "total_steps": 136, "loss": 0.9797, "accuracy": 0.484375, "lr": 6.639344262295081e-07, "epoch": 0.8242870285188593, "percentage": 41.18, "elapsed_time": "0:20:40", "remaining_time": "0:29:31"}
57
+ {"current_steps": 57, "total_steps": 136, "loss": 0.9571, "accuracy": 0.640625, "lr": 6.557377049180327e-07, "epoch": 0.8390064397424103, "percentage": 41.91, "elapsed_time": "0:21:01", "remaining_time": "0:29:07"}
58
+ {"current_steps": 58, "total_steps": 136, "loss": 0.9727, "accuracy": 0.59375, "lr": 6.475409836065574e-07, "epoch": 0.8537258509659613, "percentage": 42.65, "elapsed_time": "0:21:22", "remaining_time": "0:28:44"}
59
+ {"current_steps": 59, "total_steps": 136, "loss": 0.9387, "accuracy": 0.703125, "lr": 6.393442622950819e-07, "epoch": 0.8684452621895125, "percentage": 43.38, "elapsed_time": "0:21:45", "remaining_time": "0:28:23"}
60
+ {"current_steps": 60, "total_steps": 136, "loss": 0.9652, "accuracy": 0.59375, "lr": 6.311475409836066e-07, "epoch": 0.8831646734130635, "percentage": 44.12, "elapsed_time": "0:22:06", "remaining_time": "0:27:59"}
61
+ {"current_steps": 61, "total_steps": 136, "loss": 0.9577, "accuracy": 0.546875, "lr": 6.229508196721311e-07, "epoch": 0.8978840846366145, "percentage": 44.85, "elapsed_time": "0:22:28", "remaining_time": "0:27:38"}
62
+ {"current_steps": 62, "total_steps": 136, "loss": 0.9812, "accuracy": 0.5625, "lr": 6.147540983606557e-07, "epoch": 0.9126034958601656, "percentage": 45.59, "elapsed_time": "0:22:50", "remaining_time": "0:27:16"}
63
+ {"current_steps": 63, "total_steps": 136, "loss": 0.9831, "accuracy": 0.484375, "lr": 6.065573770491803e-07, "epoch": 0.9273229070837167, "percentage": 46.32, "elapsed_time": "0:23:12", "remaining_time": "0:26:53"}
64
+ {"current_steps": 64, "total_steps": 136, "loss": 0.9533, "accuracy": 0.609375, "lr": 5.983606557377049e-07, "epoch": 0.9420423183072677, "percentage": 47.06, "elapsed_time": "0:23:34", "remaining_time": "0:26:30"}
65
+ {"current_steps": 65, "total_steps": 136, "loss": 0.9704, "accuracy": 0.546875, "lr": 5.901639344262294e-07, "epoch": 0.9567617295308187, "percentage": 47.79, "elapsed_time": "0:23:57", "remaining_time": "0:26:10"}
66
+ {"current_steps": 66, "total_steps": 136, "loss": 0.9792, "accuracy": 0.546875, "lr": 5.819672131147541e-07, "epoch": 0.9714811407543699, "percentage": 48.53, "elapsed_time": "0:24:20", "remaining_time": "0:25:48"}
67
+ {"current_steps": 67, "total_steps": 136, "loss": 0.9682, "accuracy": 0.5625, "lr": 5.737704918032786e-07, "epoch": 0.9862005519779209, "percentage": 49.26, "elapsed_time": "0:24:43", "remaining_time": "0:25:27"}
68
+ {"current_steps": 68, "total_steps": 136, "loss": 0.9114, "accuracy": 0.5500000715255737, "lr": 5.655737704918032e-07, "epoch": 1.0, "percentage": 50.0, "elapsed_time": "0:25:03", "remaining_time": "0:25:03"}
69
+ {"current_steps": 69, "total_steps": 136, "loss": 0.9459, "accuracy": 0.671875, "lr": 5.573770491803278e-07, "epoch": 1.0147194112235511, "percentage": 50.74, "elapsed_time": "0:25:56", "remaining_time": "0:25:11"}
70
+ {"current_steps": 70, "total_steps": 136, "loss": 0.9474, "accuracy": 0.671875, "lr": 5.491803278688525e-07, "epoch": 1.029438822447102, "percentage": 51.47, "elapsed_time": "0:26:21", "remaining_time": "0:24:50"}
71
+ {"current_steps": 71, "total_steps": 136, "loss": 0.9301, "accuracy": 0.640625, "lr": 5.40983606557377e-07, "epoch": 1.0441582336706532, "percentage": 52.21, "elapsed_time": "0:26:42", "remaining_time": "0:24:27"}
72
+ {"current_steps": 72, "total_steps": 136, "loss": 0.9439, "accuracy": 0.671875, "lr": 5.327868852459017e-07, "epoch": 1.0588776448942043, "percentage": 52.94, "elapsed_time": "0:27:04", "remaining_time": "0:24:03"}
73
+ {"current_steps": 73, "total_steps": 136, "loss": 0.9606, "accuracy": 0.609375, "lr": 5.245901639344262e-07, "epoch": 1.0735970561177552, "percentage": 53.68, "elapsed_time": "0:27:25", "remaining_time": "0:23:40"}
74
+ {"current_steps": 74, "total_steps": 136, "loss": 0.9235, "accuracy": 0.71875, "lr": 5.163934426229508e-07, "epoch": 1.0883164673413064, "percentage": 54.41, "elapsed_time": "0:27:48", "remaining_time": "0:23:17"}
75
+ {"current_steps": 75, "total_steps": 136, "loss": 0.9365, "accuracy": 0.71875, "lr": 5.081967213114754e-07, "epoch": 1.1030358785648575, "percentage": 55.15, "elapsed_time": "0:28:11", "remaining_time": "0:22:55"}
76
+ {"current_steps": 76, "total_steps": 136, "loss": 0.9397, "accuracy": 0.671875, "lr": 5e-07, "epoch": 1.1177552897884084, "percentage": 55.88, "elapsed_time": "0:28:32", "remaining_time": "0:22:31"}
77
+ {"current_steps": 77, "total_steps": 136, "loss": 0.9284, "accuracy": 0.625, "lr": 4.918032786885245e-07, "epoch": 1.1324747010119596, "percentage": 56.62, "elapsed_time": "0:28:55", "remaining_time": "0:22:09"}
78
+ {"current_steps": 78, "total_steps": 136, "loss": 0.9395, "accuracy": 0.671875, "lr": 4.836065573770492e-07, "epoch": 1.1471941122355105, "percentage": 57.35, "elapsed_time": "0:29:18", "remaining_time": "0:21:47"}
79
+ {"current_steps": 79, "total_steps": 136, "loss": 0.9467, "accuracy": 0.734375, "lr": 4.754098360655737e-07, "epoch": 1.1619135234590616, "percentage": 58.09, "elapsed_time": "0:29:40", "remaining_time": "0:21:24"}
80
+ {"current_steps": 80, "total_steps": 136, "loss": 0.9517, "accuracy": 0.640625, "lr": 4.672131147540984e-07, "epoch": 1.1766329346826128, "percentage": 58.82, "elapsed_time": "0:30:01", "remaining_time": "0:21:00"}
81
+ {"current_steps": 81, "total_steps": 136, "loss": 0.9425, "accuracy": 0.65625, "lr": 4.590163934426229e-07, "epoch": 1.1913523459061637, "percentage": 59.56, "elapsed_time": "0:30:23", "remaining_time": "0:20:37"}
82
+ {"current_steps": 82, "total_steps": 136, "loss": 0.9242, "accuracy": 0.703125, "lr": 4.508196721311475e-07, "epoch": 1.2060717571297148, "percentage": 60.29, "elapsed_time": "0:30:44", "remaining_time": "0:20:14"}
83
+ {"current_steps": 83, "total_steps": 136, "loss": 0.9412, "accuracy": 0.671875, "lr": 4.426229508196721e-07, "epoch": 1.220791168353266, "percentage": 61.03, "elapsed_time": "0:31:06", "remaining_time": "0:19:51"}
84
+ {"current_steps": 84, "total_steps": 136, "loss": 0.9396, "accuracy": 0.703125, "lr": 4.3442622950819667e-07, "epoch": 1.2355105795768169, "percentage": 61.76, "elapsed_time": "0:31:27", "remaining_time": "0:19:28"}
85
+ {"current_steps": 85, "total_steps": 136, "loss": 0.9275, "accuracy": 0.734375, "lr": 4.2622950819672127e-07, "epoch": 1.250229990800368, "percentage": 62.5, "elapsed_time": "0:31:47", "remaining_time": "0:19:04"}
86
+ {"current_steps": 86, "total_steps": 136, "loss": 0.9368, "accuracy": 0.671875, "lr": 4.180327868852459e-07, "epoch": 1.2649494020239191, "percentage": 63.24, "elapsed_time": "0:32:10", "remaining_time": "0:18:42"}
87
+ {"current_steps": 87, "total_steps": 136, "loss": 0.9295, "accuracy": 0.6875, "lr": 4.0983606557377047e-07, "epoch": 1.27966881324747, "percentage": 63.97, "elapsed_time": "0:32:36", "remaining_time": "0:18:21"}
88
+ {"current_steps": 88, "total_steps": 136, "loss": 0.9317, "accuracy": 0.75, "lr": 4.0163934426229507e-07, "epoch": 1.2943882244710212, "percentage": 64.71, "elapsed_time": "0:32:58", "remaining_time": "0:17:59"}
89
+ {"current_steps": 89, "total_steps": 136, "loss": 0.9391, "accuracy": 0.609375, "lr": 3.9344262295081967e-07, "epoch": 1.3091076356945721, "percentage": 65.44, "elapsed_time": "0:33:19", "remaining_time": "0:17:35"}
90
+ {"current_steps": 90, "total_steps": 136, "loss": 0.9191, "accuracy": 0.765625, "lr": 3.852459016393442e-07, "epoch": 1.3238270469181233, "percentage": 66.18, "elapsed_time": "0:33:41", "remaining_time": "0:17:13"}
91
+ {"current_steps": 91, "total_steps": 136, "loss": 0.9424, "accuracy": 0.609375, "lr": 3.770491803278688e-07, "epoch": 1.3385464581416744, "percentage": 66.91, "elapsed_time": "0:34:02", "remaining_time": "0:16:50"}
92
+ {"current_steps": 92, "total_steps": 136, "loss": 0.941, "accuracy": 0.640625, "lr": 3.6885245901639347e-07, "epoch": 1.3532658693652255, "percentage": 67.65, "elapsed_time": "0:34:23", "remaining_time": "0:16:26"}
93
+ {"current_steps": 93, "total_steps": 136, "loss": 0.9469, "accuracy": 0.625, "lr": 3.60655737704918e-07, "epoch": 1.3679852805887764, "percentage": 68.38, "elapsed_time": "0:34:44", "remaining_time": "0:16:03"}
94
+ {"current_steps": 94, "total_steps": 136, "loss": 0.9317, "accuracy": 0.6875, "lr": 3.524590163934426e-07, "epoch": 1.3827046918123276, "percentage": 69.12, "elapsed_time": "0:35:07", "remaining_time": "0:15:41"}
95
+ {"current_steps": 95, "total_steps": 136, "loss": 0.9165, "accuracy": 0.78125, "lr": 3.4426229508196717e-07, "epoch": 1.3974241030358785, "percentage": 69.85, "elapsed_time": "0:35:28", "remaining_time": "0:15:18"}
96
+ {"current_steps": 96, "total_steps": 136, "loss": 0.9495, "accuracy": 0.609375, "lr": 3.3606557377049177e-07, "epoch": 1.4121435142594296, "percentage": 70.59, "elapsed_time": "0:35:48", "remaining_time": "0:14:55"}
97
+ {"current_steps": 97, "total_steps": 136, "loss": 0.9238, "accuracy": 0.71875, "lr": 3.2786885245901637e-07, "epoch": 1.4268629254829808, "percentage": 71.32, "elapsed_time": "0:36:10", "remaining_time": "0:14:32"}
98
+ {"current_steps": 98, "total_steps": 136, "loss": 0.9244, "accuracy": 0.671875, "lr": 3.1967213114754097e-07, "epoch": 1.4415823367065317, "percentage": 72.06, "elapsed_time": "0:36:31", "remaining_time": "0:14:09"}
99
+ {"current_steps": 99, "total_steps": 136, "loss": 0.9296, "accuracy": 0.734375, "lr": 3.1147540983606557e-07, "epoch": 1.4563017479300828, "percentage": 72.79, "elapsed_time": "0:36:51", "remaining_time": "0:13:46"}
100
+ {"current_steps": 100, "total_steps": 136, "loss": 0.9216, "accuracy": 0.703125, "lr": 3.0327868852459017e-07, "epoch": 1.4710211591536337, "percentage": 73.53, "elapsed_time": "0:37:13", "remaining_time": "0:13:23"}
101
+ {"current_steps": 101, "total_steps": 136, "loss": 0.9231, "accuracy": 0.6875, "lr": 2.950819672131147e-07, "epoch": 1.4857405703771849, "percentage": 74.26, "elapsed_time": "0:37:32", "remaining_time": "0:13:00"}
102
+ {"current_steps": 102, "total_steps": 136, "loss": 0.929, "accuracy": 0.65625, "lr": 2.868852459016393e-07, "epoch": 1.500459981600736, "percentage": 75.0, "elapsed_time": "0:37:55", "remaining_time": "0:12:38"}
103
+ {"current_steps": 103, "total_steps": 136, "loss": 0.9235, "accuracy": 0.640625, "lr": 2.786885245901639e-07, "epoch": 1.5151793928242872, "percentage": 75.74, "elapsed_time": "0:38:16", "remaining_time": "0:12:15"}
104
+ {"current_steps": 104, "total_steps": 136, "loss": 0.9457, "accuracy": 0.703125, "lr": 2.704918032786885e-07, "epoch": 1.529898804047838, "percentage": 76.47, "elapsed_time": "0:38:39", "remaining_time": "0:11:53"}
105
+ {"current_steps": 105, "total_steps": 136, "loss": 0.9303, "accuracy": 0.71875, "lr": 2.622950819672131e-07, "epoch": 1.544618215271389, "percentage": 77.21, "elapsed_time": "0:39:02", "remaining_time": "0:11:31"}
106
+ {"current_steps": 106, "total_steps": 136, "loss": 0.9302, "accuracy": 0.765625, "lr": 2.540983606557377e-07, "epoch": 1.5593376264949401, "percentage": 77.94, "elapsed_time": "0:39:25", "remaining_time": "0:11:09"}
107
+ {"current_steps": 107, "total_steps": 136, "loss": 0.9072, "accuracy": 0.765625, "lr": 2.4590163934426226e-07, "epoch": 1.5740570377184913, "percentage": 78.68, "elapsed_time": "0:39:48", "remaining_time": "0:10:47"}
108
+ {"current_steps": 108, "total_steps": 136, "loss": 0.938, "accuracy": 0.640625, "lr": 2.3770491803278686e-07, "epoch": 1.5887764489420424, "percentage": 79.41, "elapsed_time": "0:40:10", "remaining_time": "0:10:25"}
109
+ {"current_steps": 109, "total_steps": 136, "loss": 0.9233, "accuracy": 0.671875, "lr": 2.2950819672131146e-07, "epoch": 1.6034958601655935, "percentage": 80.15, "elapsed_time": "0:40:33", "remaining_time": "0:10:02"}
110
+ {"current_steps": 110, "total_steps": 136, "loss": 0.9516, "accuracy": 0.625, "lr": 2.2131147540983606e-07, "epoch": 1.6182152713891444, "percentage": 80.88, "elapsed_time": "0:40:56", "remaining_time": "0:09:40"}
111
+ {"current_steps": 111, "total_steps": 136, "loss": 0.944, "accuracy": 0.609375, "lr": 2.1311475409836064e-07, "epoch": 1.6329346826126954, "percentage": 81.62, "elapsed_time": "0:41:18", "remaining_time": "0:09:18"}
112
+ {"current_steps": 112, "total_steps": 136, "loss": 0.9439, "accuracy": 0.59375, "lr": 2.0491803278688524e-07, "epoch": 1.6476540938362465, "percentage": 82.35, "elapsed_time": "0:41:39", "remaining_time": "0:08:55"}
113
+ {"current_steps": 113, "total_steps": 136, "loss": 0.9404, "accuracy": 0.5625, "lr": 1.9672131147540984e-07, "epoch": 1.6623735050597976, "percentage": 83.09, "elapsed_time": "0:42:00", "remaining_time": "0:08:33"}
114
+ {"current_steps": 114, "total_steps": 136, "loss": 0.9139, "accuracy": 0.6875, "lr": 1.885245901639344e-07, "epoch": 1.6770929162833488, "percentage": 83.82, "elapsed_time": "0:42:21", "remaining_time": "0:08:10"}
115
+ {"current_steps": 115, "total_steps": 136, "loss": 0.9206, "accuracy": 0.671875, "lr": 1.80327868852459e-07, "epoch": 1.6918123275068997, "percentage": 84.56, "elapsed_time": "0:42:44", "remaining_time": "0:07:48"}
116
+ {"current_steps": 116, "total_steps": 136, "loss": 0.9386, "accuracy": 0.609375, "lr": 1.7213114754098358e-07, "epoch": 1.7065317387304508, "percentage": 85.29, "elapsed_time": "0:43:05", "remaining_time": "0:07:25"}
117
+ {"current_steps": 117, "total_steps": 136, "loss": 0.9326, "accuracy": 0.765625, "lr": 1.6393442622950818e-07, "epoch": 1.7212511499540017, "percentage": 86.03, "elapsed_time": "0:43:27", "remaining_time": "0:07:03"}
118
+ {"current_steps": 118, "total_steps": 136, "loss": 0.9374, "accuracy": 0.640625, "lr": 1.5573770491803278e-07, "epoch": 1.7359705611775529, "percentage": 86.76, "elapsed_time": "0:43:48", "remaining_time": "0:06:40"}
119
+ {"current_steps": 119, "total_steps": 136, "loss": 0.9186, "accuracy": 0.6875, "lr": 1.4754098360655736e-07, "epoch": 1.750689972401104, "percentage": 87.5, "elapsed_time": "0:44:11", "remaining_time": "0:06:18"}
120
+ {"current_steps": 120, "total_steps": 136, "loss": 0.9394, "accuracy": 0.734375, "lr": 1.3934426229508196e-07, "epoch": 1.7654093836246552, "percentage": 88.24, "elapsed_time": "0:44:34", "remaining_time": "0:05:56"}
121
+ {"current_steps": 121, "total_steps": 136, "loss": 0.9452, "accuracy": 0.484375, "lr": 1.3114754098360656e-07, "epoch": 1.780128794848206, "percentage": 88.97, "elapsed_time": "0:44:56", "remaining_time": "0:05:34"}
122
+ {"current_steps": 122, "total_steps": 136, "loss": 0.9323, "accuracy": 0.671875, "lr": 1.2295081967213113e-07, "epoch": 1.794848206071757, "percentage": 89.71, "elapsed_time": "0:45:17", "remaining_time": "0:05:11"}
123
+ {"current_steps": 123, "total_steps": 136, "loss": 0.9348, "accuracy": 0.703125, "lr": 1.1475409836065573e-07, "epoch": 1.8095676172953081, "percentage": 90.44, "elapsed_time": "0:45:38", "remaining_time": "0:04:49"}
124
+ {"current_steps": 124, "total_steps": 136, "loss": 0.9163, "accuracy": 0.65625, "lr": 1.0655737704918032e-07, "epoch": 1.8242870285188593, "percentage": 91.18, "elapsed_time": "0:46:00", "remaining_time": "0:04:27"}
125
+ {"current_steps": 125, "total_steps": 136, "loss": 0.941, "accuracy": 0.703125, "lr": 9.836065573770492e-08, "epoch": 1.8390064397424104, "percentage": 91.91, "elapsed_time": "0:46:22", "remaining_time": "0:04:04"}
126
+ {"current_steps": 126, "total_steps": 136, "loss": 0.9525, "accuracy": 0.59375, "lr": 9.01639344262295e-08, "epoch": 1.8537258509659613, "percentage": 92.65, "elapsed_time": "0:46:44", "remaining_time": "0:03:42"}
127
+ {"current_steps": 127, "total_steps": 136, "loss": 0.9356, "accuracy": 0.65625, "lr": 8.196721311475409e-08, "epoch": 1.8684452621895125, "percentage": 93.38, "elapsed_time": "0:47:06", "remaining_time": "0:03:20"}
128
+ {"current_steps": 128, "total_steps": 136, "loss": 0.9164, "accuracy": 0.71875, "lr": 7.377049180327868e-08, "epoch": 1.8831646734130634, "percentage": 94.12, "elapsed_time": "0:47:26", "remaining_time": "0:02:57"}
129
+ {"current_steps": 129, "total_steps": 136, "loss": 0.9298, "accuracy": 0.6875, "lr": 6.557377049180328e-08, "epoch": 1.8978840846366145, "percentage": 94.85, "elapsed_time": "0:47:47", "remaining_time": "0:02:35"}
130
+ {"current_steps": 130, "total_steps": 136, "loss": 0.9354, "accuracy": 0.59375, "lr": 5.7377049180327866e-08, "epoch": 1.9126034958601656, "percentage": 95.59, "elapsed_time": "0:48:08", "remaining_time": "0:02:13"}
131
+ {"current_steps": 131, "total_steps": 136, "loss": 0.9359, "accuracy": 0.703125, "lr": 4.918032786885246e-08, "epoch": 1.9273229070837168, "percentage": 96.32, "elapsed_time": "0:48:30", "remaining_time": "0:01:51"}
132
+ {"current_steps": 132, "total_steps": 136, "loss": 0.9253, "accuracy": 0.65625, "lr": 4.0983606557377046e-08, "epoch": 1.9420423183072677, "percentage": 97.06, "elapsed_time": "0:48:52", "remaining_time": "0:01:28"}
133
+ {"current_steps": 133, "total_steps": 136, "loss": 0.9188, "accuracy": 0.765625, "lr": 3.278688524590164e-08, "epoch": 1.9567617295308186, "percentage": 97.79, "elapsed_time": "0:49:14", "remaining_time": "0:01:06"}
134
+ {"current_steps": 134, "total_steps": 136, "loss": 0.9204, "accuracy": 0.765625, "lr": 2.459016393442623e-08, "epoch": 1.9714811407543698, "percentage": 98.53, "elapsed_time": "0:49:38", "remaining_time": "0:00:44"}
135
+ {"current_steps": 135, "total_steps": 136, "loss": 0.9194, "accuracy": 0.765625, "lr": 1.639344262295082e-08, "epoch": 1.986200551977921, "percentage": 99.26, "elapsed_time": "0:50:00", "remaining_time": "0:00:22"}
136
+ {"current_steps": 136, "total_steps": 136, "loss": 0.8813, "accuracy": 0.6166666746139526, "lr": 8.19672131147541e-09, "epoch": 2.0, "percentage": 100.0, "elapsed_time": "0:50:20", "remaining_time": "0:00:00"}
137
+ {"current_steps": 136, "total_steps": 136, "epoch": 2.0, "percentage": 100.0, "elapsed_time": "0:50:50", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,2083 @@
 
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 2.0,
6
+ "eval_steps": 500,
7
+ "global_step": 136,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.014719411223551058,
14
+ "grad_norm": 10.21645450592041,
15
+ "learning_rate": 0.0,
16
+ "logits/chosen": -3.0194568634033203,
17
+ "logits/rejected": -2.950680732727051,
18
+ "logps/chosen": -0.45742279291152954,
19
+ "logps/rejected": -0.4388114809989929,
20
+ "loss": 1.0051,
21
+ "rewards/accuracies": 0.4375,
22
+ "rewards/chosen": -0.9148455858230591,
23
+ "rewards/margins": -0.03722264617681503,
24
+ "rewards/rejected": -0.8776229619979858,
25
+ "step": 1
26
+ },
27
+ {
28
+ "epoch": 0.029438822447102116,
29
+ "grad_norm": 9.920907020568848,
30
+ "learning_rate": 7.142857142857142e-08,
31
+ "logits/chosen": -3.3881869316101074,
32
+ "logits/rejected": -3.4658308029174805,
33
+ "logps/chosen": -0.4393010139465332,
34
+ "logps/rejected": -0.40908509492874146,
35
+ "loss": 1.0176,
36
+ "rewards/accuracies": 0.359375,
37
+ "rewards/chosen": -0.8786020278930664,
38
+ "rewards/margins": -0.06043180450797081,
39
+ "rewards/rejected": -0.8181700706481934,
40
+ "step": 2
41
+ },
42
+ {
43
+ "epoch": 0.04415823367065318,
44
+ "grad_norm": 9.453459739685059,
45
+ "learning_rate": 1.4285714285714285e-07,
46
+ "logits/chosen": -3.019883155822754,
47
+ "logits/rejected": -3.200739860534668,
48
+ "logps/chosen": -0.41646909713745117,
49
+ "logps/rejected": -0.4280118942260742,
50
+ "loss": 0.9677,
51
+ "rewards/accuracies": 0.4375,
52
+ "rewards/chosen": -0.8329381942749023,
53
+ "rewards/margins": 0.0230855755507946,
54
+ "rewards/rejected": -0.8560237884521484,
55
+ "step": 3
56
+ },
57
+ {
58
+ "epoch": 0.05887764489420423,
59
+ "grad_norm": 10.635061264038086,
60
+ "learning_rate": 2.1428571428571426e-07,
61
+ "logits/chosen": -2.7175631523132324,
62
+ "logits/rejected": -2.886113166809082,
63
+ "logps/chosen": -0.4779108166694641,
64
+ "logps/rejected": -0.45090264081954956,
65
+ "loss": 1.0147,
66
+ "rewards/accuracies": 0.5,
67
+ "rewards/chosen": -0.9558216333389282,
68
+ "rewards/margins": -0.054016292095184326,
69
+ "rewards/rejected": -0.9018052816390991,
70
+ "step": 4
71
+ },
72
+ {
73
+ "epoch": 0.07359705611775529,
74
+ "grad_norm": 10.923693656921387,
75
+ "learning_rate": 2.857142857142857e-07,
76
+ "logits/chosen": -2.7420175075531006,
77
+ "logits/rejected": -2.9430060386657715,
78
+ "logps/chosen": -0.4806134104728699,
79
+ "logps/rejected": -0.4588547945022583,
80
+ "loss": 1.0084,
81
+ "rewards/accuracies": 0.453125,
82
+ "rewards/chosen": -0.9612268209457397,
83
+ "rewards/margins": -0.04351712763309479,
84
+ "rewards/rejected": -0.9177095890045166,
85
+ "step": 5
86
+ },
87
+ {
88
+ "epoch": 0.08831646734130635,
89
+ "grad_norm": 10.25864028930664,
90
+ "learning_rate": 3.5714285714285716e-07,
91
+ "logits/chosen": -2.754415273666382,
92
+ "logits/rejected": -3.0695841312408447,
93
+ "logps/chosen": -0.46477335691452026,
94
+ "logps/rejected": -0.44464749097824097,
95
+ "loss": 1.0025,
96
+ "rewards/accuracies": 0.359375,
97
+ "rewards/chosen": -0.9295467138290405,
98
+ "rewards/margins": -0.04025167599320412,
99
+ "rewards/rejected": -0.8892950415611267,
100
+ "step": 6
101
+ },
102
+ {
103
+ "epoch": 0.10303587856485741,
104
+ "grad_norm": 11.24145793914795,
105
+ "learning_rate": 4.285714285714285e-07,
106
+ "logits/chosen": -2.9528307914733887,
107
+ "logits/rejected": -3.126439094543457,
108
+ "logps/chosen": -0.4697887897491455,
109
+ "logps/rejected": -0.4457703232765198,
110
+ "loss": 1.0125,
111
+ "rewards/accuracies": 0.453125,
112
+ "rewards/chosen": -0.9395775198936462,
113
+ "rewards/margins": -0.048036910593509674,
114
+ "rewards/rejected": -0.8915406465530396,
115
+ "step": 7
116
+ },
117
+ {
118
+ "epoch": 0.11775528978840846,
119
+ "grad_norm": 9.375962257385254,
120
+ "learning_rate": 5e-07,
121
+ "logits/chosen": -3.262545585632324,
122
+ "logits/rejected": -3.296149969100952,
123
+ "logps/chosen": -0.43063342571258545,
124
+ "logps/rejected": -0.4382564425468445,
125
+ "loss": 0.9824,
126
+ "rewards/accuracies": 0.421875,
127
+ "rewards/chosen": -0.8612668514251709,
128
+ "rewards/margins": 0.015246011316776276,
129
+ "rewards/rejected": -0.876512885093689,
130
+ "step": 8
131
+ },
132
+ {
133
+ "epoch": 0.13247470101195952,
134
+ "grad_norm": 10.173127174377441,
135
+ "learning_rate": 5.714285714285714e-07,
136
+ "logits/chosen": -2.697115898132324,
137
+ "logits/rejected": -3.103816509246826,
138
+ "logps/chosen": -0.4361271262168884,
139
+ "logps/rejected": -0.43252134323120117,
140
+ "loss": 0.9864,
141
+ "rewards/accuracies": 0.40625,
142
+ "rewards/chosen": -0.8722542524337769,
143
+ "rewards/margins": -0.007211566902697086,
144
+ "rewards/rejected": -0.8650426864624023,
145
+ "step": 9
146
+ },
147
+ {
148
+ "epoch": 0.14719411223551057,
149
+ "grad_norm": 9.083308219909668,
150
+ "learning_rate": 6.428571428571429e-07,
151
+ "logits/chosen": -3.46915864944458,
152
+ "logits/rejected": -3.5064210891723633,
153
+ "logps/chosen": -0.41334497928619385,
154
+ "logps/rejected": -0.4007585346698761,
155
+ "loss": 0.9971,
156
+ "rewards/accuracies": 0.46875,
157
+ "rewards/chosen": -0.8266898393630981,
158
+ "rewards/margins": -0.02517283335328102,
159
+ "rewards/rejected": -0.8015170097351074,
160
+ "step": 10
161
+ },
162
+ {
163
+ "epoch": 0.16191352345906163,
164
+ "grad_norm": 10.834942817687988,
165
+ "learning_rate": 7.142857142857143e-07,
166
+ "logits/chosen": -2.949937343597412,
167
+ "logits/rejected": -2.860502243041992,
168
+ "logps/chosen": -0.4562839865684509,
169
+ "logps/rejected": -0.4406251907348633,
170
+ "loss": 1.0003,
171
+ "rewards/accuracies": 0.453125,
172
+ "rewards/chosen": -0.9125679731369019,
173
+ "rewards/margins": -0.031317513436079025,
174
+ "rewards/rejected": -0.8812504410743713,
175
+ "step": 11
176
+ },
177
+ {
178
+ "epoch": 0.1766329346826127,
179
+ "grad_norm": 11.5885591506958,
180
+ "learning_rate": 7.857142857142856e-07,
181
+ "logits/chosen": -2.7742319107055664,
182
+ "logits/rejected": -2.8857803344726562,
183
+ "logps/chosen": -0.4672906696796417,
184
+ "logps/rejected": -0.434725821018219,
185
+ "loss": 1.0228,
186
+ "rewards/accuracies": 0.390625,
187
+ "rewards/chosen": -0.9345813393592834,
188
+ "rewards/margins": -0.06512977182865143,
189
+ "rewards/rejected": -0.869451642036438,
190
+ "step": 12
191
+ },
192
+ {
193
+ "epoch": 0.19135234590616376,
194
+ "grad_norm": 11.761482238769531,
195
+ "learning_rate": 8.57142857142857e-07,
196
+ "logits/chosen": -3.4971909523010254,
197
+ "logits/rejected": -3.3080244064331055,
198
+ "logps/chosen": -0.43734604120254517,
199
+ "logps/rejected": -0.40988171100616455,
200
+ "loss": 1.0224,
201
+ "rewards/accuracies": 0.4375,
202
+ "rewards/chosen": -0.8746920824050903,
203
+ "rewards/margins": -0.05492864176630974,
204
+ "rewards/rejected": -0.8197634220123291,
205
+ "step": 13
206
+ },
207
+ {
208
+ "epoch": 0.20607175712971482,
209
+ "grad_norm": 11.107159614562988,
210
+ "learning_rate": 9.285714285714285e-07,
211
+ "logits/chosen": -3.167203426361084,
212
+ "logits/rejected": -2.89117431640625,
213
+ "logps/chosen": -0.447818398475647,
214
+ "logps/rejected": -0.40924957394599915,
215
+ "loss": 1.0276,
216
+ "rewards/accuracies": 0.28125,
217
+ "rewards/chosen": -0.895636796951294,
218
+ "rewards/margins": -0.07713763415813446,
219
+ "rewards/rejected": -0.8184992074966431,
220
+ "step": 14
221
+ },
222
+ {
223
+ "epoch": 0.22079116835326587,
224
+ "grad_norm": 10.545300483703613,
225
+ "learning_rate": 1e-06,
226
+ "logits/chosen": -2.7851405143737793,
227
+ "logits/rejected": -3.0399112701416016,
228
+ "logps/chosen": -0.457728773355484,
229
+ "logps/rejected": -0.43849730491638184,
230
+ "loss": 1.0038,
231
+ "rewards/accuracies": 0.375,
232
+ "rewards/chosen": -0.915457546710968,
233
+ "rewards/margins": -0.038462862372398376,
234
+ "rewards/rejected": -0.8769946694374084,
235
+ "step": 15
236
+ },
237
+ {
238
+ "epoch": 0.23551057957681693,
239
+ "grad_norm": 10.076902389526367,
240
+ "learning_rate": 9.918032786885245e-07,
241
+ "logits/chosen": -2.3796284198760986,
242
+ "logits/rejected": -2.758613109588623,
243
+ "logps/chosen": -0.4450618028640747,
244
+ "logps/rejected": -0.4445981979370117,
245
+ "loss": 0.9802,
246
+ "rewards/accuracies": 0.59375,
247
+ "rewards/chosen": -0.8901236057281494,
248
+ "rewards/margins": -0.0009272526949644089,
249
+ "rewards/rejected": -0.8891963958740234,
250
+ "step": 16
251
+ },
252
+ {
253
+ "epoch": 0.250229990800368,
254
+ "grad_norm": 9.330389976501465,
255
+ "learning_rate": 9.83606557377049e-07,
256
+ "logits/chosen": -3.1862754821777344,
257
+ "logits/rejected": -3.2526628971099854,
258
+ "logps/chosen": -0.3827851116657257,
259
+ "logps/rejected": -0.36282622814178467,
260
+ "loss": 1.0029,
261
+ "rewards/accuracies": 0.546875,
262
+ "rewards/chosen": -0.7655701637268066,
263
+ "rewards/margins": -0.039917729794979095,
264
+ "rewards/rejected": -0.7256523966789246,
265
+ "step": 17
266
+ },
267
+ {
268
+ "epoch": 0.26494940202391903,
269
+ "grad_norm": 11.906604766845703,
270
+ "learning_rate": 9.754098360655736e-07,
271
+ "logits/chosen": -2.927328586578369,
272
+ "logits/rejected": -3.00978422164917,
273
+ "logps/chosen": -0.43598276376724243,
274
+ "logps/rejected": -0.4090351462364197,
275
+ "loss": 1.0136,
276
+ "rewards/accuracies": 0.453125,
277
+ "rewards/chosen": -0.8719655275344849,
278
+ "rewards/margins": -0.053895253688097,
279
+ "rewards/rejected": -0.8180702328681946,
280
+ "step": 18
281
+ },
282
+ {
283
+ "epoch": 0.2796688132474701,
284
+ "grad_norm": 9.152690887451172,
285
+ "learning_rate": 9.672131147540984e-07,
286
+ "logits/chosen": -2.607306718826294,
287
+ "logits/rejected": -2.6976051330566406,
288
+ "logps/chosen": -0.4554919898509979,
289
+ "logps/rejected": -0.4455527663230896,
290
+ "loss": 0.9901,
291
+ "rewards/accuracies": 0.40625,
292
+ "rewards/chosen": -0.9109839200973511,
293
+ "rewards/margins": -0.019878461956977844,
294
+ "rewards/rejected": -0.8911055326461792,
295
+ "step": 19
296
+ },
297
+ {
298
+ "epoch": 0.29438822447102114,
299
+ "grad_norm": 8.773446083068848,
300
+ "learning_rate": 9.59016393442623e-07,
301
+ "logits/chosen": -2.664100408554077,
302
+ "logits/rejected": -2.849198341369629,
303
+ "logps/chosen": -0.4018637239933014,
304
+ "logps/rejected": -0.3902263939380646,
305
+ "loss": 0.9931,
306
+ "rewards/accuracies": 0.484375,
307
+ "rewards/chosen": -0.8037275075912476,
308
+ "rewards/margins": -0.023274658247828484,
309
+ "rewards/rejected": -0.7804527282714844,
310
+ "step": 20
311
+ },
312
+ {
313
+ "epoch": 0.3091076356945722,
314
+ "grad_norm": 9.694635391235352,
315
+ "learning_rate": 9.508196721311474e-07,
316
+ "logits/chosen": -2.7708492279052734,
317
+ "logits/rejected": -2.630173683166504,
318
+ "logps/chosen": -0.44786471128463745,
319
+ "logps/rejected": -0.43551987409591675,
320
+ "loss": 0.9931,
321
+ "rewards/accuracies": 0.453125,
322
+ "rewards/chosen": -0.8957293629646301,
323
+ "rewards/margins": -0.024689704179763794,
324
+ "rewards/rejected": -0.8710396885871887,
325
+ "step": 21
326
+ },
327
+ {
328
+ "epoch": 0.32382704691812325,
329
+ "grad_norm": 8.240747451782227,
330
+ "learning_rate": 9.426229508196721e-07,
331
+ "logits/chosen": -3.098659038543701,
332
+ "logits/rejected": -3.123976230621338,
333
+ "logps/chosen": -0.38691607117652893,
334
+ "logps/rejected": -0.3664763569831848,
335
+ "loss": 1.0038,
336
+ "rewards/accuracies": 0.421875,
337
+ "rewards/chosen": -0.7738322019577026,
338
+ "rewards/margins": -0.040879447013139725,
339
+ "rewards/rejected": -0.7329527139663696,
340
+ "step": 22
341
+ },
342
+ {
343
+ "epoch": 0.33854645814167433,
344
+ "grad_norm": 8.561594009399414,
345
+ "learning_rate": 9.344262295081968e-07,
346
+ "logits/chosen": -2.6878695487976074,
347
+ "logits/rejected": -2.800431489944458,
348
+ "logps/chosen": -0.3922501802444458,
349
+ "logps/rejected": -0.38519686460494995,
350
+ "loss": 0.9865,
351
+ "rewards/accuracies": 0.484375,
352
+ "rewards/chosen": -0.7845003008842468,
353
+ "rewards/margins": -0.01410655677318573,
354
+ "rewards/rejected": -0.7703937888145447,
355
+ "step": 23
356
+ },
357
+ {
358
+ "epoch": 0.3532658693652254,
359
+ "grad_norm": 8.64904499053955,
360
+ "learning_rate": 9.262295081967213e-07,
361
+ "logits/chosen": -3.2959938049316406,
362
+ "logits/rejected": -3.0946803092956543,
363
+ "logps/chosen": -0.3919060230255127,
364
+ "logps/rejected": -0.39727240800857544,
365
+ "loss": 0.9727,
366
+ "rewards/accuracies": 0.53125,
367
+ "rewards/chosen": -0.7838120460510254,
368
+ "rewards/margins": 0.010732805356383324,
369
+ "rewards/rejected": -0.7945448160171509,
370
+ "step": 24
371
+ },
372
+ {
373
+ "epoch": 0.36798528058877644,
374
+ "grad_norm": 11.180377006530762,
375
+ "learning_rate": 9.180327868852458e-07,
376
+ "logits/chosen": -2.71470046043396,
377
+ "logits/rejected": -2.9183754920959473,
378
+ "logps/chosen": -0.35864555835723877,
379
+ "logps/rejected": -0.33973294496536255,
380
+ "loss": 1.0008,
381
+ "rewards/accuracies": 0.4375,
382
+ "rewards/chosen": -0.7172911167144775,
383
+ "rewards/margins": -0.03782520815730095,
384
+ "rewards/rejected": -0.6794658899307251,
385
+ "step": 25
386
+ },
387
+ {
388
+ "epoch": 0.3827046918123275,
389
+ "grad_norm": 7.819973468780518,
390
+ "learning_rate": 9.098360655737705e-07,
391
+ "logits/chosen": -3.1228315830230713,
392
+ "logits/rejected": -2.978816032409668,
393
+ "logps/chosen": -0.3795928955078125,
394
+ "logps/rejected": -0.38733017444610596,
395
+ "loss": 0.9675,
396
+ "rewards/accuracies": 0.421875,
397
+ "rewards/chosen": -0.759185791015625,
398
+ "rewards/margins": 0.015474595129489899,
399
+ "rewards/rejected": -0.7746603488922119,
400
+ "step": 26
401
+ },
402
+ {
403
+ "epoch": 0.39742410303587855,
404
+ "grad_norm": 8.191493034362793,
405
+ "learning_rate": 9.01639344262295e-07,
406
+ "logits/chosen": -2.4528238773345947,
407
+ "logits/rejected": -2.5398683547973633,
408
+ "logps/chosen": -0.40499553084373474,
409
+ "logps/rejected": -0.4100068509578705,
410
+ "loss": 0.9707,
411
+ "rewards/accuracies": 0.515625,
412
+ "rewards/chosen": -0.8099910616874695,
413
+ "rewards/margins": 0.010022655129432678,
414
+ "rewards/rejected": -0.820013701915741,
415
+ "step": 27
416
+ },
417
+ {
418
+ "epoch": 0.41214351425942963,
419
+ "grad_norm": 7.093197345733643,
420
+ "learning_rate": 8.934426229508196e-07,
421
+ "logits/chosen": -2.4629902839660645,
422
+ "logits/rejected": -2.5782675743103027,
423
+ "logps/chosen": -0.3413301706314087,
424
+ "logps/rejected": -0.3495180010795593,
425
+ "loss": 0.9673,
426
+ "rewards/accuracies": 0.5625,
427
+ "rewards/chosen": -0.6826603412628174,
428
+ "rewards/margins": 0.016375690698623657,
429
+ "rewards/rejected": -0.6990360617637634,
430
+ "step": 28
431
+ },
432
+ {
433
+ "epoch": 0.42686292548298066,
434
+ "grad_norm": 7.324411869049072,
435
+ "learning_rate": 8.852459016393443e-07,
436
+ "logits/chosen": -2.8721837997436523,
437
+ "logits/rejected": -2.7182796001434326,
438
+ "logps/chosen": -0.3609670400619507,
439
+ "logps/rejected": -0.3633623719215393,
440
+ "loss": 0.9737,
441
+ "rewards/accuracies": 0.53125,
442
+ "rewards/chosen": -0.7219340801239014,
443
+ "rewards/margins": 0.004790660459548235,
444
+ "rewards/rejected": -0.7267247438430786,
445
+ "step": 29
446
+ },
447
+ {
448
+ "epoch": 0.44158233670653174,
449
+ "grad_norm": 6.33624267578125,
450
+ "learning_rate": 8.770491803278688e-07,
451
+ "logits/chosen": -2.2950024604797363,
452
+ "logits/rejected": -2.551114082336426,
453
+ "logps/chosen": -0.33408358693122864,
454
+ "logps/rejected": -0.3281135559082031,
455
+ "loss": 0.9833,
456
+ "rewards/accuracies": 0.515625,
457
+ "rewards/chosen": -0.6681671738624573,
458
+ "rewards/margins": -0.011940006166696548,
459
+ "rewards/rejected": -0.6562271118164062,
460
+ "step": 30
461
+ },
462
+ {
463
+ "epoch": 0.4563017479300828,
464
+ "grad_norm": 7.2210211753845215,
465
+ "learning_rate": 8.688524590163933e-07,
466
+ "logits/chosen": -2.146829605102539,
467
+ "logits/rejected": -2.417693614959717,
468
+ "logps/chosen": -0.36344075202941895,
469
+ "logps/rejected": -0.35178840160369873,
470
+ "loss": 0.9906,
471
+ "rewards/accuracies": 0.46875,
472
+ "rewards/chosen": -0.7268815040588379,
473
+ "rewards/margins": -0.023304661735892296,
474
+ "rewards/rejected": -0.7035768032073975,
475
+ "step": 31
476
+ },
477
+ {
478
+ "epoch": 0.47102115915363385,
479
+ "grad_norm": 6.0392303466796875,
480
+ "learning_rate": 8.60655737704918e-07,
481
+ "logits/chosen": -2.360600471496582,
482
+ "logits/rejected": -2.4811832904815674,
483
+ "logps/chosen": -0.3448009788990021,
484
+ "logps/rejected": -0.3456505537033081,
485
+ "loss": 0.9748,
486
+ "rewards/accuracies": 0.453125,
487
+ "rewards/chosen": -0.6896018981933594,
488
+ "rewards/margins": 0.0016991421580314636,
489
+ "rewards/rejected": -0.6913011074066162,
490
+ "step": 32
491
+ },
492
+ {
493
+ "epoch": 0.48574057037718493,
494
+ "grad_norm": 6.579920291900635,
495
+ "learning_rate": 8.524590163934425e-07,
496
+ "logits/chosen": -2.5944058895111084,
497
+ "logits/rejected": -2.590837240219116,
498
+ "logps/chosen": -0.3355923593044281,
499
+ "logps/rejected": -0.3367539346218109,
500
+ "loss": 0.9736,
501
+ "rewards/accuracies": 0.53125,
502
+ "rewards/chosen": -0.6711846590042114,
503
+ "rewards/margins": 0.00232316879555583,
504
+ "rewards/rejected": -0.6735078692436218,
505
+ "step": 33
506
+ },
507
+ {
508
+ "epoch": 0.500459981600736,
509
+ "grad_norm": 5.514021873474121,
510
+ "learning_rate": 8.442622950819672e-07,
511
+ "logits/chosen": -2.2286832332611084,
512
+ "logits/rejected": -2.334184169769287,
513
+ "logps/chosen": -0.33515220880508423,
514
+ "logps/rejected": -0.3512744903564453,
515
+ "loss": 0.9562,
516
+ "rewards/accuracies": 0.5625,
517
+ "rewards/chosen": -0.6703044176101685,
518
+ "rewards/margins": 0.032244522124528885,
519
+ "rewards/rejected": -0.7025489807128906,
520
+ "step": 34
521
+ },
522
+ {
523
+ "epoch": 0.515179392824287,
524
+ "grad_norm": 5.068162441253662,
525
+ "learning_rate": 8.360655737704919e-07,
526
+ "logits/chosen": -1.8708163499832153,
527
+ "logits/rejected": -2.1949517726898193,
528
+ "logps/chosen": -0.319760799407959,
529
+ "logps/rejected": -0.32897067070007324,
530
+ "loss": 0.965,
531
+ "rewards/accuracies": 0.625,
532
+ "rewards/chosen": -0.639521598815918,
533
+ "rewards/margins": 0.018419764935970306,
534
+ "rewards/rejected": -0.6579413414001465,
535
+ "step": 35
536
+ },
537
+ {
538
+ "epoch": 0.5298988040478381,
539
+ "grad_norm": 6.008891582489014,
540
+ "learning_rate": 8.278688524590164e-07,
541
+ "logits/chosen": -2.1621382236480713,
542
+ "logits/rejected": -2.482567310333252,
543
+ "logps/chosen": -0.32818323373794556,
544
+ "logps/rejected": -0.3238987326622009,
545
+ "loss": 0.9815,
546
+ "rewards/accuracies": 0.453125,
547
+ "rewards/chosen": -0.6563664674758911,
548
+ "rewards/margins": -0.008569043129682541,
549
+ "rewards/rejected": -0.6477974653244019,
550
+ "step": 36
551
+ },
552
+ {
553
+ "epoch": 0.5446182152713891,
554
+ "grad_norm": 5.96389627456665,
555
+ "learning_rate": 8.196721311475409e-07,
556
+ "logits/chosen": -1.713577389717102,
557
+ "logits/rejected": -2.0453085899353027,
558
+ "logps/chosen": -0.35343289375305176,
559
+ "logps/rejected": -0.3477077782154083,
560
+ "loss": 0.9836,
561
+ "rewards/accuracies": 0.453125,
562
+ "rewards/chosen": -0.7068657875061035,
563
+ "rewards/margins": -0.0114502664655447,
564
+ "rewards/rejected": -0.6954155564308167,
565
+ "step": 37
566
+ },
567
+ {
568
+ "epoch": 0.5593376264949402,
569
+ "grad_norm": 5.4984941482543945,
570
+ "learning_rate": 8.114754098360656e-07,
571
+ "logits/chosen": -2.169618606567383,
572
+ "logits/rejected": -2.206155776977539,
573
+ "logps/chosen": -0.3445884585380554,
574
+ "logps/rejected": -0.3500184118747711,
575
+ "loss": 0.9685,
576
+ "rewards/accuracies": 0.515625,
577
+ "rewards/chosen": -0.6891769170761108,
578
+ "rewards/margins": 0.010859915986657143,
579
+ "rewards/rejected": -0.7000367641448975,
580
+ "step": 38
581
+ },
582
+ {
583
+ "epoch": 0.5740570377184913,
584
+ "grad_norm": 5.409811973571777,
585
+ "learning_rate": 8.032786885245901e-07,
586
+ "logits/chosen": -2.129136562347412,
587
+ "logits/rejected": -2.266310214996338,
588
+ "logps/chosen": -0.32456666231155396,
589
+ "logps/rejected": -0.3323541283607483,
590
+ "loss": 0.9656,
591
+ "rewards/accuracies": 0.53125,
592
+ "rewards/chosen": -0.6491333246231079,
593
+ "rewards/margins": 0.015574917197227478,
594
+ "rewards/rejected": -0.6647082567214966,
595
+ "step": 39
596
+ },
597
+ {
598
+ "epoch": 0.5887764489420423,
599
+ "grad_norm": 6.400082588195801,
600
+ "learning_rate": 7.950819672131147e-07,
601
+ "logits/chosen": -2.295382261276245,
602
+ "logits/rejected": -2.189863443374634,
603
+ "logps/chosen": -0.3625165820121765,
604
+ "logps/rejected": -0.35861217975616455,
605
+ "loss": 0.9814,
606
+ "rewards/accuracies": 0.46875,
607
+ "rewards/chosen": -0.725033164024353,
608
+ "rewards/margins": -0.007808740250766277,
609
+ "rewards/rejected": -0.7172243595123291,
610
+ "step": 40
611
+ },
612
+ {
613
+ "epoch": 0.6034958601655934,
614
+ "grad_norm": 6.307469367980957,
615
+ "learning_rate": 7.868852459016393e-07,
616
+ "logits/chosen": -1.7075982093811035,
617
+ "logits/rejected": -1.929357886314392,
618
+ "logps/chosen": -0.3901516795158386,
619
+ "logps/rejected": -0.36571019887924194,
620
+ "loss": 1.008,
621
+ "rewards/accuracies": 0.40625,
622
+ "rewards/chosen": -0.7803032994270325,
623
+ "rewards/margins": -0.04888291284441948,
624
+ "rewards/rejected": -0.7314203977584839,
625
+ "step": 41
626
+ },
627
+ {
628
+ "epoch": 0.6182152713891444,
629
+ "grad_norm": 6.286430835723877,
630
+ "learning_rate": 7.786885245901639e-07,
631
+ "logits/chosen": -1.617011547088623,
632
+ "logits/rejected": -1.9047297239303589,
633
+ "logps/chosen": -0.3546293377876282,
634
+ "logps/rejected": -0.34627604484558105,
635
+ "loss": 0.9866,
636
+ "rewards/accuracies": 0.484375,
637
+ "rewards/chosen": -0.7092586755752563,
638
+ "rewards/margins": -0.01670663431286812,
639
+ "rewards/rejected": -0.6925520896911621,
640
+ "step": 42
641
+ },
642
+ {
643
+ "epoch": 0.6329346826126955,
644
+ "grad_norm": 6.484315872192383,
645
+ "learning_rate": 7.704918032786884e-07,
646
+ "logits/chosen": -1.8632745742797852,
647
+ "logits/rejected": -2.245807409286499,
648
+ "logps/chosen": -0.36142784357070923,
649
+ "logps/rejected": -0.3599178194999695,
650
+ "loss": 0.9796,
651
+ "rewards/accuracies": 0.484375,
652
+ "rewards/chosen": -0.7228556871414185,
653
+ "rewards/margins": -0.003020005766302347,
654
+ "rewards/rejected": -0.719835638999939,
655
+ "step": 43
656
+ },
657
+ {
658
+ "epoch": 0.6476540938362465,
659
+ "grad_norm": 7.016959190368652,
660
+ "learning_rate": 7.62295081967213e-07,
661
+ "logits/chosen": -1.63113534450531,
662
+ "logits/rejected": -2.070380926132202,
663
+ "logps/chosen": -0.35701748728752136,
664
+ "logps/rejected": -0.337637335062027,
665
+ "loss": 1.001,
666
+ "rewards/accuracies": 0.4375,
667
+ "rewards/chosen": -0.7140350341796875,
668
+ "rewards/margins": -0.03876036778092384,
669
+ "rewards/rejected": -0.6752746105194092,
670
+ "step": 44
671
+ },
672
+ {
673
+ "epoch": 0.6623735050597976,
674
+ "grad_norm": 6.19422721862793,
675
+ "learning_rate": 7.540983606557376e-07,
676
+ "logits/chosen": -2.178931713104248,
677
+ "logits/rejected": -2.422787666320801,
678
+ "logps/chosen": -0.3165867328643799,
679
+ "logps/rejected": -0.32036393880844116,
680
+ "loss": 0.9713,
681
+ "rewards/accuracies": 0.5,
682
+ "rewards/chosen": -0.6331734657287598,
683
+ "rewards/margins": 0.007554395589977503,
684
+ "rewards/rejected": -0.6407278776168823,
685
+ "step": 45
686
+ },
687
+ {
688
+ "epoch": 0.6770929162833487,
689
+ "grad_norm": 5.218748092651367,
690
+ "learning_rate": 7.459016393442623e-07,
691
+ "logits/chosen": -2.395150661468506,
692
+ "logits/rejected": -2.530268907546997,
693
+ "logps/chosen": -0.32682573795318604,
694
+ "logps/rejected": -0.32383865118026733,
695
+ "loss": 0.9792,
696
+ "rewards/accuracies": 0.546875,
697
+ "rewards/chosen": -0.6536514163017273,
698
+ "rewards/margins": -0.005974169354885817,
699
+ "rewards/rejected": -0.6476773023605347,
700
+ "step": 46
701
+ },
702
+ {
703
+ "epoch": 0.6918123275068997,
704
+ "grad_norm": 4.995746612548828,
705
+ "learning_rate": 7.377049180327869e-07,
706
+ "logits/chosen": -2.11518931388855,
707
+ "logits/rejected": -2.542222738265991,
708
+ "logps/chosen": -0.31639549136161804,
709
+ "logps/rejected": -0.32215774059295654,
710
+ "loss": 0.9677,
711
+ "rewards/accuracies": 0.59375,
712
+ "rewards/chosen": -0.6327909827232361,
713
+ "rewards/margins": 0.011524486355483532,
714
+ "rewards/rejected": -0.6443154811859131,
715
+ "step": 47
716
+ },
717
+ {
718
+ "epoch": 0.7065317387304508,
719
+ "grad_norm": 8.630620956420898,
720
+ "learning_rate": 7.295081967213115e-07,
721
+ "logits/chosen": -2.2222516536712646,
722
+ "logits/rejected": -2.3696556091308594,
723
+ "logps/chosen": -0.33347755670547485,
724
+ "logps/rejected": -0.35106533765792847,
725
+ "loss": 0.9541,
726
+ "rewards/accuracies": 0.640625,
727
+ "rewards/chosen": -0.6669551134109497,
728
+ "rewards/margins": 0.03517553582787514,
729
+ "rewards/rejected": -0.7021306753158569,
730
+ "step": 48
731
+ },
732
+ {
733
+ "epoch": 0.7212511499540019,
734
+ "grad_norm": 6.239405155181885,
735
+ "learning_rate": 7.21311475409836e-07,
736
+ "logits/chosen": -2.3627071380615234,
737
+ "logits/rejected": -2.582144260406494,
738
+ "logps/chosen": -0.34942522644996643,
739
+ "logps/rejected": -0.3538520336151123,
740
+ "loss": 0.9716,
741
+ "rewards/accuracies": 0.53125,
742
+ "rewards/chosen": -0.6988504528999329,
743
+ "rewards/margins": 0.008853599429130554,
744
+ "rewards/rejected": -0.7077040672302246,
745
+ "step": 49
746
+ },
747
+ {
748
+ "epoch": 0.7359705611775529,
749
+ "grad_norm": 5.054786682128906,
750
+ "learning_rate": 7.131147540983606e-07,
751
+ "logits/chosen": -2.7542896270751953,
752
+ "logits/rejected": -2.8403894901275635,
753
+ "logps/chosen": -0.31503599882125854,
754
+ "logps/rejected": -0.3082734942436218,
755
+ "loss": 0.9855,
756
+ "rewards/accuracies": 0.484375,
757
+ "rewards/chosen": -0.6300719976425171,
758
+ "rewards/margins": -0.01352505013346672,
759
+ "rewards/rejected": -0.6165469884872437,
760
+ "step": 50
761
+ },
762
+ {
763
+ "epoch": 0.7506899724011039,
764
+ "grad_norm": 5.122381687164307,
765
+ "learning_rate": 7.049180327868852e-07,
766
+ "logits/chosen": -2.8105831146240234,
767
+ "logits/rejected": -2.856302499771118,
768
+ "logps/chosen": -0.32194629311561584,
769
+ "logps/rejected": -0.3357662558555603,
770
+ "loss": 0.9597,
771
+ "rewards/accuracies": 0.5625,
772
+ "rewards/chosen": -0.6438925862312317,
773
+ "rewards/margins": 0.027639910578727722,
774
+ "rewards/rejected": -0.6715325117111206,
775
+ "step": 51
776
+ },
777
+ {
778
+ "epoch": 0.765409383624655,
779
+ "grad_norm": 5.99705696105957,
780
+ "learning_rate": 6.967213114754098e-07,
781
+ "logits/chosen": -3.04000186920166,
782
+ "logits/rejected": -2.8034870624542236,
783
+ "logps/chosen": -0.33719968795776367,
784
+ "logps/rejected": -0.33283817768096924,
785
+ "loss": 0.9818,
786
+ "rewards/accuracies": 0.59375,
787
+ "rewards/chosen": -0.6743993759155273,
788
+ "rewards/margins": -0.008722986094653606,
789
+ "rewards/rejected": -0.6656763553619385,
790
+ "step": 52
791
+ },
792
+ {
793
+ "epoch": 0.7801287948482061,
794
+ "grad_norm": 4.380398273468018,
795
+ "learning_rate": 6.885245901639343e-07,
796
+ "logits/chosen": -3.1191301345825195,
797
+ "logits/rejected": -3.2772610187530518,
798
+ "logps/chosen": -0.2899811267852783,
799
+ "logps/rejected": -0.29639169573783875,
800
+ "loss": 0.9674,
801
+ "rewards/accuracies": 0.53125,
802
+ "rewards/chosen": -0.5799622535705566,
803
+ "rewards/margins": 0.012821085751056671,
804
+ "rewards/rejected": -0.5927833914756775,
805
+ "step": 53
806
+ },
807
+ {
808
+ "epoch": 0.7948482060717571,
809
+ "grad_norm": 6.095130920410156,
810
+ "learning_rate": 6.80327868852459e-07,
811
+ "logits/chosen": -2.5826096534729004,
812
+ "logits/rejected": -2.715819835662842,
813
+ "logps/chosen": -0.3463515341281891,
814
+ "logps/rejected": -0.35433435440063477,
815
+ "loss": 0.9664,
816
+ "rewards/accuracies": 0.640625,
817
+ "rewards/chosen": -0.692703127861023,
818
+ "rewards/margins": 0.01596560701727867,
819
+ "rewards/rejected": -0.7086687088012695,
820
+ "step": 54
821
+ },
822
+ {
823
+ "epoch": 0.8095676172953082,
824
+ "grad_norm": 5.1041951179504395,
825
+ "learning_rate": 6.721311475409835e-07,
826
+ "logits/chosen": -3.31288480758667,
827
+ "logits/rejected": -3.182924747467041,
828
+ "logps/chosen": -0.31364989280700684,
829
+ "logps/rejected": -0.3094671964645386,
830
+ "loss": 0.9809,
831
+ "rewards/accuracies": 0.484375,
832
+ "rewards/chosen": -0.6272997856140137,
833
+ "rewards/margins": -0.008365361019968987,
834
+ "rewards/rejected": -0.6189343929290771,
835
+ "step": 55
836
+ },
837
+ {
838
+ "epoch": 0.8242870285188593,
839
+ "grad_norm": 5.590980052947998,
840
+ "learning_rate": 6.639344262295081e-07,
841
+ "logits/chosen": -2.5883703231811523,
842
+ "logits/rejected": -2.679048538208008,
843
+ "logps/chosen": -0.3323555290699005,
844
+ "logps/rejected": -0.32933926582336426,
845
+ "loss": 0.9797,
846
+ "rewards/accuracies": 0.484375,
847
+ "rewards/chosen": -0.664711058139801,
848
+ "rewards/margins": -0.006032535340636969,
849
+ "rewards/rejected": -0.6586785316467285,
850
+ "step": 56
851
+ },
852
+ {
853
+ "epoch": 0.8390064397424103,
854
+ "grad_norm": 5.798619747161865,
855
+ "learning_rate": 6.557377049180327e-07,
856
+ "logits/chosen": -2.8983383178710938,
857
+ "logits/rejected": -3.162022113800049,
858
+ "logps/chosen": -0.3113633394241333,
859
+ "logps/rejected": -0.3264388144016266,
860
+ "loss": 0.9571,
861
+ "rewards/accuracies": 0.640625,
862
+ "rewards/chosen": -0.6227266788482666,
863
+ "rewards/margins": 0.030150914564728737,
864
+ "rewards/rejected": -0.6528776288032532,
865
+ "step": 57
866
+ },
867
+ {
868
+ "epoch": 0.8537258509659613,
869
+ "grad_norm": 6.8788862228393555,
870
+ "learning_rate": 6.475409836065574e-07,
871
+ "logits/chosen": -3.176103115081787,
872
+ "logits/rejected": -3.0832886695861816,
873
+ "logps/chosen": -0.3261672556400299,
874
+ "logps/rejected": -0.32974904775619507,
875
+ "loss": 0.9727,
876
+ "rewards/accuracies": 0.59375,
877
+ "rewards/chosen": -0.6523345708847046,
878
+ "rewards/margins": 0.0071635497733950615,
879
+ "rewards/rejected": -0.6594980955123901,
880
+ "step": 58
881
+ },
882
+ {
883
+ "epoch": 0.8684452621895125,
884
+ "grad_norm": 5.570962429046631,
885
+ "learning_rate": 6.393442622950819e-07,
886
+ "logits/chosen": -2.4592795372009277,
887
+ "logits/rejected": -2.800849437713623,
888
+ "logps/chosen": -0.3107580542564392,
889
+ "logps/rejected": -0.34081941843032837,
890
+ "loss": 0.9387,
891
+ "rewards/accuracies": 0.703125,
892
+ "rewards/chosen": -0.6215161085128784,
893
+ "rewards/margins": 0.060122713446617126,
894
+ "rewards/rejected": -0.681638777256012,
895
+ "step": 59
896
+ },
897
+ {
898
+ "epoch": 0.8831646734130635,
899
+ "grad_norm": 6.777514457702637,
900
+ "learning_rate": 6.311475409836066e-07,
901
+ "logits/chosen": -2.9372458457946777,
902
+ "logits/rejected": -3.095715045928955,
903
+ "logps/chosen": -0.330186128616333,
904
+ "logps/rejected": -0.3394075632095337,
905
+ "loss": 0.9652,
906
+ "rewards/accuracies": 0.59375,
907
+ "rewards/chosen": -0.660372257232666,
908
+ "rewards/margins": 0.01844284124672413,
909
+ "rewards/rejected": -0.6788151264190674,
910
+ "step": 60
911
+ },
912
+ {
913
+ "epoch": 0.8978840846366145,
914
+ "grad_norm": 6.811728477478027,
915
+ "learning_rate": 6.229508196721311e-07,
916
+ "logits/chosen": -3.002053737640381,
917
+ "logits/rejected": -3.0411558151245117,
918
+ "logps/chosen": -0.3336254358291626,
919
+ "logps/rejected": -0.34820815920829773,
920
+ "loss": 0.9577,
921
+ "rewards/accuracies": 0.546875,
922
+ "rewards/chosen": -0.6672508716583252,
923
+ "rewards/margins": 0.029165498912334442,
924
+ "rewards/rejected": -0.6964163184165955,
925
+ "step": 61
926
+ },
927
+ {
928
+ "epoch": 0.9126034958601656,
929
+ "grad_norm": 6.526244163513184,
930
+ "learning_rate": 6.147540983606557e-07,
931
+ "logits/chosen": -2.9746174812316895,
932
+ "logits/rejected": -3.024784564971924,
933
+ "logps/chosen": -0.3487308621406555,
934
+ "logps/rejected": -0.344043493270874,
935
+ "loss": 0.9812,
936
+ "rewards/accuracies": 0.5625,
937
+ "rewards/chosen": -0.697461724281311,
938
+ "rewards/margins": -0.009374706074595451,
939
+ "rewards/rejected": -0.688086986541748,
940
+ "step": 62
941
+ },
942
+ {
943
+ "epoch": 0.9273229070837167,
944
+ "grad_norm": 6.0199785232543945,
945
+ "learning_rate": 6.065573770491803e-07,
946
+ "logits/chosen": -3.471893548965454,
947
+ "logits/rejected": -3.154780864715576,
948
+ "logps/chosen": -0.3194906711578369,
949
+ "logps/rejected": -0.31385791301727295,
950
+ "loss": 0.9831,
951
+ "rewards/accuracies": 0.484375,
952
+ "rewards/chosen": -0.6389813423156738,
953
+ "rewards/margins": -0.011265482753515244,
954
+ "rewards/rejected": -0.6277158856391907,
955
+ "step": 63
956
+ },
957
+ {
958
+ "epoch": 0.9420423183072677,
959
+ "grad_norm": 5.7925262451171875,
960
+ "learning_rate": 5.983606557377049e-07,
961
+ "logits/chosen": -2.928650379180908,
962
+ "logits/rejected": -3.0489816665649414,
963
+ "logps/chosen": -0.3156253695487976,
964
+ "logps/rejected": -0.33363330364227295,
965
+ "loss": 0.9533,
966
+ "rewards/accuracies": 0.609375,
967
+ "rewards/chosen": -0.6312507390975952,
968
+ "rewards/margins": 0.0360158234834671,
969
+ "rewards/rejected": -0.6672665476799011,
970
+ "step": 64
971
+ },
972
+ {
973
+ "epoch": 0.9567617295308187,
974
+ "grad_norm": 5.304396152496338,
975
+ "learning_rate": 5.901639344262294e-07,
976
+ "logits/chosen": -2.7677812576293945,
977
+ "logits/rejected": -2.918884754180908,
978
+ "logps/chosen": -0.3248683512210846,
979
+ "logps/rejected": -0.3287356197834015,
980
+ "loss": 0.9704,
981
+ "rewards/accuracies": 0.546875,
982
+ "rewards/chosen": -0.6497367024421692,
983
+ "rewards/margins": 0.0077345771715044975,
984
+ "rewards/rejected": -0.6574712991714478,
985
+ "step": 65
986
+ },
987
+ {
988
+ "epoch": 0.9714811407543699,
989
+ "grad_norm": 6.466002464294434,
990
+ "learning_rate": 5.819672131147541e-07,
991
+ "logits/chosen": -2.5215635299682617,
992
+ "logits/rejected": -3.0989413261413574,
993
+ "logps/chosen": -0.34512242674827576,
994
+ "logps/rejected": -0.34232115745544434,
995
+ "loss": 0.9792,
996
+ "rewards/accuracies": 0.546875,
997
+ "rewards/chosen": -0.6902447938919067,
998
+ "rewards/margins": -0.00560246966779232,
999
+ "rewards/rejected": -0.6846423745155334,
1000
+ "step": 66
1001
+ },
1002
+ {
1003
+ "epoch": 0.9862005519779209,
1004
+ "grad_norm": 5.751663684844971,
1005
+ "learning_rate": 5.737704918032786e-07,
1006
+ "logits/chosen": -2.967613697052002,
1007
+ "logits/rejected": -3.0896427631378174,
1008
+ "logps/chosen": -0.32376572489738464,
1009
+ "logps/rejected": -0.3302830159664154,
1010
+ "loss": 0.9682,
1011
+ "rewards/accuracies": 0.5625,
1012
+ "rewards/chosen": -0.6475313901901245,
1013
+ "rewards/margins": 0.013034624047577381,
1014
+ "rewards/rejected": -0.6605660319328308,
1015
+ "step": 67
1016
+ },
1017
+ {
1018
+ "epoch": 1.0,
1019
+ "grad_norm": 6.688215732574463,
1020
+ "learning_rate": 5.655737704918032e-07,
1021
+ "logits/chosen": -2.593557119369507,
1022
+ "logits/rejected": -2.572288990020752,
1023
+ "logps/chosen": -0.35654735565185547,
1024
+ "logps/rejected": -0.36155587434768677,
1025
+ "loss": 0.9114,
1026
+ "rewards/accuracies": 0.5500000715255737,
1027
+ "rewards/chosen": -0.7130947113037109,
1028
+ "rewards/margins": 0.010017056949436665,
1029
+ "rewards/rejected": -0.7231118083000183,
1030
+ "step": 68
1031
+ },
1032
+ {
1033
+ "epoch": 1.0147194112235511,
1034
+ "grad_norm": 5.89481782913208,
1035
+ "learning_rate": 5.573770491803278e-07,
1036
+ "logits/chosen": -2.667405605316162,
1037
+ "logits/rejected": -2.9011430740356445,
1038
+ "logps/chosen": -0.3504785895347595,
1039
+ "logps/rejected": -0.3745538592338562,
1040
+ "loss": 0.9459,
1041
+ "rewards/accuracies": 0.671875,
1042
+ "rewards/chosen": -0.700957179069519,
1043
+ "rewards/margins": 0.048150498420000076,
1044
+ "rewards/rejected": -0.7491077184677124,
1045
+ "step": 69
1046
+ },
1047
+ {
1048
+ "epoch": 1.029438822447102,
1049
+ "grad_norm": 5.436809062957764,
1050
+ "learning_rate": 5.491803278688525e-07,
1051
+ "logits/chosen": -3.7802786827087402,
1052
+ "logits/rejected": -3.2489778995513916,
1053
+ "logps/chosen": -0.3161553144454956,
1054
+ "logps/rejected": -0.3399825096130371,
1055
+ "loss": 0.9474,
1056
+ "rewards/accuracies": 0.671875,
1057
+ "rewards/chosen": -0.6323106288909912,
1058
+ "rewards/margins": 0.04765439033508301,
1059
+ "rewards/rejected": -0.6799650192260742,
1060
+ "step": 70
1061
+ },
1062
+ {
1063
+ "epoch": 1.0441582336706532,
1064
+ "grad_norm": 5.984342575073242,
1065
+ "learning_rate": 5.40983606557377e-07,
1066
+ "logits/chosen": -3.4014768600463867,
1067
+ "logits/rejected": -3.056406021118164,
1068
+ "logps/chosen": -0.3482527434825897,
1069
+ "logps/rejected": -0.3865743577480316,
1070
+ "loss": 0.9301,
1071
+ "rewards/accuracies": 0.640625,
1072
+ "rewards/chosen": -0.6965054273605347,
1073
+ "rewards/margins": 0.07664325833320618,
1074
+ "rewards/rejected": -0.7731487154960632,
1075
+ "step": 71
1076
+ },
1077
+ {
1078
+ "epoch": 1.0588776448942043,
1079
+ "grad_norm": 4.9069623947143555,
1080
+ "learning_rate": 5.327868852459017e-07,
1081
+ "logits/chosen": -3.8456344604492188,
1082
+ "logits/rejected": -3.4291088581085205,
1083
+ "logps/chosen": -0.3101879954338074,
1084
+ "logps/rejected": -0.3360385298728943,
1085
+ "loss": 0.9439,
1086
+ "rewards/accuracies": 0.671875,
1087
+ "rewards/chosen": -0.6203759908676147,
1088
+ "rewards/margins": 0.05170107260346413,
1089
+ "rewards/rejected": -0.6720770597457886,
1090
+ "step": 72
1091
+ },
1092
+ {
1093
+ "epoch": 1.0735970561177552,
1094
+ "grad_norm": 5.373537540435791,
1095
+ "learning_rate": 5.245901639344262e-07,
1096
+ "logits/chosen": -3.189006805419922,
1097
+ "logits/rejected": -3.131258249282837,
1098
+ "logps/chosen": -0.3355609178543091,
1099
+ "logps/rejected": -0.3480716943740845,
1100
+ "loss": 0.9606,
1101
+ "rewards/accuracies": 0.609375,
1102
+ "rewards/chosen": -0.6711218357086182,
1103
+ "rewards/margins": 0.025021512061357498,
1104
+ "rewards/rejected": -0.6961433291435242,
1105
+ "step": 73
1106
+ },
1107
+ {
1108
+ "epoch": 1.0883164673413064,
1109
+ "grad_norm": 5.561800479888916,
1110
+ "learning_rate": 5.163934426229508e-07,
1111
+ "logits/chosen": -2.8683199882507324,
1112
+ "logits/rejected": -2.8707809448242188,
1113
+ "logps/chosen": -0.31854379177093506,
1114
+ "logps/rejected": -0.36209046840667725,
1115
+ "loss": 0.9235,
1116
+ "rewards/accuracies": 0.71875,
1117
+ "rewards/chosen": -0.6370875835418701,
1118
+ "rewards/margins": 0.0870932936668396,
1119
+ "rewards/rejected": -0.7241808772087097,
1120
+ "step": 74
1121
+ },
1122
+ {
1123
+ "epoch": 1.1030358785648575,
1124
+ "grad_norm": 4.990414142608643,
1125
+ "learning_rate": 5.081967213114754e-07,
1126
+ "logits/chosen": -3.194347858428955,
1127
+ "logits/rejected": -3.092930793762207,
1128
+ "logps/chosen": -0.32476598024368286,
1129
+ "logps/rejected": -0.35717323422431946,
1130
+ "loss": 0.9365,
1131
+ "rewards/accuracies": 0.71875,
1132
+ "rewards/chosen": -0.6495320200920105,
1133
+ "rewards/margins": 0.0648144781589508,
1134
+ "rewards/rejected": -0.7143464088439941,
1135
+ "step": 75
1136
+ },
1137
+ {
1138
+ "epoch": 1.1177552897884084,
1139
+ "grad_norm": 4.96919584274292,
1140
+ "learning_rate": 5e-07,
1141
+ "logits/chosen": -3.0481834411621094,
1142
+ "logits/rejected": -3.0204482078552246,
1143
+ "logps/chosen": -0.31437885761260986,
1144
+ "logps/rejected": -0.34356942772865295,
1145
+ "loss": 0.9397,
1146
+ "rewards/accuracies": 0.671875,
1147
+ "rewards/chosen": -0.6287577152252197,
1148
+ "rewards/margins": 0.05838116630911827,
1149
+ "rewards/rejected": -0.6871389150619507,
1150
+ "step": 76
1151
+ },
1152
+ {
1153
+ "epoch": 1.1324747010119596,
1154
+ "grad_norm": 5.780066013336182,
1155
+ "learning_rate": 4.918032786885245e-07,
1156
+ "logits/chosen": -3.1936581134796143,
1157
+ "logits/rejected": -3.0110840797424316,
1158
+ "logps/chosen": -0.3270755112171173,
1159
+ "logps/rejected": -0.36666250228881836,
1160
+ "loss": 0.9284,
1161
+ "rewards/accuracies": 0.625,
1162
+ "rewards/chosen": -0.6541510224342346,
1163
+ "rewards/margins": 0.0791739821434021,
1164
+ "rewards/rejected": -0.7333249449729919,
1165
+ "step": 77
1166
+ },
1167
+ {
1168
+ "epoch": 1.1471941122355105,
1169
+ "grad_norm": 5.684655666351318,
1170
+ "learning_rate": 4.836065573770492e-07,
1171
+ "logits/chosen": -2.4972476959228516,
1172
+ "logits/rejected": -2.833545207977295,
1173
+ "logps/chosen": -0.3543640375137329,
1174
+ "logps/rejected": -0.38385263085365295,
1175
+ "loss": 0.9395,
1176
+ "rewards/accuracies": 0.671875,
1177
+ "rewards/chosen": -0.708728015422821,
1178
+ "rewards/margins": 0.05897724628448486,
1179
+ "rewards/rejected": -0.7677052617073059,
1180
+ "step": 78
1181
+ },
1182
+ {
1183
+ "epoch": 1.1619135234590616,
1184
+ "grad_norm": 4.93048095703125,
1185
+ "learning_rate": 4.754098360655737e-07,
1186
+ "logits/chosen": -3.211378812789917,
1187
+ "logits/rejected": -3.1379148960113525,
1188
+ "logps/chosen": -0.31901848316192627,
1189
+ "logps/rejected": -0.3422248363494873,
1190
+ "loss": 0.9467,
1191
+ "rewards/accuracies": 0.734375,
1192
+ "rewards/chosen": -0.6380370259284973,
1193
+ "rewards/margins": 0.046412669122219086,
1194
+ "rewards/rejected": -0.6844496726989746,
1195
+ "step": 79
1196
+ },
1197
+ {
1198
+ "epoch": 1.1766329346826128,
1199
+ "grad_norm": 5.538037300109863,
1200
+ "learning_rate": 4.672131147540984e-07,
1201
+ "logits/chosen": -2.736569404602051,
1202
+ "logits/rejected": -2.8682150840759277,
1203
+ "logps/chosen": -0.34923046827316284,
1204
+ "logps/rejected": -0.3685604929924011,
1205
+ "loss": 0.9517,
1206
+ "rewards/accuracies": 0.640625,
1207
+ "rewards/chosen": -0.6984609365463257,
1208
+ "rewards/margins": 0.038660045713186264,
1209
+ "rewards/rejected": -0.7371209859848022,
1210
+ "step": 80
1211
+ },
1212
+ {
1213
+ "epoch": 1.1913523459061637,
1214
+ "grad_norm": 5.049704074859619,
1215
+ "learning_rate": 4.590163934426229e-07,
1216
+ "logits/chosen": -2.698671340942383,
1217
+ "logits/rejected": -3.129981517791748,
1218
+ "logps/chosen": -0.3192306160926819,
1219
+ "logps/rejected": -0.34711503982543945,
1220
+ "loss": 0.9425,
1221
+ "rewards/accuracies": 0.65625,
1222
+ "rewards/chosen": -0.6384612321853638,
1223
+ "rewards/margins": 0.05576884746551514,
1224
+ "rewards/rejected": -0.6942300796508789,
1225
+ "step": 81
1226
+ },
1227
+ {
1228
+ "epoch": 1.2060717571297148,
1229
+ "grad_norm": 6.073709487915039,
1230
+ "learning_rate": 4.508196721311475e-07,
1231
+ "logits/chosen": -3.247349739074707,
1232
+ "logits/rejected": -3.0602288246154785,
1233
+ "logps/chosen": -0.34253838658332825,
1234
+ "logps/rejected": -0.3857520818710327,
1235
+ "loss": 0.9242,
1236
+ "rewards/accuracies": 0.703125,
1237
+ "rewards/chosen": -0.6850768327713013,
1238
+ "rewards/margins": 0.08642742037773132,
1239
+ "rewards/rejected": -0.7715041637420654,
1240
+ "step": 82
1241
+ },
1242
+ {
1243
+ "epoch": 1.220791168353266,
1244
+ "grad_norm": 5.996784210205078,
1245
+ "learning_rate": 4.426229508196721e-07,
1246
+ "logits/chosen": -2.3361268043518066,
1247
+ "logits/rejected": -2.6752443313598633,
1248
+ "logps/chosen": -0.34017816185951233,
1249
+ "logps/rejected": -0.36880531907081604,
1250
+ "loss": 0.9412,
1251
+ "rewards/accuracies": 0.671875,
1252
+ "rewards/chosen": -0.6803563237190247,
1253
+ "rewards/margins": 0.05725434422492981,
1254
+ "rewards/rejected": -0.7376106381416321,
1255
+ "step": 83
1256
+ },
1257
+ {
1258
+ "epoch": 1.2355105795768169,
1259
+ "grad_norm": 5.233430862426758,
1260
+ "learning_rate": 4.3442622950819667e-07,
1261
+ "logits/chosen": -3.2010231018066406,
1262
+ "logits/rejected": -3.1768579483032227,
1263
+ "logps/chosen": -0.33099696040153503,
1264
+ "logps/rejected": -0.3604615032672882,
1265
+ "loss": 0.9396,
1266
+ "rewards/accuracies": 0.703125,
1267
+ "rewards/chosen": -0.6619938611984253,
1268
+ "rewards/margins": 0.058929093182086945,
1269
+ "rewards/rejected": -0.7209229469299316,
1270
+ "step": 84
1271
+ },
1272
+ {
1273
+ "epoch": 1.250229990800368,
1274
+ "grad_norm": 5.6451311111450195,
1275
+ "learning_rate": 4.2622950819672127e-07,
1276
+ "logits/chosen": -2.11002254486084,
1277
+ "logits/rejected": -2.832841634750366,
1278
+ "logps/chosen": -0.31466031074523926,
1279
+ "logps/rejected": -0.3550466299057007,
1280
+ "loss": 0.9275,
1281
+ "rewards/accuracies": 0.734375,
1282
+ "rewards/chosen": -0.6293206214904785,
1283
+ "rewards/margins": 0.08077271282672882,
1284
+ "rewards/rejected": -0.7100933194160461,
1285
+ "step": 85
1286
+ },
1287
+ {
1288
+ "epoch": 1.2649494020239191,
1289
+ "grad_norm": 6.1917877197265625,
1290
+ "learning_rate": 4.180327868852459e-07,
1291
+ "logits/chosen": -2.6735427379608154,
1292
+ "logits/rejected": -2.9040236473083496,
1293
+ "logps/chosen": -0.3471025228500366,
1294
+ "logps/rejected": -0.3789517283439636,
1295
+ "loss": 0.9368,
1296
+ "rewards/accuracies": 0.671875,
1297
+ "rewards/chosen": -0.6942050457000732,
1298
+ "rewards/margins": 0.063698410987854,
1299
+ "rewards/rejected": -0.7579034566879272,
1300
+ "step": 86
1301
+ },
1302
+ {
1303
+ "epoch": 1.27966881324747,
1304
+ "grad_norm": 5.865265369415283,
1305
+ "learning_rate": 4.0983606557377047e-07,
1306
+ "logits/chosen": -2.7643613815307617,
1307
+ "logits/rejected": -2.8559346199035645,
1308
+ "logps/chosen": -0.33007344603538513,
1309
+ "logps/rejected": -0.36891335248947144,
1310
+ "loss": 0.9295,
1311
+ "rewards/accuracies": 0.6875,
1312
+ "rewards/chosen": -0.6601468920707703,
1313
+ "rewards/margins": 0.07767981290817261,
1314
+ "rewards/rejected": -0.7378267049789429,
1315
+ "step": 87
1316
+ },
1317
+ {
1318
+ "epoch": 1.2943882244710212,
1319
+ "grad_norm": 5.28799295425415,
1320
+ "learning_rate": 4.0163934426229507e-07,
1321
+ "logits/chosen": -3.2653579711914062,
1322
+ "logits/rejected": -3.030381679534912,
1323
+ "logps/chosen": -0.3344501852989197,
1324
+ "logps/rejected": -0.371614933013916,
1325
+ "loss": 0.9317,
1326
+ "rewards/accuracies": 0.75,
1327
+ "rewards/chosen": -0.6689003705978394,
1328
+ "rewards/margins": 0.07432954013347626,
1329
+ "rewards/rejected": -0.743229866027832,
1330
+ "step": 88
1331
+ },
1332
+ {
1333
+ "epoch": 1.3091076356945721,
1334
+ "grad_norm": 5.84768533706665,
1335
+ "learning_rate": 3.9344262295081967e-07,
1336
+ "logits/chosen": -2.64261531829834,
1337
+ "logits/rejected": -2.9684834480285645,
1338
+ "logps/chosen": -0.34116652607917786,
1339
+ "logps/rejected": -0.37209397554397583,
1340
+ "loss": 0.9391,
1341
+ "rewards/accuracies": 0.609375,
1342
+ "rewards/chosen": -0.6823331117630005,
1343
+ "rewards/margins": 0.06185486912727356,
1344
+ "rewards/rejected": -0.7441879510879517,
1345
+ "step": 89
1346
+ },
1347
+ {
1348
+ "epoch": 1.3238270469181233,
1349
+ "grad_norm": 6.20886754989624,
1350
+ "learning_rate": 3.852459016393442e-07,
1351
+ "logits/chosen": -2.8721776008605957,
1352
+ "logits/rejected": -2.935757637023926,
1353
+ "logps/chosen": -0.32490894198417664,
1354
+ "logps/rejected": -0.3719860315322876,
1355
+ "loss": 0.9191,
1356
+ "rewards/accuracies": 0.765625,
1357
+ "rewards/chosen": -0.6498178839683533,
1358
+ "rewards/margins": 0.0941542536020279,
1359
+ "rewards/rejected": -0.74397212266922,
1360
+ "step": 90
1361
+ },
1362
+ {
1363
+ "epoch": 1.3385464581416744,
1364
+ "grad_norm": 5.540692329406738,
1365
+ "learning_rate": 3.770491803278688e-07,
1366
+ "logits/chosen": -2.457730293273926,
1367
+ "logits/rejected": -2.8065922260284424,
1368
+ "logps/chosen": -0.363670289516449,
1369
+ "logps/rejected": -0.39088112115859985,
1370
+ "loss": 0.9424,
1371
+ "rewards/accuracies": 0.609375,
1372
+ "rewards/chosen": -0.7273405194282532,
1373
+ "rewards/margins": 0.05442170053720474,
1374
+ "rewards/rejected": -0.7817622423171997,
1375
+ "step": 91
1376
+ },
1377
+ {
1378
+ "epoch": 1.3532658693652255,
1379
+ "grad_norm": 4.544013977050781,
1380
+ "learning_rate": 3.6885245901639347e-07,
1381
+ "logits/chosen": -3.041391134262085,
1382
+ "logits/rejected": -3.200744390487671,
1383
+ "logps/chosen": -0.30500367283821106,
1384
+ "logps/rejected": -0.3335219621658325,
1385
+ "loss": 0.941,
1386
+ "rewards/accuracies": 0.640625,
1387
+ "rewards/chosen": -0.6100073456764221,
1388
+ "rewards/margins": 0.05703658610582352,
1389
+ "rewards/rejected": -0.667043924331665,
1390
+ "step": 92
1391
+ },
1392
+ {
1393
+ "epoch": 1.3679852805887764,
1394
+ "grad_norm": 5.325432300567627,
1395
+ "learning_rate": 3.60655737704918e-07,
1396
+ "logits/chosen": -3.339791774749756,
1397
+ "logits/rejected": -3.3530054092407227,
1398
+ "logps/chosen": -0.32377439737319946,
1399
+ "logps/rejected": -0.347787469625473,
1400
+ "loss": 0.9469,
1401
+ "rewards/accuracies": 0.625,
1402
+ "rewards/chosen": -0.6475488543510437,
1403
+ "rewards/margins": 0.048026055097579956,
1404
+ "rewards/rejected": -0.6955748796463013,
1405
+ "step": 93
1406
+ },
1407
+ {
1408
+ "epoch": 1.3827046918123276,
1409
+ "grad_norm": 4.834715843200684,
1410
+ "learning_rate": 3.524590163934426e-07,
1411
+ "logits/chosen": -2.9240317344665527,
1412
+ "logits/rejected": -3.0863239765167236,
1413
+ "logps/chosen": -0.30060070753097534,
1414
+ "logps/rejected": -0.33696314692497253,
1415
+ "loss": 0.9317,
1416
+ "rewards/accuracies": 0.6875,
1417
+ "rewards/chosen": -0.6012013554573059,
1418
+ "rewards/margins": 0.07272489368915558,
1419
+ "rewards/rejected": -0.6739262938499451,
1420
+ "step": 94
1421
+ },
1422
+ {
1423
+ "epoch": 1.3974241030358785,
1424
+ "grad_norm": 6.076785564422607,
1425
+ "learning_rate": 3.4426229508196717e-07,
1426
+ "logits/chosen": -2.790525436401367,
1427
+ "logits/rejected": -2.9921529293060303,
1428
+ "logps/chosen": -0.31479692459106445,
1429
+ "logps/rejected": -0.3640579581260681,
1430
+ "loss": 0.9165,
1431
+ "rewards/accuracies": 0.78125,
1432
+ "rewards/chosen": -0.6295938491821289,
1433
+ "rewards/margins": 0.09852204471826553,
1434
+ "rewards/rejected": -0.7281159162521362,
1435
+ "step": 95
1436
+ },
1437
+ {
1438
+ "epoch": 1.4121435142594296,
1439
+ "grad_norm": 6.245033264160156,
1440
+ "learning_rate": 3.3606557377049177e-07,
1441
+ "logits/chosen": -2.711641788482666,
1442
+ "logits/rejected": -2.811826705932617,
1443
+ "logps/chosen": -0.368450790643692,
1444
+ "logps/rejected": -0.39056143164634705,
1445
+ "loss": 0.9495,
1446
+ "rewards/accuracies": 0.609375,
1447
+ "rewards/chosen": -0.7369015216827393,
1448
+ "rewards/margins": 0.04422131925821304,
1449
+ "rewards/rejected": -0.7811228036880493,
1450
+ "step": 96
1451
+ },
1452
+ {
1453
+ "epoch": 1.4268629254829808,
1454
+ "grad_norm": 7.023751258850098,
1455
+ "learning_rate": 3.2786885245901637e-07,
1456
+ "logits/chosen": -2.939385175704956,
1457
+ "logits/rejected": -2.9760098457336426,
1458
+ "logps/chosen": -0.35348090529441833,
1459
+ "logps/rejected": -0.39679813385009766,
1460
+ "loss": 0.9238,
1461
+ "rewards/accuracies": 0.71875,
1462
+ "rewards/chosen": -0.7069618105888367,
1463
+ "rewards/margins": 0.08663442730903625,
1464
+ "rewards/rejected": -0.7935962677001953,
1465
+ "step": 97
1466
+ },
1467
+ {
1468
+ "epoch": 1.4415823367065317,
1469
+ "grad_norm": 12.996735572814941,
1470
+ "learning_rate": 3.1967213114754097e-07,
1471
+ "logits/chosen": -2.7201128005981445,
1472
+ "logits/rejected": -2.9376893043518066,
1473
+ "logps/chosen": -0.33448997139930725,
1474
+ "logps/rejected": -0.3776443600654602,
1475
+ "loss": 0.9244,
1476
+ "rewards/accuracies": 0.671875,
1477
+ "rewards/chosen": -0.6689799427986145,
1478
+ "rewards/margins": 0.08630875498056412,
1479
+ "rewards/rejected": -0.7552887201309204,
1480
+ "step": 98
1481
+ },
1482
+ {
1483
+ "epoch": 1.4563017479300828,
1484
+ "grad_norm": 5.664161682128906,
1485
+ "learning_rate": 3.1147540983606557e-07,
1486
+ "logits/chosen": -3.10605525970459,
1487
+ "logits/rejected": -3.1280431747436523,
1488
+ "logps/chosen": -0.3306281268596649,
1489
+ "logps/rejected": -0.3685624599456787,
1490
+ "loss": 0.9296,
1491
+ "rewards/accuracies": 0.734375,
1492
+ "rewards/chosen": -0.6612562537193298,
1493
+ "rewards/margins": 0.07586869597434998,
1494
+ "rewards/rejected": -0.7371249198913574,
1495
+ "step": 99
1496
+ },
1497
+ {
1498
+ "epoch": 1.4710211591536337,
1499
+ "grad_norm": 5.440978050231934,
1500
+ "learning_rate": 3.0327868852459017e-07,
1501
+ "logits/chosen": -3.1456902027130127,
1502
+ "logits/rejected": -3.1661486625671387,
1503
+ "logps/chosen": -0.33667102456092834,
1504
+ "logps/rejected": -0.38138431310653687,
1505
+ "loss": 0.9216,
1506
+ "rewards/accuracies": 0.703125,
1507
+ "rewards/chosen": -0.6733420491218567,
1508
+ "rewards/margins": 0.08942660689353943,
1509
+ "rewards/rejected": -0.7627686262130737,
1510
+ "step": 100
1511
+ },
1512
+ {
1513
+ "epoch": 1.4857405703771849,
1514
+ "grad_norm": 5.968519687652588,
1515
+ "learning_rate": 2.950819672131147e-07,
1516
+ "logits/chosen": -2.4848227500915527,
1517
+ "logits/rejected": -2.7680301666259766,
1518
+ "logps/chosen": -0.3308497965335846,
1519
+ "logps/rejected": -0.37560707330703735,
1520
+ "loss": 0.9231,
1521
+ "rewards/accuracies": 0.6875,
1522
+ "rewards/chosen": -0.661699652671814,
1523
+ "rewards/margins": 0.08951465040445328,
1524
+ "rewards/rejected": -0.7512142062187195,
1525
+ "step": 101
1526
+ },
1527
+ {
1528
+ "epoch": 1.500459981600736,
1529
+ "grad_norm": 5.029065132141113,
1530
+ "learning_rate": 2.868852459016393e-07,
1531
+ "logits/chosen": -2.8473854064941406,
1532
+ "logits/rejected": -3.1287717819213867,
1533
+ "logps/chosen": -0.307930052280426,
1534
+ "logps/rejected": -0.3468787968158722,
1535
+ "loss": 0.929,
1536
+ "rewards/accuracies": 0.65625,
1537
+ "rewards/chosen": -0.615860104560852,
1538
+ "rewards/margins": 0.07789750397205353,
1539
+ "rewards/rejected": -0.6937576532363892,
1540
+ "step": 102
1541
+ },
1542
+ {
1543
+ "epoch": 1.5151793928242872,
1544
+ "grad_norm": 5.741458892822266,
1545
+ "learning_rate": 2.786885245901639e-07,
1546
+ "logits/chosen": -3.060265302658081,
1547
+ "logits/rejected": -3.086451768875122,
1548
+ "logps/chosen": -0.3198442757129669,
1549
+ "logps/rejected": -0.3637600839138031,
1550
+ "loss": 0.9235,
1551
+ "rewards/accuracies": 0.640625,
1552
+ "rewards/chosen": -0.6396886110305786,
1553
+ "rewards/margins": 0.08783160150051117,
1554
+ "rewards/rejected": -0.7275201678276062,
1555
+ "step": 103
1556
+ },
1557
+ {
1558
+ "epoch": 1.529898804047838,
1559
+ "grad_norm": 5.51586389541626,
1560
+ "learning_rate": 2.704918032786885e-07,
1561
+ "logits/chosen": -2.7921199798583984,
1562
+ "logits/rejected": -2.949281692504883,
1563
+ "logps/chosen": -0.35858044028282166,
1564
+ "logps/rejected": -0.38384395837783813,
1565
+ "loss": 0.9457,
1566
+ "rewards/accuracies": 0.703125,
1567
+ "rewards/chosen": -0.7171608805656433,
1568
+ "rewards/margins": 0.05052705854177475,
1569
+ "rewards/rejected": -0.7676879167556763,
1570
+ "step": 104
1571
+ },
1572
+ {
1573
+ "epoch": 1.544618215271389,
1574
+ "grad_norm": 6.046347618103027,
1575
+ "learning_rate": 2.622950819672131e-07,
1576
+ "logits/chosen": -2.7023696899414062,
1577
+ "logits/rejected": -2.9308128356933594,
1578
+ "logps/chosen": -0.35011446475982666,
1579
+ "logps/rejected": -0.3880847096443176,
1580
+ "loss": 0.9303,
1581
+ "rewards/accuracies": 0.71875,
1582
+ "rewards/chosen": -0.7002289295196533,
1583
+ "rewards/margins": 0.07594041526317596,
1584
+ "rewards/rejected": -0.7761694192886353,
1585
+ "step": 105
1586
+ },
1587
+ {
1588
+ "epoch": 1.5593376264949401,
1589
+ "grad_norm": 5.501741886138916,
1590
+ "learning_rate": 2.540983606557377e-07,
1591
+ "logits/chosen": -2.6256351470947266,
1592
+ "logits/rejected": -3.093425750732422,
1593
+ "logps/chosen": -0.3132556080818176,
1594
+ "logps/rejected": -0.3510013222694397,
1595
+ "loss": 0.9302,
1596
+ "rewards/accuracies": 0.765625,
1597
+ "rewards/chosen": -0.62651127576828,
1598
+ "rewards/margins": 0.07549147307872772,
1599
+ "rewards/rejected": -0.702002763748169,
1600
+ "step": 106
1601
+ },
1602
+ {
1603
+ "epoch": 1.5740570377184913,
1604
+ "grad_norm": 6.958146572113037,
1605
+ "learning_rate": 2.4590163934426226e-07,
1606
+ "logits/chosen": -2.88631010055542,
1607
+ "logits/rejected": -2.882737636566162,
1608
+ "logps/chosen": -0.33777421712875366,
1609
+ "logps/rejected": -0.3951508700847626,
1610
+ "loss": 0.9072,
1611
+ "rewards/accuracies": 0.765625,
1612
+ "rewards/chosen": -0.6755484342575073,
1613
+ "rewards/margins": 0.1147533506155014,
1614
+ "rewards/rejected": -0.7903016805648804,
1615
+ "step": 107
1616
+ },
1617
+ {
1618
+ "epoch": 1.5887764489420424,
1619
+ "grad_norm": 5.5533623695373535,
1620
+ "learning_rate": 2.3770491803278686e-07,
1621
+ "logits/chosen": -2.6153483390808105,
1622
+ "logits/rejected": -2.914055347442627,
1623
+ "logps/chosen": -0.33533841371536255,
1624
+ "logps/rejected": -0.3664292097091675,
1625
+ "loss": 0.938,
1626
+ "rewards/accuracies": 0.640625,
1627
+ "rewards/chosen": -0.6706768274307251,
1628
+ "rewards/margins": 0.062181562185287476,
1629
+ "rewards/rejected": -0.732858419418335,
1630
+ "step": 108
1631
+ },
1632
+ {
1633
+ "epoch": 1.6034958601655935,
1634
+ "grad_norm": 6.642143249511719,
1635
+ "learning_rate": 2.2950819672131146e-07,
1636
+ "logits/chosen": -2.60856294631958,
1637
+ "logits/rejected": -2.7908551692962646,
1638
+ "logps/chosen": -0.3444288969039917,
1639
+ "logps/rejected": -0.38877052068710327,
1640
+ "loss": 0.9233,
1641
+ "rewards/accuracies": 0.671875,
1642
+ "rewards/chosen": -0.6888577938079834,
1643
+ "rewards/margins": 0.0886833518743515,
1644
+ "rewards/rejected": -0.7775410413742065,
1645
+ "step": 109
1646
+ },
1647
+ {
1648
+ "epoch": 1.6182152713891444,
1649
+ "grad_norm": 5.208906173706055,
1650
+ "learning_rate": 2.2131147540983606e-07,
1651
+ "logits/chosen": -3.353736162185669,
1652
+ "logits/rejected": -3.3088865280151367,
1653
+ "logps/chosen": -0.32407790422439575,
1654
+ "logps/rejected": -0.3433798551559448,
1655
+ "loss": 0.9516,
1656
+ "rewards/accuracies": 0.625,
1657
+ "rewards/chosen": -0.6481558084487915,
1658
+ "rewards/margins": 0.03860398009419441,
1659
+ "rewards/rejected": -0.6867598295211792,
1660
+ "step": 110
1661
+ },
1662
+ {
1663
+ "epoch": 1.6329346826126954,
1664
+ "grad_norm": 5.580318927764893,
1665
+ "learning_rate": 2.1311475409836064e-07,
1666
+ "logits/chosen": -2.904496908187866,
1667
+ "logits/rejected": -2.884878635406494,
1668
+ "logps/chosen": -0.32563793659210205,
1669
+ "logps/rejected": -0.3524249792098999,
1670
+ "loss": 0.944,
1671
+ "rewards/accuracies": 0.609375,
1672
+ "rewards/chosen": -0.6512758731842041,
1673
+ "rewards/margins": 0.05357404053211212,
1674
+ "rewards/rejected": -0.704849898815155,
1675
+ "step": 111
1676
+ },
1677
+ {
1678
+ "epoch": 1.6476540938362465,
1679
+ "grad_norm": 5.989292621612549,
1680
+ "learning_rate": 2.0491803278688524e-07,
1681
+ "logits/chosen": -2.916060209274292,
1682
+ "logits/rejected": -3.008087635040283,
1683
+ "logps/chosen": -0.3634881377220154,
1684
+ "logps/rejected": -0.39244192838668823,
1685
+ "loss": 0.9439,
1686
+ "rewards/accuracies": 0.59375,
1687
+ "rewards/chosen": -0.7269763350486755,
1688
+ "rewards/margins": 0.05790755897760391,
1689
+ "rewards/rejected": -0.7848838567733765,
1690
+ "step": 112
1691
+ },
1692
+ {
1693
+ "epoch": 1.6623735050597976,
1694
+ "grad_norm": 6.971791744232178,
1695
+ "learning_rate": 1.9672131147540984e-07,
1696
+ "logits/chosen": -2.5411109924316406,
1697
+ "logits/rejected": -2.6886777877807617,
1698
+ "logps/chosen": -0.37233471870422363,
1699
+ "logps/rejected": -0.4020453989505768,
1700
+ "loss": 0.9404,
1701
+ "rewards/accuracies": 0.5625,
1702
+ "rewards/chosen": -0.7446693778038025,
1703
+ "rewards/margins": 0.059421420097351074,
1704
+ "rewards/rejected": -0.8040907979011536,
1705
+ "step": 113
1706
+ },
1707
+ {
1708
+ "epoch": 1.6770929162833488,
1709
+ "grad_norm": 6.173042297363281,
1710
+ "learning_rate": 1.885245901639344e-07,
1711
+ "logits/chosen": -2.249495506286621,
1712
+ "logits/rejected": -2.596745491027832,
1713
+ "logps/chosen": -0.3725017309188843,
1714
+ "logps/rejected": -0.43299970030784607,
1715
+ "loss": 0.9139,
1716
+ "rewards/accuracies": 0.6875,
1717
+ "rewards/chosen": -0.7450034618377686,
1718
+ "rewards/margins": 0.12099599093198776,
1719
+ "rewards/rejected": -0.8659994602203369,
1720
+ "step": 114
1721
+ },
1722
+ {
1723
+ "epoch": 1.6918123275068997,
1724
+ "grad_norm": 5.885793209075928,
1725
+ "learning_rate": 1.80327868852459e-07,
1726
+ "logits/chosen": -2.219787359237671,
1727
+ "logits/rejected": -2.843722105026245,
1728
+ "logps/chosen": -0.3552049398422241,
1729
+ "logps/rejected": -0.4021163582801819,
1730
+ "loss": 0.9206,
1731
+ "rewards/accuracies": 0.671875,
1732
+ "rewards/chosen": -0.7104098796844482,
1733
+ "rewards/margins": 0.09382288157939911,
1734
+ "rewards/rejected": -0.8042327761650085,
1735
+ "step": 115
1736
+ },
1737
+ {
1738
+ "epoch": 1.7065317387304508,
1739
+ "grad_norm": 6.8436665534973145,
1740
+ "learning_rate": 1.7213114754098358e-07,
1741
+ "logits/chosen": -2.7685060501098633,
1742
+ "logits/rejected": -2.891371726989746,
1743
+ "logps/chosen": -0.35419946908950806,
1744
+ "logps/rejected": -0.38518092036247253,
1745
+ "loss": 0.9386,
1746
+ "rewards/accuracies": 0.609375,
1747
+ "rewards/chosen": -0.7083989381790161,
1748
+ "rewards/margins": 0.06196289137005806,
1749
+ "rewards/rejected": -0.7703619003295898,
1750
+ "step": 116
1751
+ },
1752
+ {
1753
+ "epoch": 1.7212511499540017,
1754
+ "grad_norm": 6.383307456970215,
1755
+ "learning_rate": 1.6393442622950818e-07,
1756
+ "logits/chosen": -2.6302192211151123,
1757
+ "logits/rejected": -2.7831718921661377,
1758
+ "logps/chosen": -0.3403049409389496,
1759
+ "logps/rejected": -0.37697887420654297,
1760
+ "loss": 0.9326,
1761
+ "rewards/accuracies": 0.765625,
1762
+ "rewards/chosen": -0.6806098818778992,
1763
+ "rewards/margins": 0.07334783673286438,
1764
+ "rewards/rejected": -0.7539578080177307,
1765
+ "step": 117
1766
+ },
1767
+ {
1768
+ "epoch": 1.7359705611775529,
1769
+ "grad_norm": 6.035743713378906,
1770
+ "learning_rate": 1.5573770491803278e-07,
1771
+ "logits/chosen": -3.153184175491333,
1772
+ "logits/rejected": -3.0809226036071777,
1773
+ "logps/chosen": -0.3405628204345703,
1774
+ "logps/rejected": -0.37279361486434937,
1775
+ "loss": 0.9374,
1776
+ "rewards/accuracies": 0.640625,
1777
+ "rewards/chosen": -0.6811257004737854,
1778
+ "rewards/margins": 0.06446149945259094,
1779
+ "rewards/rejected": -0.7455872297286987,
1780
+ "step": 118
1781
+ },
1782
+ {
1783
+ "epoch": 1.750689972401104,
1784
+ "grad_norm": 6.280840873718262,
1785
+ "learning_rate": 1.4754098360655736e-07,
1786
+ "logits/chosen": -2.436659097671509,
1787
+ "logits/rejected": -2.6618547439575195,
1788
+ "logps/chosen": -0.35082828998565674,
1789
+ "logps/rejected": -0.3988627791404724,
1790
+ "loss": 0.9186,
1791
+ "rewards/accuracies": 0.6875,
1792
+ "rewards/chosen": -0.7016565799713135,
1793
+ "rewards/margins": 0.09606888145208359,
1794
+ "rewards/rejected": -0.7977255582809448,
1795
+ "step": 119
1796
+ },
1797
+ {
1798
+ "epoch": 1.7654093836246552,
1799
+ "grad_norm": 6.461415767669678,
1800
+ "learning_rate": 1.3934426229508196e-07,
1801
+ "logits/chosen": -2.553762674331665,
1802
+ "logits/rejected": -2.8313937187194824,
1803
+ "logps/chosen": -0.36426275968551636,
1804
+ "logps/rejected": -0.3945803642272949,
1805
+ "loss": 0.9394,
1806
+ "rewards/accuracies": 0.734375,
1807
+ "rewards/chosen": -0.7285255193710327,
1808
+ "rewards/margins": 0.06063529849052429,
1809
+ "rewards/rejected": -0.7891607880592346,
1810
+ "step": 120
1811
+ },
1812
+ {
1813
+ "epoch": 1.780128794848206,
1814
+ "grad_norm": 5.7529778480529785,
1815
+ "learning_rate": 1.3114754098360656e-07,
1816
+ "logits/chosen": -3.030463933944702,
1817
+ "logits/rejected": -3.133709192276001,
1818
+ "logps/chosen": -0.33007901906967163,
1819
+ "logps/rejected": -0.3561002016067505,
1820
+ "loss": 0.9452,
1821
+ "rewards/accuracies": 0.484375,
1822
+ "rewards/chosen": -0.6601580381393433,
1823
+ "rewards/margins": 0.05204241722822189,
1824
+ "rewards/rejected": -0.7122004628181458,
1825
+ "step": 121
1826
+ },
1827
+ {
1828
+ "epoch": 1.794848206071757,
1829
+ "grad_norm": 6.557502269744873,
1830
+ "learning_rate": 1.2295081967213113e-07,
1831
+ "logits/chosen": -2.496763229370117,
1832
+ "logits/rejected": -2.722606658935547,
1833
+ "logps/chosen": -0.3488333821296692,
1834
+ "logps/rejected": -0.3847794532775879,
1835
+ "loss": 0.9323,
1836
+ "rewards/accuracies": 0.671875,
1837
+ "rewards/chosen": -0.6976668238639832,
1838
+ "rewards/margins": 0.07189209759235382,
1839
+ "rewards/rejected": -0.769558846950531,
1840
+ "step": 122
1841
+ },
1842
+ {
1843
+ "epoch": 1.8095676172953081,
1844
+ "grad_norm": 6.7690277099609375,
1845
+ "learning_rate": 1.1475409836065573e-07,
1846
+ "logits/chosen": -2.468517303466797,
1847
+ "logits/rejected": -2.8185667991638184,
1848
+ "logps/chosen": -0.3616328537464142,
1849
+ "logps/rejected": -0.3974328637123108,
1850
+ "loss": 0.9348,
1851
+ "rewards/accuracies": 0.703125,
1852
+ "rewards/chosen": -0.7232657074928284,
1853
+ "rewards/margins": 0.0716000348329544,
1854
+ "rewards/rejected": -0.7948658466339111,
1855
+ "step": 123
1856
+ },
1857
+ {
1858
+ "epoch": 1.8242870285188593,
1859
+ "grad_norm": 5.662026405334473,
1860
+ "learning_rate": 1.0655737704918032e-07,
1861
+ "logits/chosen": -2.563282012939453,
1862
+ "logits/rejected": -3.0523743629455566,
1863
+ "logps/chosen": -0.3303174078464508,
1864
+ "logps/rejected": -0.3804895877838135,
1865
+ "loss": 0.9163,
1866
+ "rewards/accuracies": 0.65625,
1867
+ "rewards/chosen": -0.6606348156929016,
1868
+ "rewards/margins": 0.10034434497356415,
1869
+ "rewards/rejected": -0.760979175567627,
1870
+ "step": 124
1871
+ },
1872
+ {
1873
+ "epoch": 1.8390064397424104,
1874
+ "grad_norm": 5.673758506774902,
1875
+ "learning_rate": 9.836065573770492e-08,
1876
+ "logits/chosen": -2.9448227882385254,
1877
+ "logits/rejected": -2.9346063137054443,
1878
+ "logps/chosen": -0.32242119312286377,
1879
+ "logps/rejected": -0.350904643535614,
1880
+ "loss": 0.941,
1881
+ "rewards/accuracies": 0.703125,
1882
+ "rewards/chosen": -0.6448423862457275,
1883
+ "rewards/margins": 0.05696691572666168,
1884
+ "rewards/rejected": -0.701809287071228,
1885
+ "step": 125
1886
+ },
1887
+ {
1888
+ "epoch": 1.8537258509659613,
1889
+ "grad_norm": 5.837872505187988,
1890
+ "learning_rate": 9.01639344262295e-08,
1891
+ "logits/chosen": -3.052157402038574,
1892
+ "logits/rejected": -3.0036213397979736,
1893
+ "logps/chosen": -0.35162875056266785,
1894
+ "logps/rejected": -0.3706502318382263,
1895
+ "loss": 0.9525,
1896
+ "rewards/accuracies": 0.59375,
1897
+ "rewards/chosen": -0.7032575011253357,
1898
+ "rewards/margins": 0.03804299607872963,
1899
+ "rewards/rejected": -0.7413004636764526,
1900
+ "step": 126
1901
+ },
1902
+ {
1903
+ "epoch": 1.8684452621895125,
1904
+ "grad_norm": 6.1533613204956055,
1905
+ "learning_rate": 8.196721311475409e-08,
1906
+ "logits/chosen": -2.717515230178833,
1907
+ "logits/rejected": -2.8103432655334473,
1908
+ "logps/chosen": -0.36272749304771423,
1909
+ "logps/rejected": -0.39639797806739807,
1910
+ "loss": 0.9356,
1911
+ "rewards/accuracies": 0.65625,
1912
+ "rewards/chosen": -0.7254549860954285,
1913
+ "rewards/margins": 0.06734100729227066,
1914
+ "rewards/rejected": -0.7927959561347961,
1915
+ "step": 127
1916
+ },
1917
+ {
1918
+ "epoch": 1.8831646734130634,
1919
+ "grad_norm": 7.13803243637085,
1920
+ "learning_rate": 7.377049180327868e-08,
1921
+ "logits/chosen": -2.4307665824890137,
1922
+ "logits/rejected": -2.7688546180725098,
1923
+ "logps/chosen": -0.3650929629802704,
1924
+ "logps/rejected": -0.4157169759273529,
1925
+ "loss": 0.9164,
1926
+ "rewards/accuracies": 0.71875,
1927
+ "rewards/chosen": -0.7301859259605408,
1928
+ "rewards/margins": 0.10124801099300385,
1929
+ "rewards/rejected": -0.8314339518547058,
1930
+ "step": 128
1931
+ },
1932
+ {
1933
+ "epoch": 1.8978840846366145,
1934
+ "grad_norm": 6.0640716552734375,
1935
+ "learning_rate": 6.557377049180328e-08,
1936
+ "logits/chosen": -2.6504528522491455,
1937
+ "logits/rejected": -2.816641092300415,
1938
+ "logps/chosen": -0.35474810004234314,
1939
+ "logps/rejected": -0.3936282992362976,
1940
+ "loss": 0.9298,
1941
+ "rewards/accuracies": 0.6875,
1942
+ "rewards/chosen": -0.7094961404800415,
1943
+ "rewards/margins": 0.0777604728937149,
1944
+ "rewards/rejected": -0.7872565984725952,
1945
+ "step": 129
1946
+ },
1947
+ {
1948
+ "epoch": 1.9126034958601656,
1949
+ "grad_norm": 5.700585842132568,
1950
+ "learning_rate": 5.7377049180327866e-08,
1951
+ "logits/chosen": -2.6422119140625,
1952
+ "logits/rejected": -2.972996711730957,
1953
+ "logps/chosen": -0.3482939600944519,
1954
+ "logps/rejected": -0.3821781575679779,
1955
+ "loss": 0.9354,
1956
+ "rewards/accuracies": 0.59375,
1957
+ "rewards/chosen": -0.6965879201889038,
1958
+ "rewards/margins": 0.06776841729879379,
1959
+ "rewards/rejected": -0.7643563747406006,
1960
+ "step": 130
1961
+ },
1962
+ {
1963
+ "epoch": 1.9273229070837168,
1964
+ "grad_norm": 8.303826332092285,
1965
+ "learning_rate": 4.918032786885246e-08,
1966
+ "logits/chosen": -2.9017281532287598,
1967
+ "logits/rejected": -2.9597935676574707,
1968
+ "logps/chosen": -0.3572656810283661,
1969
+ "logps/rejected": -0.3903231620788574,
1970
+ "loss": 0.9359,
1971
+ "rewards/accuracies": 0.703125,
1972
+ "rewards/chosen": -0.7145313620567322,
1973
+ "rewards/margins": 0.06611500680446625,
1974
+ "rewards/rejected": -0.7806463837623596,
1975
+ "step": 131
1976
+ },
1977
+ {
1978
+ "epoch": 1.9420423183072677,
1979
+ "grad_norm": 6.356776714324951,
1980
+ "learning_rate": 4.0983606557377046e-08,
1981
+ "logits/chosen": -2.724161386489868,
1982
+ "logits/rejected": -2.8108153343200684,
1983
+ "logps/chosen": -0.35084962844848633,
1984
+ "logps/rejected": -0.39324939250946045,
1985
+ "loss": 0.9253,
1986
+ "rewards/accuracies": 0.65625,
1987
+ "rewards/chosen": -0.7016993165016174,
1988
+ "rewards/margins": 0.08479949086904526,
1989
+ "rewards/rejected": -0.7864987850189209,
1990
+ "step": 132
1991
+ },
1992
+ {
1993
+ "epoch": 1.9567617295308186,
1994
+ "grad_norm": 5.675929069519043,
1995
+ "learning_rate": 3.278688524590164e-08,
1996
+ "logits/chosen": -2.453089714050293,
1997
+ "logits/rejected": -2.8991174697875977,
1998
+ "logps/chosen": -0.3394642770290375,
1999
+ "logps/rejected": -0.38700026273727417,
2000
+ "loss": 0.9188,
2001
+ "rewards/accuracies": 0.765625,
2002
+ "rewards/chosen": -0.6789284944534302,
2003
+ "rewards/margins": 0.09507202357053757,
2004
+ "rewards/rejected": -0.7740005850791931,
2005
+ "step": 133
2006
+ },
2007
+ {
2008
+ "epoch": 1.9714811407543698,
2009
+ "grad_norm": 6.218247413635254,
2010
+ "learning_rate": 2.459016393442623e-08,
2011
+ "logits/chosen": -2.6867618560791016,
2012
+ "logits/rejected": -2.796144962310791,
2013
+ "logps/chosen": -0.36361926794052124,
2014
+ "logps/rejected": -0.4098837971687317,
2015
+ "loss": 0.9204,
2016
+ "rewards/accuracies": 0.765625,
2017
+ "rewards/chosen": -0.7272385358810425,
2018
+ "rewards/margins": 0.09252903610467911,
2019
+ "rewards/rejected": -0.8197675943374634,
2020
+ "step": 134
2021
+ },
2022
+ {
2023
+ "epoch": 1.986200551977921,
2024
+ "grad_norm": 6.011038303375244,
2025
+ "learning_rate": 1.639344262295082e-08,
2026
+ "logits/chosen": -2.8169169425964355,
2027
+ "logits/rejected": -2.9251537322998047,
2028
+ "logps/chosen": -0.3600449562072754,
2029
+ "logps/rejected": -0.40678220987319946,
2030
+ "loss": 0.9194,
2031
+ "rewards/accuracies": 0.765625,
2032
+ "rewards/chosen": -0.7200899124145508,
2033
+ "rewards/margins": 0.09347459673881531,
2034
+ "rewards/rejected": -0.8135644197463989,
2035
+ "step": 135
2036
+ },
2037
+ {
2038
+ "epoch": 2.0,
2039
+ "grad_norm": 5.664433479309082,
2040
+ "learning_rate": 8.19672131147541e-09,
2041
+ "logits/chosen": -2.7600553035736084,
2042
+ "logits/rejected": -2.893031597137451,
2043
+ "logps/chosen": -0.37107592821121216,
2044
+ "logps/rejected": -0.40161800384521484,
2045
+ "loss": 0.8813,
2046
+ "rewards/accuracies": 0.6166666746139526,
2047
+ "rewards/chosen": -0.7421518564224243,
2048
+ "rewards/margins": 0.06108412146568298,
2049
+ "rewards/rejected": -0.8032360076904297,
2050
+ "step": 136
2051
+ },
2052
+ {
2053
+ "epoch": 2.0,
2054
+ "step": 136,
2055
+ "total_flos": 44833908572160.0,
2056
+ "train_loss": 0.9574610433157753,
2057
+ "train_runtime": 3050.2608,
2058
+ "train_samples_per_second": 2.849,
2059
+ "train_steps_per_second": 0.045
2060
+ }
2061
+ ],
2062
+ "logging_steps": 1,
2063
+ "max_steps": 136,
2064
+ "num_input_tokens_seen": 0,
2065
+ "num_train_epochs": 2,
2066
+ "save_steps": 500,
2067
+ "stateful_callbacks": {
2068
+ "TrainerControl": {
2069
+ "args": {
2070
+ "should_epoch_stop": false,
2071
+ "should_evaluate": false,
2072
+ "should_log": false,
2073
+ "should_save": true,
2074
+ "should_training_stop": true
2075
+ },
2076
+ "attributes": {}
2077
+ }
2078
+ },
2079
+ "total_flos": 44833908572160.0,
2080
+ "train_batch_size": 1,
2081
+ "trial_name": null,
2082
+ "trial_params": null
2083
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff7e472145be8d6e1e2851533d82765b018879f2c5c1356794cbc3649e225f78
+ size 8145
training_loss.png ADDED
training_rewards_accuracies.png ADDED
vocab.json ADDED
The diff for this file is too large to render. See raw diff