penfever committed
Commit d6f553d · verified · 1 Parent(s): 9b398cb

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ library_name: transformers
+ license: other
+ base_model: Qwen/Qwen3-8B
+ tags:
+ - llama-factory
+ - full
+ - generated_from_trainer
+ model-index:
+ - name: sft__exp_rpt_stack-bash-withtests_glm_4-7_traces_jupiter__Qwen3-8B
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # sft__exp_rpt_stack-bash-withtests_glm_4-7_traces_jupiter__Qwen3-8B
+
+ This model is a fine-tuned version of [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B) on the /e/data1/datasets/playground/ot/hf_hub/datasets--DCAgent--exp_rpt_stack-bash-withtests_glm_4.7_traces_jupiter/snapshots/82b5d32a246ba8ef7438586e6e27da58d585345f_thinking_preprocessed dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 4e-05
+ - train_batch_size: 1
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 32
+ - gradient_accumulation_steps: 3
+ - total_train_batch_size: 96
+ - total_eval_batch_size: 256
+ - optimizer: ADAMW_TORCH_FUSED with betas=(0.9, 0.98), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 7.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.57.6
+ - Pytorch 2.9.1+cu130
+ - Datasets 4.4.1
+ - Tokenizers 0.22.2
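
A minimal usage sketch (not part of the auto-generated card above): the repo id below is a placeholder, since this commit page does not state the published model id, and the `enable_thinking` flag simply exercises the bundled chat template; sampling values follow generation_config.json.

```python
# Hedged usage sketch -- substitute the actual repo id or a local path to this checkpoint.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "path/or/repo-id/of/this/checkpoint"  # hypothetical placeholder
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

messages = [{"role": "user", "content": "Write a bash one-liner that counts lines in all *.py files."}]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    enable_thinking=True,      # toggle <think> blocks via the bundled chat template
    return_tensors="pt",
).to(model.device)

outputs = model.generate(
    inputs,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.6,           # defaults shipped in generation_config.json
    top_p=0.95,
    top_k=20,
)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```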
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "</think>": 151668,
+   "</tool_call>": 151658,
+   "</tool_response>": 151666,
+   "<think>": 151667,
+   "<tool_call>": 151657,
+   "<tool_response>": 151665,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
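
As a quick sanity check against the mapping above, the tokenizer should report the same ids for these added tokens; a small sketch, assuming the checkpoint is loadable from a placeholder path:

```python
# Sanity-check sketch; the repo id is a placeholder for this checkpoint.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/or/repo-id/of/this/checkpoint")
assert tok.convert_tokens_to_ids("<think>") == 151667
assert tok.convert_tokens_to_ids("</think>") == 151668
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
```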
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "achieved_tflops_per_gpu": 42526.70396900443,
+   "achieved_tflops_per_gpu_theoretical": 1871630.9362088453,
+   "epoch": 7.0,
+   "loss_nan_ranks": 0,
+   "loss_rank_avg": 0.09868818521499634,
+   "mfu_percent": 3005.4207751946597,
+   "mfu_percent_theoretical": 132270.73754126116,
+   "total_flos": 1.9140418922369516e+18,
+   "train_loss": 0.0,
+   "train_runtime": 1.4065,
+   "train_samples_per_second": 53019.754,
+   "train_steps_per_second": 552.445,
+   "valid_targets_mean": 4527.2,
+   "valid_targets_min": 593
+ }
chat_template.jinja ADDED
@@ -0,0 +1,89 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {{- messages[0].content + '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- if message.content is string %}
+ {%- set content = message.content %}
+ {%- else %}
+ {%- set content = '' %}
+ {%- endif %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {%- set reasoning_content = '' %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in content %}
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if loop.index0 > ns.last_query_index %}
+ {%- if loop.last or (not loop.last and reasoning_content) %}
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if (loop.first and content) or (not loop.first) %}
+ {{- '\n' }}
+ {%- endif %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {%- if tool_call.arguments is string %}
+ {{- tool_call.arguments }}
+ {%- else %}
+ {{- tool_call.arguments | tojson }}
+ {%- endif %}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- if enable_thinking is defined and enable_thinking is false %}
+ {{- '<think>\n\n</think>\n\n' }}
+ {%- endif %}
+ {%- endif %}
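
The template above renders tool schemas into a `<tools>` block inside the system turn and wraps emitted calls in `<tool_call>` tags. A sketch of how it is typically exercised through `apply_chat_template`; the `run_bash` tool definition is a made-up example and the repo id is a placeholder:

```python
# Illustrative sketch, not part of the uploaded files.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/or/repo-id/of/this/checkpoint")  # placeholder id
tools = [{
    "type": "function",
    "function": {
        "name": "run_bash",  # hypothetical tool for illustration
        "description": "Run a bash command and return its stdout.",
        "parameters": {
            "type": "object",
            "properties": {"cmd": {"type": "string"}},
            "required": ["cmd"],
        },
    },
}]
messages = [{"role": "user", "content": "List the files in the current directory."}]

# tokenize=False returns the rendered prompt string so the structure is visible.
prompt = tok.apply_chat_template(messages, tools=tools, add_generation_prompt=True, tokenize=False)
print(prompt)  # system block with <tools>...</tools>, then <|im_start|>user ... <|im_start|>assistant
```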
config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "architectures": [
+     "Qwen3ForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "dtype": "bfloat16",
+   "eos_token_id": 151645,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 12288,
+   "layer_types": [
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention"
+   ],
+   "max_position_embeddings": 40960,
+   "max_window_layers": 36,
+   "model_type": "qwen3",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 36,
+   "num_key_value_heads": 8,
+   "pad_token_id": 151643,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "transformers_version": "4.57.6",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
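
A back-of-the-envelope check on this config: with untied embeddings and the GQA shapes listed above, the parameter count works out to roughly 8.19B, which at 2 bytes per bf16 value matches the 16,381,470,720-byte total reported in model.safetensors.index.json below. A sketch of the arithmetic:

```python
# Parameter-count sketch derived from config.json above (assumes untied embeddings,
# per-head q/k RMSNorms of size head_dim, and no biases, as the config indicates).
h, inter, layers, vocab = 4096, 12288, 36, 151936
heads, kv_heads, head_dim = 32, 8, 128

attn = h * heads * head_dim + 2 * h * kv_heads * head_dim + heads * head_dim * h  # q, k, v, o projections
attn += 2 * head_dim                                                              # q_norm, k_norm
mlp = 3 * h * inter                                                               # gate, up, down projections
norms = 2 * h                                                                     # two RMSNorms per layer
per_layer = attn + mlp + norms

total = vocab * h * 2 + layers * per_layer + h   # embeddings + lm_head + layers + final norm
print(total, total * 2)                          # -> 8190735360 params, 16381470720 bytes in bf16
```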
generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "temperature": 0.6,
+   "top_k": 20,
+   "top_p": 0.95,
+   "transformers_version": "4.57.6"
+ }
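
These sampling defaults are picked up automatically by `generate()`; a quick way to inspect them (placeholder repo id, for illustration only):

```python
# Inspection sketch for the defaults listed above.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("path/or/repo-id/of/this/checkpoint")  # placeholder id
print(gen_cfg.do_sample, gen_cfg.temperature, gen_cfg.top_p, gen_cfg.top_k)  # True 0.6 0.95 20
```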
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8690d3233df422290e9039a1a7739315992fa528b84cbe87e9839608ba4d69e3
+ size 4902257696
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:100423744d6201792ad80bea1a62ff19f4722a8ea4b2fa32aa60fb55267f7526
+ size 4915960368
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c52c0ec1251c41a8c9c30e2993e426fe847600a029f0a58df35794be7a493682
+ size 4983068496
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0a8f1801a6dac42ca14507f648b4e4584da6273485f3bfae03afa4704677a8c
+ size 1580230264
model.safetensors.index.json ADDED
@@ -0,0 +1,407 @@
1
+ {
2
+ "metadata": {
3
+ "total_parameters": 308224,
4
+ "total_size": 16381470720
5
+ },
6
+ "weight_map": {
7
+ "lm_head.weight": "model-00004-of-00004.safetensors",
8
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
9
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
10
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
11
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
12
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
13
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
14
+ "model.layers.0.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
15
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
16
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
17
+ "model.layers.0.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
18
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
19
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
20
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
21
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
22
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
23
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
24
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
25
+ "model.layers.1.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
26
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
27
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
28
+ "model.layers.1.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
29
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
30
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
31
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
32
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
33
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
34
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
35
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
36
+ "model.layers.10.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
37
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
38
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
39
+ "model.layers.10.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
40
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
41
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
42
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
43
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
44
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
45
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
46
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
47
+ "model.layers.11.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
48
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
49
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
50
+ "model.layers.11.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
51
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
52
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
53
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
54
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
55
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
56
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
57
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
58
+ "model.layers.12.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
59
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
60
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
61
+ "model.layers.12.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
62
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
63
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
64
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
65
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
66
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
67
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
68
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
69
+ "model.layers.13.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
70
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
71
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
72
+ "model.layers.13.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
73
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
74
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
75
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
76
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
77
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
78
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
79
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
80
+ "model.layers.14.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
81
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
82
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
83
+ "model.layers.14.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
84
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
85
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
86
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
87
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
88
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
89
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
90
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
91
+ "model.layers.15.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
92
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
93
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
94
+ "model.layers.15.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
95
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
96
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
97
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
98
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
99
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
100
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
101
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
102
+ "model.layers.16.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
103
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
104
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
105
+ "model.layers.16.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
106
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
107
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
108
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
109
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
110
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
111
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
112
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
113
+ "model.layers.17.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
114
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
115
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
116
+ "model.layers.17.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
117
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
118
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
119
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
120
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
121
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
122
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
123
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
124
+ "model.layers.18.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
125
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
126
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
127
+ "model.layers.18.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
128
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
129
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
130
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
131
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
132
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
133
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
134
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
135
+ "model.layers.19.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
136
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
137
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
138
+ "model.layers.19.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
139
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
140
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
141
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
142
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
143
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
144
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
145
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
146
+ "model.layers.2.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
147
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
148
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
149
+ "model.layers.2.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
150
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
151
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
152
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
153
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
154
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
155
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
156
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
157
+ "model.layers.20.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
158
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
159
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
160
+ "model.layers.20.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
161
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
162
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
163
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
164
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
165
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
166
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
167
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
168
+ "model.layers.21.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
169
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
170
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
171
+ "model.layers.21.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
172
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
173
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
174
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
175
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
176
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
177
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
178
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
179
+ "model.layers.22.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
180
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
181
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
182
+ "model.layers.22.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
183
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
184
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
185
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
186
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
187
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
188
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
189
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
190
+ "model.layers.23.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
191
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
192
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
193
+ "model.layers.23.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
194
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
195
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
196
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
197
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
198
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
199
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
200
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
201
+ "model.layers.24.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
202
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
203
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
204
+ "model.layers.24.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
205
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
206
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
207
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
208
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
209
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
210
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
211
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
212
+ "model.layers.25.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
213
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
214
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
215
+ "model.layers.25.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
216
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
217
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
218
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
219
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
220
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
221
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
222
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
223
+ "model.layers.26.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
224
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
225
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
226
+ "model.layers.26.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
227
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
228
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
229
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
230
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
231
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
232
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
233
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
234
+ "model.layers.27.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
235
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
236
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
237
+ "model.layers.27.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
238
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
239
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
240
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
241
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
242
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
243
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
244
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
245
+ "model.layers.28.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
246
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
247
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
248
+ "model.layers.28.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
249
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
250
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
251
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
252
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
253
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
254
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
255
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
256
+ "model.layers.29.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
257
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
258
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
259
+ "model.layers.29.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
260
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
261
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
262
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
263
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
264
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
265
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
266
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
267
+ "model.layers.3.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
268
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
269
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
270
+ "model.layers.3.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
271
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
272
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
273
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
274
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
275
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
276
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
277
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
278
+ "model.layers.30.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
279
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
280
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
281
+ "model.layers.30.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
282
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
283
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
284
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
285
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
286
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
287
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
288
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
289
+ "model.layers.31.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
290
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
291
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
292
+ "model.layers.31.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
293
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
294
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
295
+ "model.layers.32.input_layernorm.weight": "model-00003-of-00004.safetensors",
296
+ "model.layers.32.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
297
+ "model.layers.32.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
298
+ "model.layers.32.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
299
+ "model.layers.32.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
300
+ "model.layers.32.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
301
+ "model.layers.32.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
302
+ "model.layers.32.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
303
+ "model.layers.32.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
304
+ "model.layers.32.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
305
+ "model.layers.32.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
306
+ "model.layers.33.input_layernorm.weight": "model-00003-of-00004.safetensors",
307
+ "model.layers.33.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
308
+ "model.layers.33.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
309
+ "model.layers.33.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
310
+ "model.layers.33.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
311
+ "model.layers.33.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
312
+ "model.layers.33.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
313
+ "model.layers.33.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
314
+ "model.layers.33.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
315
+ "model.layers.33.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
316
+ "model.layers.33.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
317
+ "model.layers.34.input_layernorm.weight": "model-00003-of-00004.safetensors",
318
+ "model.layers.34.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
319
+ "model.layers.34.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
320
+ "model.layers.34.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
321
+ "model.layers.34.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
322
+ "model.layers.34.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
323
+ "model.layers.34.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
324
+ "model.layers.34.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
325
+ "model.layers.34.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
326
+ "model.layers.34.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
327
+ "model.layers.34.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
328
+ "model.layers.35.input_layernorm.weight": "model-00004-of-00004.safetensors",
329
+ "model.layers.35.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
330
+ "model.layers.35.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
331
+ "model.layers.35.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
332
+ "model.layers.35.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
333
+ "model.layers.35.self_attn.k_norm.weight": "model-00004-of-00004.safetensors",
334
+ "model.layers.35.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
335
+ "model.layers.35.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
336
+ "model.layers.35.self_attn.q_norm.weight": "model-00004-of-00004.safetensors",
337
+ "model.layers.35.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
338
+ "model.layers.35.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
339
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
340
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
341
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
342
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
343
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
344
+ "model.layers.4.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
345
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
346
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
347
+ "model.layers.4.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
348
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
349
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
350
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
351
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
352
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
353
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
354
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
355
+ "model.layers.5.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
356
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
357
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
358
+ "model.layers.5.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
359
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
360
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
361
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
362
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
363
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
364
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
365
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
366
+ "model.layers.6.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
367
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
368
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
369
+ "model.layers.6.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
370
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
371
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
372
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
373
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
374
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
375
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
376
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
377
+ "model.layers.7.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
378
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
379
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
380
+ "model.layers.7.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
381
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
382
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
383
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
384
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
385
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
386
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
387
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
388
+ "model.layers.8.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
389
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
390
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
391
+ "model.layers.8.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
392
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
393
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
394
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
395
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
396
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
397
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
398
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
399
+ "model.layers.9.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
400
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
401
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
402
+ "model.layers.9.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
403
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
404
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
405
+ "model.norm.weight": "model-00004-of-00004.safetensors"
406
+ }
407
+ }
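
The weight map above routes each tensor to one of the four shards. A small sketch (assuming the index file is available locally) for checking the shard assignments and the reported total size:

```python
# Verification sketch for the sharded checkpoint index above.
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])            # 16381470720 bytes (bf16)
print(Counter(index["weight_map"].values()))      # tensor count per model-0000X-of-00004.safetensors
```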
run_summary.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "agent_name": "82b5d32a246ba8ef7438586e6e27da58d585345f_thinking_preprocessed",
+   "training_start": null,
+   "training_end": null,
+   "created_by": "DCAgent",
+   "base_model_name": "Qwen/Qwen3-8B",
+   "dataset_name": "/e/data1/datasets/playground/ot/hf_hub/datasets--DCAgent--exp_rpt_stack-bash-withtests_glm_4.7_traces_jupiter/snapshots/82b5d32a246ba8ef7438586e6e27da58d585345f_thinking_preprocessed",
+   "training_type": "SFT",
+   "training_parameters": "https://huggingface.co/laion/exp_rpt_stack-bash-withtests_glm_4_7_traces_jupiter/blob/main/config.json",
+   "wandb_link": null,
+   "traces_location_s3": null
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
+ size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,240 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151665": {
+       "content": "<tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151666": {
+       "content": "</tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151667": {
+       "content": "<think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151668": {
+       "content": "</think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
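
A quick check sketch for the tokenizer settings listed above: `<|im_end|>` as EOS, `<|endoftext|>` as PAD, right-side padding, and a 32768-token model_max_length (placeholder repo id, for illustration):

```python
# Inspection sketch; substitute the real repo id or local path for this checkpoint.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/or/repo-id/of/this/checkpoint")
print(tok.eos_token, tok.pad_token, tok.padding_side)  # <|im_end|> <|endoftext|> right
print(tok.model_max_length)                            # 32768
```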
train_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "achieved_tflops_per_gpu": 42526.70396900443,
+   "achieved_tflops_per_gpu_theoretical": 1871630.9362088453,
+   "epoch": 7.0,
+   "mfu_percent": 3005.4207751946597,
+   "mfu_percent_theoretical": 132270.73754126116,
+   "total_flos": 1.9140418922369516e+18,
+   "train_loss": 0.0,
+   "train_runtime": 1.4065,
+   "train_samples_per_second": 53019.754,
+   "train_steps_per_second": 552.445
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,160 @@
1
+ {"current_steps": 5, "total_steps": 777, "loss": 0.6951, "lr": 2.0512820512820513e-06, "epoch": 0.04504504504504504, "percentage": 0.64, "elapsed_time": "0:02:02", "remaining_time": "5:16:16"}
2
+ {"current_steps": 10, "total_steps": 777, "loss": 0.6634, "lr": 4.615384615384616e-06, "epoch": 0.09009009009009009, "percentage": 1.29, "elapsed_time": "0:04:07", "remaining_time": "5:16:18"}
3
+ {"current_steps": 15, "total_steps": 777, "loss": 0.5718, "lr": 7.17948717948718e-06, "epoch": 0.13513513513513514, "percentage": 1.93, "elapsed_time": "0:06:10", "remaining_time": "5:13:28"}
4
+ {"current_steps": 20, "total_steps": 777, "loss": 0.5115, "lr": 9.743589743589744e-06, "epoch": 0.18018018018018017, "percentage": 2.57, "elapsed_time": "0:08:05", "remaining_time": "5:06:17"}
5
+ {"current_steps": 25, "total_steps": 777, "loss": 0.4871, "lr": 1.230769230769231e-05, "epoch": 0.22522522522522523, "percentage": 3.22, "elapsed_time": "0:10:04", "remaining_time": "5:03:00"}
6
+ {"current_steps": 30, "total_steps": 777, "loss": 0.4791, "lr": 1.4871794871794874e-05, "epoch": 0.2702702702702703, "percentage": 3.86, "elapsed_time": "0:12:02", "remaining_time": "4:59:57"}
7
+ {"current_steps": 35, "total_steps": 777, "loss": 0.4542, "lr": 1.7435897435897438e-05, "epoch": 0.3153153153153153, "percentage": 4.5, "elapsed_time": "0:14:05", "remaining_time": "4:58:36"}
8
+ {"current_steps": 40, "total_steps": 777, "loss": 0.4362, "lr": 2e-05, "epoch": 0.36036036036036034, "percentage": 5.15, "elapsed_time": "0:16:00", "remaining_time": "4:54:53"}
9
+ {"current_steps": 45, "total_steps": 777, "loss": 0.4108, "lr": 2.2564102564102566e-05, "epoch": 0.40540540540540543, "percentage": 5.79, "elapsed_time": "0:18:05", "remaining_time": "4:54:11"}
10
+ {"current_steps": 50, "total_steps": 777, "loss": 0.4218, "lr": 2.512820512820513e-05, "epoch": 0.45045045045045046, "percentage": 6.44, "elapsed_time": "0:19:56", "remaining_time": "4:50:02"}
11
+ {"current_steps": 55, "total_steps": 777, "loss": 0.4003, "lr": 2.7692307692307694e-05, "epoch": 0.4954954954954955, "percentage": 7.08, "elapsed_time": "0:22:01", "remaining_time": "4:49:04"}
12
+ {"current_steps": 60, "total_steps": 777, "loss": 0.3955, "lr": 3.0256410256410257e-05, "epoch": 0.5405405405405406, "percentage": 7.72, "elapsed_time": "0:24:00", "remaining_time": "4:46:53"}
13
+ {"current_steps": 65, "total_steps": 777, "loss": 0.3757, "lr": 3.282051282051282e-05, "epoch": 0.5855855855855856, "percentage": 8.37, "elapsed_time": "0:26:04", "remaining_time": "4:45:36"}
14
+ {"current_steps": 70, "total_steps": 777, "loss": 0.3825, "lr": 3.538461538461539e-05, "epoch": 0.6306306306306306, "percentage": 9.01, "elapsed_time": "0:28:00", "remaining_time": "4:42:56"}
15
+ {"current_steps": 75, "total_steps": 777, "loss": 0.3694, "lr": 3.794871794871795e-05, "epoch": 0.6756756756756757, "percentage": 9.65, "elapsed_time": "0:30:04", "remaining_time": "4:41:30"}
16
+ {"current_steps": 80, "total_steps": 777, "loss": 0.356, "lr": 3.999979800311937e-05, "epoch": 0.7207207207207207, "percentage": 10.3, "elapsed_time": "0:32:06", "remaining_time": "4:39:46"}
17
+ {"current_steps": 85, "total_steps": 777, "loss": 0.3409, "lr": 3.999272854071669e-05, "epoch": 0.7657657657657657, "percentage": 10.94, "elapsed_time": "0:34:05", "remaining_time": "4:37:33"}
18
+ {"current_steps": 90, "total_steps": 777, "loss": 0.347, "lr": 3.99755633141858e-05, "epoch": 0.8108108108108109, "percentage": 11.58, "elapsed_time": "0:36:01", "remaining_time": "4:34:59"}
19
+ {"current_steps": 95, "total_steps": 777, "loss": 0.3339, "lr": 3.994831099148205e-05, "epoch": 0.8558558558558559, "percentage": 12.23, "elapsed_time": "0:37:56", "remaining_time": "4:32:20"}
20
+ {"current_steps": 100, "total_steps": 777, "loss": 0.3304, "lr": 3.991098533425988e-05, "epoch": 0.9009009009009009, "percentage": 12.87, "elapsed_time": "0:39:45", "remaining_time": "4:29:10"}
21
+ {"current_steps": 105, "total_steps": 777, "loss": 0.3353, "lr": 3.9863605190923655e-05, "epoch": 0.9459459459459459, "percentage": 13.51, "elapsed_time": "0:41:40", "remaining_time": "4:26:43"}
22
+ {"current_steps": 110, "total_steps": 777, "loss": 0.3293, "lr": 3.9806194487109636e-05, "epoch": 0.990990990990991, "percentage": 14.16, "elapsed_time": "0:43:40", "remaining_time": "4:24:47"}
23
+ {"current_steps": 115, "total_steps": 777, "loss": 0.3461, "lr": 3.9738782213604305e-05, "epoch": 1.0360360360360361, "percentage": 14.8, "elapsed_time": "0:45:42", "remaining_time": "4:23:06"}
24
+ {"current_steps": 120, "total_steps": 777, "loss": 0.3399, "lr": 3.9661402411704794e-05, "epoch": 1.0810810810810811, "percentage": 15.44, "elapsed_time": "0:47:45", "remaining_time": "4:21:27"}
25
+ {"current_steps": 125, "total_steps": 777, "loss": 0.3244, "lr": 3.957409415602899e-05, "epoch": 1.1261261261261262, "percentage": 16.09, "elapsed_time": "0:49:38", "remaining_time": "4:18:56"}
26
+ {"current_steps": 130, "total_steps": 777, "loss": 0.3128, "lr": 3.947690153478396e-05, "epoch": 1.1711711711711712, "percentage": 16.73, "elapsed_time": "0:51:26", "remaining_time": "4:16:03"}
27
+ {"current_steps": 135, "total_steps": 777, "loss": 0.3319, "lr": 3.936987362750266e-05, "epoch": 1.2162162162162162, "percentage": 17.37, "elapsed_time": "0:53:26", "remaining_time": "4:14:06"}
28
+ {"current_steps": 140, "total_steps": 777, "loss": 0.3272, "lr": 3.925306448026011e-05, "epoch": 1.2612612612612613, "percentage": 18.02, "elapsed_time": "0:55:16", "remaining_time": "4:11:31"}
29
+ {"current_steps": 145, "total_steps": 777, "loss": 0.3191, "lr": 3.912653307838173e-05, "epoch": 1.3063063063063063, "percentage": 18.66, "elapsed_time": "0:57:21", "remaining_time": "4:10:01"}
30
+ {"current_steps": 150, "total_steps": 777, "loss": 0.3132, "lr": 3.899034331665733e-05, "epoch": 1.3513513513513513, "percentage": 19.31, "elapsed_time": "0:59:16", "remaining_time": "4:07:47"}
31
+ {"current_steps": 155, "total_steps": 777, "loss": 0.3172, "lr": 3.884456396707611e-05, "epoch": 1.3963963963963963, "percentage": 19.95, "elapsed_time": "1:01:15", "remaining_time": "4:05:50"}
32
+ {"current_steps": 160, "total_steps": 777, "loss": 0.3205, "lr": 3.8689268644098715e-05, "epoch": 1.4414414414414414, "percentage": 20.59, "elapsed_time": "1:03:13", "remaining_time": "4:03:48"}
33
+ {"current_steps": 165, "total_steps": 777, "loss": 0.3006, "lr": 3.852453576748397e-05, "epoch": 1.4864864864864864, "percentage": 21.24, "elapsed_time": "1:05:13", "remaining_time": "4:01:54"}
34
+ {"current_steps": 170, "total_steps": 777, "loss": 0.3053, "lr": 3.835044852268921e-05, "epoch": 1.5315315315315314, "percentage": 21.88, "elapsed_time": "1:07:10", "remaining_time": "3:59:50"}
35
+ {"current_steps": 175, "total_steps": 777, "loss": 0.3197, "lr": 3.816709481886386e-05, "epoch": 1.5765765765765765, "percentage": 22.52, "elapsed_time": "1:09:09", "remaining_time": "3:57:54"}
36
+ {"current_steps": 180, "total_steps": 777, "loss": 0.3112, "lr": 3.7974567244457886e-05, "epoch": 1.6216216216216215, "percentage": 23.17, "elapsed_time": "1:11:09", "remaining_time": "3:56:01"}
37
+ {"current_steps": 185, "total_steps": 777, "loss": 0.3096, "lr": 3.777296302046719e-05, "epoch": 1.6666666666666665, "percentage": 23.81, "elapsed_time": "1:13:07", "remaining_time": "3:54:01"}
38
+ {"current_steps": 190, "total_steps": 777, "loss": 0.302, "lr": 3.756238395133972e-05, "epoch": 1.7117117117117115, "percentage": 24.45, "elapsed_time": "1:15:02", "remaining_time": "3:51:51"}
39
+ {"current_steps": 195, "total_steps": 777, "loss": 0.3059, "lr": 3.734293637356719e-05, "epoch": 1.7567567567567568, "percentage": 25.1, "elapsed_time": "1:17:02", "remaining_time": "3:49:55"}
40
+ {"current_steps": 200, "total_steps": 777, "loss": 0.3168, "lr": 3.711473110198805e-05, "epoch": 1.8018018018018018, "percentage": 25.74, "elapsed_time": "1:19:06", "remaining_time": "3:48:14"}
41
+ {"current_steps": 205, "total_steps": 777, "loss": 0.2959, "lr": 3.687788337382918e-05, "epoch": 1.8468468468468469, "percentage": 26.38, "elapsed_time": "1:20:59", "remaining_time": "3:45:59"}
42
+ {"current_steps": 210, "total_steps": 777, "loss": 0.2993, "lr": 3.663251279051431e-05, "epoch": 1.8918918918918919, "percentage": 27.03, "elapsed_time": "1:22:51", "remaining_time": "3:43:41"}
43
+ {"current_steps": 215, "total_steps": 777, "loss": 0.2999, "lr": 3.6378743257268696e-05, "epoch": 1.936936936936937, "percentage": 27.67, "elapsed_time": "1:24:48", "remaining_time": "3:41:41"}
44
+ {"current_steps": 220, "total_steps": 777, "loss": 0.3047, "lr": 3.6116702920550445e-05, "epoch": 1.981981981981982, "percentage": 28.31, "elapsed_time": "1:26:40", "remaining_time": "3:39:27"}
45
+ {"current_steps": 225, "total_steps": 777, "loss": 0.2942, "lr": 3.58465241033402e-05, "epoch": 2.027027027027027, "percentage": 28.96, "elapsed_time": "1:28:32", "remaining_time": "3:37:13"}
46
+ {"current_steps": 230, "total_steps": 777, "loss": 0.2942, "lr": 3.556834323832174e-05, "epoch": 2.0720720720720722, "percentage": 29.6, "elapsed_time": "1:30:31", "remaining_time": "3:35:18"}
47
+ {"current_steps": 235, "total_steps": 777, "loss": 0.2918, "lr": 3.528230079898734e-05, "epoch": 2.1171171171171173, "percentage": 30.24, "elapsed_time": "1:32:31", "remaining_time": "3:33:23"}
48
+ {"current_steps": 240, "total_steps": 777, "loss": 0.2903, "lr": 3.498854122870263e-05, "epoch": 2.1621621621621623, "percentage": 30.89, "elapsed_time": "1:34:30", "remaining_time": "3:31:27"}
49
+ {"current_steps": 245, "total_steps": 777, "loss": 0.2968, "lr": 3.4687212867766696e-05, "epoch": 2.2072072072072073, "percentage": 31.53, "elapsed_time": "1:36:30", "remaining_time": "3:29:33"}
50
+ {"current_steps": 250, "total_steps": 777, "loss": 0.3055, "lr": 3.437846787850454e-05, "epoch": 2.2522522522522523, "percentage": 32.18, "elapsed_time": "1:38:28", "remaining_time": "3:27:35"}
51
+ {"current_steps": 255, "total_steps": 777, "loss": 0.2857, "lr": 3.4062462168429267e-05, "epoch": 2.2972972972972974, "percentage": 32.82, "elapsed_time": "1:40:17", "remaining_time": "3:25:17"}
52
+ {"current_steps": 260, "total_steps": 777, "loss": 0.2905, "lr": 3.373935531151326e-05, "epoch": 2.3423423423423424, "percentage": 33.46, "elapsed_time": "1:42:18", "remaining_time": "3:23:25"}
53
+ {"current_steps": 265, "total_steps": 777, "loss": 0.2858, "lr": 3.3409310467607824e-05, "epoch": 2.3873873873873874, "percentage": 34.11, "elapsed_time": "1:44:13", "remaining_time": "3:21:23"}
54
+ {"current_steps": 270, "total_steps": 777, "loss": 0.2982, "lr": 3.307249430005203e-05, "epoch": 2.4324324324324325, "percentage": 34.75, "elapsed_time": "1:46:10", "remaining_time": "3:19:23"}
55
+ {"current_steps": 275, "total_steps": 777, "loss": 0.3072, "lr": 3.272907689151245e-05, "epoch": 2.4774774774774775, "percentage": 35.39, "elapsed_time": "1:48:06", "remaining_time": "3:17:20"}
56
+ {"current_steps": 280, "total_steps": 777, "loss": 0.2958, "lr": 3.237923165809619e-05, "epoch": 2.5225225225225225, "percentage": 36.04, "elapsed_time": "1:50:06", "remaining_time": "3:15:26"}
57
+ {"current_steps": 285, "total_steps": 777, "loss": 0.2995, "lr": 3.202313526178067e-05, "epoch": 2.5675675675675675, "percentage": 36.68, "elapsed_time": "1:52:03", "remaining_time": "3:13:26"}
58
+ {"current_steps": 290, "total_steps": 777, "loss": 0.2818, "lr": 3.16609675212043e-05, "epoch": 2.6126126126126126, "percentage": 37.32, "elapsed_time": "1:53:52", "remaining_time": "3:11:14"}
59
+ {"current_steps": 295, "total_steps": 777, "loss": 0.2959, "lr": 3.1292911320863104e-05, "epoch": 2.6576576576576576, "percentage": 37.97, "elapsed_time": "1:55:43", "remaining_time": "3:09:05"}
60
+ {"current_steps": 300, "total_steps": 777, "loss": 0.2936, "lr": 3.091915251875928e-05, "epoch": 2.7027027027027026, "percentage": 38.61, "elapsed_time": "1:57:47", "remaining_time": "3:07:16"}
61
+ {"current_steps": 305, "total_steps": 777, "loss": 0.2841, "lr": 3.053987985254806e-05, "epoch": 2.7477477477477477, "percentage": 39.25, "elapsed_time": "1:59:48", "remaining_time": "3:05:24"}
62
+ {"current_steps": 310, "total_steps": 777, "loss": 0.2828, "lr": 3.015528484423059e-05, "epoch": 2.7927927927927927, "percentage": 39.9, "elapsed_time": "2:01:42", "remaining_time": "3:03:21"}
63
+ {"current_steps": 315, "total_steps": 777, "loss": 0.293, "lr": 2.9765561703440688e-05, "epoch": 2.8378378378378377, "percentage": 40.54, "elapsed_time": "2:03:45", "remaining_time": "3:01:31"}
64
+ {"current_steps": 320, "total_steps": 777, "loss": 0.2753, "lr": 2.937090722937446e-05, "epoch": 2.8828828828828827, "percentage": 41.18, "elapsed_time": "2:05:42", "remaining_time": "2:59:31"}
65
+ {"current_steps": 325, "total_steps": 777, "loss": 0.2873, "lr": 2.897152071141225e-05, "epoch": 2.9279279279279278, "percentage": 41.83, "elapsed_time": "2:07:33", "remaining_time": "2:57:24"}
66
+ {"current_steps": 330, "total_steps": 777, "loss": 0.2975, "lr": 2.8567603828483125e-05, "epoch": 2.972972972972973, "percentage": 42.47, "elapsed_time": "2:09:38", "remaining_time": "2:55:35"}
67
+ {"current_steps": 335, "total_steps": 777, "loss": 0.2851, "lr": 2.8159360547222716e-05, "epoch": 3.018018018018018, "percentage": 43.11, "elapsed_time": "2:11:34", "remaining_time": "2:53:36"}
68
+ {"current_steps": 340, "total_steps": 777, "loss": 0.2825, "lr": 2.7746997018975804e-05, "epoch": 3.063063063063063, "percentage": 43.76, "elapsed_time": "2:13:40", "remaining_time": "2:51:48"}
69
+ {"current_steps": 345, "total_steps": 777, "loss": 0.2749, "lr": 2.733072147569572e-05, "epoch": 3.108108108108108, "percentage": 44.4, "elapsed_time": "2:15:35", "remaining_time": "2:49:47"}
70
+ {"current_steps": 350, "total_steps": 777, "loss": 0.2788, "lr": 2.6910744124793046e-05, "epoch": 3.153153153153153, "percentage": 45.05, "elapsed_time": "2:17:30", "remaining_time": "2:47:45"}
71
+ {"current_steps": 355, "total_steps": 777, "loss": 0.2794, "lr": 2.648727704298685e-05, "epoch": 3.1981981981981984, "percentage": 45.69, "elapsed_time": "2:19:27", "remaining_time": "2:45:46"}
72
+ {"current_steps": 360, "total_steps": 777, "loss": 0.2876, "lr": 2.6060534069211877e-05, "epoch": 3.2432432432432434, "percentage": 46.33, "elapsed_time": "2:21:24", "remaining_time": "2:43:47"}
73
+ {"current_steps": 365, "total_steps": 777, "loss": 0.2773, "lr": 2.5630730696635953e-05, "epoch": 3.2882882882882885, "percentage": 46.98, "elapsed_time": "2:23:18", "remaining_time": "2:41:45"}
74
+ {"current_steps": 370, "total_steps": 777, "loss": 0.2824, "lr": 2.5198083963841988e-05, "epoch": 3.3333333333333335, "percentage": 47.62, "elapsed_time": "2:25:15", "remaining_time": "2:39:47"}
75
+ {"current_steps": 375, "total_steps": 777, "loss": 0.2846, "lr": 2.4762812345229622e-05, "epoch": 3.3783783783783785, "percentage": 48.26, "elapsed_time": "2:27:10", "remaining_time": "2:37:46"}
76
+ {"current_steps": 380, "total_steps": 777, "loss": 0.2821, "lr": 2.4325135640691823e-05, "epoch": 3.4234234234234235, "percentage": 48.91, "elapsed_time": "2:29:06", "remaining_time": "2:35:46"}
77
+ {"current_steps": 385, "total_steps": 777, "loss": 0.2693, "lr": 2.388527486462212e-05, "epoch": 3.4684684684684686, "percentage": 49.55, "elapsed_time": "2:31:00", "remaining_time": "2:33:45"}
78
+ {"current_steps": 390, "total_steps": 777, "loss": 0.2775, "lr": 2.3443452134308565e-05, "epoch": 3.5135135135135136, "percentage": 50.19, "elapsed_time": "2:32:59", "remaining_time": "2:31:49"}
79
+ {"current_steps": 395, "total_steps": 777, "loss": 0.276, "lr": 2.299989055777079e-05, "epoch": 3.5585585585585586, "percentage": 50.84, "elapsed_time": "2:34:56", "remaining_time": "2:29:50"}
80
+ {"current_steps": 400, "total_steps": 777, "loss": 0.2924, "lr": 2.2554814121096748e-05, "epoch": 3.6036036036036037, "percentage": 51.48, "elapsed_time": "2:36:50", "remaining_time": "2:27:49"}
81
+ {"current_steps": 405, "total_steps": 777, "loss": 0.2783, "lr": 2.2108447575336015e-05, "epoch": 3.6486486486486487, "percentage": 52.12, "elapsed_time": "2:38:41", "remaining_time": "2:25:46"}
82
+ {"current_steps": 410, "total_steps": 777, "loss": 0.2841, "lr": 2.16610163230069e-05, "epoch": 3.6936936936936937, "percentage": 52.77, "elapsed_time": "2:40:38", "remaining_time": "2:23:47"}
83
+ {"current_steps": 415, "total_steps": 777, "loss": 0.2709, "lr": 2.1212746304274482e-05, "epoch": 3.7387387387387387, "percentage": 53.41, "elapsed_time": "2:42:38", "remaining_time": "2:21:52"}
84
+ {"current_steps": 420, "total_steps": 777, "loss": 0.2802, "lr": 2.0763863882857242e-05, "epoch": 3.7837837837837838, "percentage": 54.05, "elapsed_time": "2:44:37", "remaining_time": "2:19:55"}
85
+ {"current_steps": 425, "total_steps": 777, "loss": 0.2764, "lr": 2.031459573171973e-05, "epoch": 3.828828828828829, "percentage": 54.7, "elapsed_time": "2:46:33", "remaining_time": "2:17:56"}
86
+ {"current_steps": 430, "total_steps": 777, "loss": 0.2753, "lr": 1.9865168718609142e-05, "epoch": 3.873873873873874, "percentage": 55.34, "elapsed_time": "2:48:30", "remaining_time": "2:15:58"}
87
+ {"current_steps": 435, "total_steps": 777, "loss": 0.2599, "lr": 1.9415809791493484e-05, "epoch": 3.918918918918919, "percentage": 55.98, "elapsed_time": "2:50:25", "remaining_time": "2:13:59"}
88
+ {"current_steps": 440, "total_steps": 777, "loss": 0.29, "lr": 1.8966745863959256e-05, "epoch": 3.963963963963964, "percentage": 56.63, "elapsed_time": "2:52:10", "remaining_time": "2:11:51"}
89
+ {"current_steps": 445, "total_steps": 777, "loss": 0.2694, "lr": 1.851820370062648e-05, "epoch": 4.009009009009009, "percentage": 57.27, "elapsed_time": "2:54:05", "remaining_time": "2:09:53"}
90
+ {"current_steps": 450, "total_steps": 777, "loss": 0.2819, "lr": 1.8070409802638985e-05, "epoch": 4.054054054054054, "percentage": 57.92, "elapsed_time": "2:55:57", "remaining_time": "2:07:52"}
91
+ {"current_steps": 455, "total_steps": 777, "loss": 0.2679, "lr": 1.762359029328768e-05, "epoch": 4.099099099099099, "percentage": 58.56, "elapsed_time": "2:57:47", "remaining_time": "2:05:49"}
92
+ {"current_steps": 460, "total_steps": 777, "loss": 0.2655, "lr": 1.7177970803824714e-05, "epoch": 4.1441441441441444, "percentage": 59.2, "elapsed_time": "2:59:43", "remaining_time": "2:03:50"}
93
+ {"current_steps": 465, "total_steps": 777, "loss": 0.273, "lr": 1.6733776359526024e-05, "epoch": 4.1891891891891895, "percentage": 59.85, "elapsed_time": "3:01:34", "remaining_time": "2:01:49"}
94
+ {"current_steps": 470, "total_steps": 777, "loss": 0.2758, "lr": 1.6291231266059912e-05, "epoch": 4.2342342342342345, "percentage": 60.49, "elapsed_time": "3:03:34", "remaining_time": "1:59:54"}
95
+ {"current_steps": 475, "total_steps": 777, "loss": 0.2771, "lr": 1.585055899621904e-05, "epoch": 4.2792792792792795, "percentage": 61.13, "elapsed_time": "3:05:32", "remaining_time": "1:57:57"}
96
+ {"current_steps": 480, "total_steps": 777, "loss": 0.2736, "lr": 1.5411982077072925e-05, "epoch": 4.324324324324325, "percentage": 61.78, "elapsed_time": "3:07:30", "remaining_time": "1:56:01"}
97
+ {"current_steps": 485, "total_steps": 777, "loss": 0.2636, "lr": 1.497572197759811e-05, "epoch": 4.36936936936937, "percentage": 62.42, "elapsed_time": "3:09:32", "remaining_time": "1:54:06"}
98
+ {"current_steps": 490, "total_steps": 777, "loss": 0.2721, "lr": 1.4541998996842503e-05, "epoch": 4.414414414414415, "percentage": 63.06, "elapsed_time": "3:11:27", "remaining_time": "1:52:08"}
99
+ {"current_steps": 495, "total_steps": 777, "loss": 0.2669, "lr": 1.4111032152680621e-05, "epoch": 4.45945945945946, "percentage": 63.71, "elapsed_time": "3:13:24", "remaining_time": "1:50:10"}
100
+ {"current_steps": 500, "total_steps": 777, "loss": 0.2704, "lr": 1.3683039071215717e-05, "epoch": 4.504504504504505, "percentage": 64.35, "elapsed_time": "3:15:23", "remaining_time": "1:48:14"}
101
+ {"current_steps": 505, "total_steps": 777, "loss": 0.2746, "lr": 1.3258235876884735e-05, "epoch": 4.54954954954955, "percentage": 64.99, "elapsed_time": "3:17:25", "remaining_time": "1:46:20"}
102
+ {"current_steps": 510, "total_steps": 777, "loss": 0.2572, "lr": 1.283683708332159e-05, "epoch": 4.594594594594595, "percentage": 65.64, "elapsed_time": "3:19:12", "remaining_time": "1:44:17"}
103
+ {"current_steps": 515, "total_steps": 777, "loss": 0.2538, "lr": 1.2419055485033788e-05, "epoch": 4.63963963963964, "percentage": 66.28, "elapsed_time": "3:21:04", "remaining_time": "1:42:17"}
104
+ {"current_steps": 520, "total_steps": 777, "loss": 0.2671, "lr": 1.200510204994724e-05, "epoch": 4.684684684684685, "percentage": 66.92, "elapsed_time": "3:22:58", "remaining_time": "1:40:18"}
105
+ {"current_steps": 525, "total_steps": 777, "loss": 0.283, "lr": 1.1595185812873382e-05, "epoch": 4.72972972972973, "percentage": 67.57, "elapsed_time": "3:24:57", "remaining_time": "1:38:23"}
106
+ {"current_steps": 530, "total_steps": 777, "loss": 0.269, "lr": 1.118951376995251e-05, "epoch": 4.774774774774775, "percentage": 68.21, "elapsed_time": "3:26:57", "remaining_time": "1:36:27"}
107
+ {"current_steps": 535, "total_steps": 777, "loss": 0.2757, "lr": 1.0788290774126549e-05, "epoch": 4.81981981981982, "percentage": 68.85, "elapsed_time": "3:28:55", "remaining_time": "1:34:30"}
108
+ {"current_steps": 540, "total_steps": 777, "loss": 0.2697, "lr": 1.039171943169411e-05, "epoch": 4.864864864864865, "percentage": 69.5, "elapsed_time": "3:30:45", "remaining_time": "1:32:30"}
109
+ {"current_steps": 545, "total_steps": 777, "loss": 0.2582, "lr": 1.0000000000000006e-05, "epoch": 4.90990990990991, "percentage": 70.14, "elapsed_time": "3:32:39", "remaining_time": "1:30:31"}
110
+ {"current_steps": 550, "total_steps": 777, "loss": 0.2702, "lr": 9.613330286310952e-06, "epoch": 4.954954954954955, "percentage": 70.79, "elapsed_time": "3:34:35", "remaining_time": "1:28:34"}
111
+ {"current_steps": 555, "total_steps": 777, "loss": 0.2727, "lr": 9.23190554792847e-06, "epoch": 5.0, "percentage": 71.43, "elapsed_time": "3:36:33", "remaining_time": "1:26:37"}
112
+ {"current_steps": 560, "total_steps": 777, "loss": 0.2649, "lr": 8.855918393589462e-06, "epoch": 5.045045045045045, "percentage": 72.07, "elapsed_time": "3:38:24", "remaining_time": "1:24:37"}
113
+ {"current_steps": 565, "total_steps": 777, "loss": 0.2687, "lr": 8.485558686204215e-06, "epoch": 5.09009009009009, "percentage": 72.72, "elapsed_time": "3:40:13", "remaining_time": "1:22:37"}
114
+ {"current_steps": 570, "total_steps": 777, "loss": 0.2547, "lr": 8.121013446981004e-06, "epoch": 5.135135135135135, "percentage": 73.36, "elapsed_time": "3:42:16", "remaining_time": "1:20:43"}
115
+ {"current_steps": 575, "total_steps": 777, "loss": 0.2613, "lr": 7.762466760985651e-06, "epoch": 5.18018018018018, "percentage": 74.0, "elapsed_time": "3:44:06", "remaining_time": "1:18:43"}
116
+ {"current_steps": 580, "total_steps": 777, "loss": 0.2692, "lr": 7.410099684183738e-06, "epoch": 5.225225225225225, "percentage": 74.65, "elapsed_time": "3:46:09", "remaining_time": "1:16:49"}
117
+ {"current_steps": 585, "total_steps": 777, "loss": 0.2625, "lr": 7.064090152012488e-06, "epoch": 5.27027027027027, "percentage": 75.29, "elapsed_time": "3:48:05", "remaining_time": "1:14:51"}
118
+ {"current_steps": 590, "total_steps": 777, "loss": 0.2592, "lr": 6.72461288952835e-06, "epoch": 5.315315315315315, "percentage": 75.93, "elapsed_time": "3:50:02", "remaining_time": "1:12:54"}
119
+ {"current_steps": 595, "total_steps": 777, "loss": 0.263, "lr": 6.391839323175788e-06, "epoch": 5.36036036036036, "percentage": 76.58, "elapsed_time": "3:51:59", "remaining_time": "1:10:57"}
120
+ {"current_steps": 600, "total_steps": 777, "loss": 0.277, "lr": 6.065937494221763e-06, "epoch": 5.405405405405405, "percentage": 77.22, "elapsed_time": "3:53:56", "remaining_time": "1:09:00"}
121
+ {"current_steps": 605, "total_steps": 777, "loss": 0.2712, "lr": 5.747071973899634e-06, "epoch": 5.45045045045045, "percentage": 77.86, "elapsed_time": "3:55:59", "remaining_time": "1:07:05"}
122
+ {"current_steps": 610, "total_steps": 777, "loss": 0.2633, "lr": 5.4354037803053124e-06, "epoch": 5.495495495495495, "percentage": 78.51, "elapsed_time": "3:57:52", "remaining_time": "1:05:07"}
123
+ {"current_steps": 615, "total_steps": 777, "loss": 0.2467, "lr": 5.131090297087682e-06, "epoch": 5.54054054054054, "percentage": 79.15, "elapsed_time": "3:59:46", "remaining_time": "1:03:09"}
124
+ {"current_steps": 620, "total_steps": 777, "loss": 0.2828, "lr": 4.834285193974277e-06, "epoch": 5.585585585585585, "percentage": 79.79, "elapsed_time": "4:01:36", "remaining_time": "1:01:10"}
125
+ {"current_steps": 625, "total_steps": 777, "loss": 0.267, "lr": 4.545138349172418e-06, "epoch": 5.63063063063063, "percentage": 80.44, "elapsed_time": "4:03:22", "remaining_time": "0:59:11"}
126
+ {"current_steps": 630, "total_steps": 777, "loss": 0.2685, "lr": 4.263795773684929e-06, "epoch": 5.675675675675675, "percentage": 81.08, "elapsed_time": "4:05:18", "remaining_time": "0:57:14"}
127
+ {"current_steps": 635, "total_steps": 777, "loss": 0.2682, "lr": 3.9903995375787245e-06, "epoch": 5.7207207207207205, "percentage": 81.72, "elapsed_time": "4:07:10", "remaining_time": "0:55:16"}
128
+ {"current_steps": 640, "total_steps": 777, "loss": 0.2712, "lr": 3.7250876982433947e-06, "epoch": 5.7657657657657655, "percentage": 82.37, "elapsed_time": "4:09:06", "remaining_time": "0:53:19"}
129
+ {"current_steps": 645, "total_steps": 777, "loss": 0.2673, "lr": 3.4679942306761484e-06, "epoch": 5.8108108108108105, "percentage": 83.01, "elapsed_time": "4:10:58", "remaining_time": "0:51:21"}
130
+ {"current_steps": 650, "total_steps": 777, "loss": 0.2626, "lr": 3.219248959828196e-06, "epoch": 5.8558558558558556, "percentage": 83.66, "elapsed_time": "4:13:03", "remaining_time": "0:49:26"}
131
+ {"current_steps": 655, "total_steps": 777, "loss": 0.2614, "lr": 2.9789774950468265e-06, "epoch": 5.900900900900901, "percentage": 84.3, "elapsed_time": "4:14:49", "remaining_time": "0:47:27"}
132
+ {"current_steps": 660, "total_steps": 777, "loss": 0.2732, "lr": 2.747301166646221e-06, "epoch": 5.945945945945946, "percentage": 84.94, "elapsed_time": "4:16:48", "remaining_time": "0:45:31"}
133
+ {"current_steps": 665, "total_steps": 777, "loss": 0.2579, "lr": 2.524336964639067e-06, "epoch": 5.990990990990991, "percentage": 85.59, "elapsed_time": "4:18:42", "remaining_time": "0:43:34"}
134
+ {"current_steps": 670, "total_steps": 777, "loss": 0.2628, "lr": 2.3101974796599015e-06, "epoch": 6.036036036036036, "percentage": 86.23, "elapsed_time": "4:20:34", "remaining_time": "0:41:36"}
135
+ {"current_steps": 675, "total_steps": 777, "loss": 0.2705, "lr": 2.1049908461100086e-06, "epoch": 6.081081081081081, "percentage": 86.87, "elapsed_time": "4:22:36", "remaining_time": "0:39:40"}
136
+ {"current_steps": 680, "total_steps": 777, "loss": 0.2652, "lr": 1.9088206875526128e-06, "epoch": 6.126126126126126, "percentage": 87.52, "elapsed_time": "4:24:39", "remaining_time": "0:37:45"}
137
+ {"current_steps": 685, "total_steps": 777, "loss": 0.2655, "lr": 1.7217860643858797e-06, "epoch": 6.171171171171171, "percentage": 88.16, "elapsed_time": "4:26:35", "remaining_time": "0:35:48"}
138
+ {"current_steps": 690, "total_steps": 777, "loss": 0.2625, "lr": 1.5439814238202356e-06, "epoch": 6.216216216216216, "percentage": 88.8, "elapsed_time": "4:28:20", "remaining_time": "0:33:50"}
139
+ {"current_steps": 695, "total_steps": 777, "loss": 0.2657, "lr": 1.3754965521851582e-06, "epoch": 6.261261261261261, "percentage": 89.45, "elapsed_time": "4:30:13", "remaining_time": "0:31:52"}
140
+ {"current_steps": 700, "total_steps": 777, "loss": 0.2658, "lr": 1.2164165295896392e-06, "epoch": 6.306306306306306, "percentage": 90.09, "elapsed_time": "4:32:12", "remaining_time": "0:29:56"}
141
+ {"current_steps": 705, "total_steps": 777, "loss": 0.268, "lr": 1.0668216869591098e-06, "epoch": 6.351351351351352, "percentage": 90.73, "elapsed_time": "4:34:06", "remaining_time": "0:27:59"}
142
+ {"current_steps": 710, "total_steps": 777, "loss": 0.2607, "lr": 9.267875654706015e-07, "epoch": 6.396396396396397, "percentage": 91.38, "elapsed_time": "4:35:55", "remaining_time": "0:26:02"}
143
+ {"current_steps": 715, "total_steps": 777, "loss": 0.2523, "lr": 7.963848784065753e-07, "epoch": 6.441441441441442, "percentage": 92.02, "elapsed_time": "4:37:56", "remaining_time": "0:24:06"}
144
+ {"current_steps": 720, "total_steps": 777, "loss": 0.2642, "lr": 6.756794754467045e-07, "epoch": 6.486486486486487, "percentage": 92.66, "elapsed_time": "4:39:52", "remaining_time": "0:22:09"}
145
+ {"current_steps": 725, "total_steps": 777, "loss": 0.2579, "lr": 5.647323094156565e-07, "epoch": 6.531531531531532, "percentage": 93.31, "elapsed_time": "4:41:42", "remaining_time": "0:20:12"}
146
+ {"current_steps": 730, "total_steps": 777, "loss": 0.2618, "lr": 4.635994055036208e-07, "epoch": 6.576576576576577, "percentage": 93.95, "elapsed_time": "4:43:37", "remaining_time": "0:18:15"}
147
+ {"current_steps": 735, "total_steps": 777, "loss": 0.264, "lr": 3.723318329751746e-07, "epoch": 6.621621621621622, "percentage": 94.59, "elapsed_time": "4:45:30", "remaining_time": "0:16:18"}
148
+ {"current_steps": 740, "total_steps": 777, "loss": 0.2668, "lr": 2.9097567938074943e-07, "epoch": 6.666666666666667, "percentage": 95.24, "elapsed_time": "4:47:22", "remaining_time": "0:14:22"}
149
+ {"current_steps": 745, "total_steps": 777, "loss": 0.2649, "lr": 2.1957202728370542e-07, "epoch": 6.711711711711712, "percentage": 95.88, "elapsed_time": "4:49:13", "remaining_time": "0:12:25"}
150
+ {"current_steps": 750, "total_steps": 777, "loss": 0.2483, "lr": 1.5815693351480587e-07, "epoch": 6.756756756756757, "percentage": 96.53, "elapsed_time": "4:51:06", "remaining_time": "0:10:28"}
151
+ {"current_steps": 755, "total_steps": 777, "loss": 0.2714, "lr": 1.0676141096453097e-07, "epoch": 6.801801801801802, "percentage": 97.17, "elapsed_time": "4:52:56", "remaining_time": "0:08:32"}
152
+ {"current_steps": 760, "total_steps": 777, "loss": 0.269, "lr": 6.541141292243814e-08, "epoch": 6.846846846846847, "percentage": 97.81, "elapsed_time": "4:54:48", "remaining_time": "0:06:35"}
153
+ {"current_steps": 765, "total_steps": 777, "loss": 0.2628, "lr": 3.412781997148784e-08, "epoch": 6.891891891891892, "percentage": 98.46, "elapsed_time": "4:56:45", "remaining_time": "0:04:39"}
154
+ {"current_steps": 770, "total_steps": 777, "loss": 0.2532, "lr": 1.29264294439424e-08, "epoch": 6.936936936936937, "percentage": 99.1, "elapsed_time": "4:58:36", "remaining_time": "0:02:42"}
155
+ {"current_steps": 775, "total_steps": 777, "loss": 0.2576, "lr": 1.817947444149315e-09, "epoch": 6.981981981981982, "percentage": 99.74, "elapsed_time": "5:00:40", "remaining_time": "0:00:46"}
156
+ {"current_steps": 777, "total_steps": 777, "epoch": 7.0, "percentage": 100.0, "elapsed_time": "5:01:37", "remaining_time": "0:00:00"}
157
+ {"current_steps": 777, "total_steps": 777, "epoch": 7.0, "percentage": 100.0, "elapsed_time": "0:00:00", "remaining_time": "0:00:00"}
158
+ {"current_steps": 777, "total_steps": 777, "epoch": 7.0, "percentage": 100.0, "elapsed_time": "0:00:00", "remaining_time": "0:00:00"}
159
+ {"current_steps": 777, "total_steps": 777, "epoch": 7.0, "percentage": 100.0, "elapsed_time": "0:00:00", "remaining_time": "0:00:00"}
160
+ {"current_steps": 777, "total_steps": 777, "epoch": 7.0, "percentage": 100.0, "elapsed_time": "0:00:00", "remaining_time": "0:00:00"}
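
trainer_log.jsonl is plain JSON-lines, one record per logging interval (every 5 optimizer steps here), with the trailing summary records carrying no loss or lr fields. A minimal sketch for pulling out the loss and learning-rate curves, using only the field names visible in the records above:

import json

steps, losses, lrs = [], [], []
with open("trainer_log.jsonl") as f:
    for line in f:
        rec = json.loads(line)
        if "loss" not in rec:        # skip the trailing summary records
            continue
        steps.append(rec["current_steps"])
        losses.append(rec["loss"])
        lrs.append(rec["lr"])

print(f"{len(steps)} logged points, final loss {losses[-1]:.4f} at step {steps[-1]}")
# The loss drops from 0.6951 at step 5 to roughly 0.26 by the end of epoch 7,
# while lr traces the warmup-plus-cosine schedule, peaking near 4e-05 around step 80.
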
trainer_state.json ADDED
@@ -0,0 +1,1748 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 7.0,
6
+ "eval_steps": 500,
7
+ "global_step": 777,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.04504504504504504,
14
+ "grad_norm": 12.213179482888265,
15
+ "learning_rate": 2.0512820512820513e-06,
16
+ "loss": 0.6951,
17
+ "loss_nan_ranks": 0,
18
+ "loss_rank_avg": 0.21766051650047302,
19
+ "step": 5,
20
+ "valid_targets_mean": 3509.5,
21
+ "valid_targets_min": 811
22
+ },
23
+ {
24
+ "epoch": 0.09009009009009009,
25
+ "grad_norm": 7.570539034885599,
26
+ "learning_rate": 4.615384615384616e-06,
27
+ "loss": 0.6634,
28
+ "loss_nan_ranks": 0,
29
+ "loss_rank_avg": 0.1856536865234375,
30
+ "step": 10,
31
+ "valid_targets_mean": 3624.1,
32
+ "valid_targets_min": 292
33
+ },
34
+ {
35
+ "epoch": 0.13513513513513514,
36
+ "grad_norm": 2.6810961238620084,
37
+ "learning_rate": 7.17948717948718e-06,
38
+ "loss": 0.5718,
39
+ "loss_nan_ranks": 0,
40
+ "loss_rank_avg": 0.1723824441432953,
41
+ "step": 15,
42
+ "valid_targets_mean": 3464.6,
43
+ "valid_targets_min": 483
44
+ },
45
+ {
46
+ "epoch": 0.18018018018018017,
47
+ "grad_norm": 1.2227064393103297,
48
+ "learning_rate": 9.743589743589744e-06,
49
+ "loss": 0.5115,
50
+ "loss_nan_ranks": 0,
51
+ "loss_rank_avg": 0.17001838982105255,
52
+ "step": 20,
53
+ "valid_targets_mean": 3984.6,
54
+ "valid_targets_min": 488
55
+ },
56
+ {
57
+ "epoch": 0.22522522522522523,
58
+ "grad_norm": 0.8898938226580776,
59
+ "learning_rate": 1.230769230769231e-05,
60
+ "loss": 0.4871,
61
+ "loss_nan_ranks": 0,
62
+ "loss_rank_avg": 0.17259931564331055,
63
+ "step": 25,
64
+ "valid_targets_mean": 4314.8,
65
+ "valid_targets_min": 456
66
+ },
67
+ {
68
+ "epoch": 0.2702702702702703,
69
+ "grad_norm": 0.6432303488130432,
70
+ "learning_rate": 1.4871794871794874e-05,
71
+ "loss": 0.4791,
72
+ "loss_nan_ranks": 0,
73
+ "loss_rank_avg": 0.2156098484992981,
74
+ "step": 30,
75
+ "valid_targets_mean": 5139.6,
76
+ "valid_targets_min": 285
77
+ },
78
+ {
79
+ "epoch": 0.3153153153153153,
80
+ "grad_norm": 0.420063730489216,
81
+ "learning_rate": 1.7435897435897438e-05,
82
+ "loss": 0.4542,
83
+ "loss_nan_ranks": 0,
84
+ "loss_rank_avg": 0.14423362910747528,
85
+ "step": 35,
86
+ "valid_targets_mean": 4965.8,
87
+ "valid_targets_min": 610
88
+ },
89
+ {
90
+ "epoch": 0.36036036036036034,
91
+ "grad_norm": 0.4108255376992912,
92
+ "learning_rate": 2e-05,
93
+ "loss": 0.4362,
94
+ "loss_nan_ranks": 0,
95
+ "loss_rank_avg": 0.1998554766178131,
96
+ "step": 40,
97
+ "valid_targets_mean": 5446.5,
98
+ "valid_targets_min": 626
99
+ },
100
+ {
101
+ "epoch": 0.40540540540540543,
102
+ "grad_norm": 0.3763657695313296,
103
+ "learning_rate": 2.2564102564102566e-05,
104
+ "loss": 0.4108,
105
+ "loss_nan_ranks": 0,
106
+ "loss_rank_avg": 0.1512545347213745,
107
+ "step": 45,
108
+ "valid_targets_mean": 4206.4,
109
+ "valid_targets_min": 484
110
+ },
111
+ {
112
+ "epoch": 0.45045045045045046,
113
+ "grad_norm": 0.3362402566929759,
114
+ "learning_rate": 2.512820512820513e-05,
115
+ "loss": 0.4218,
116
+ "loss_nan_ranks": 0,
117
+ "loss_rank_avg": 0.13444261252880096,
118
+ "step": 50,
119
+ "valid_targets_mean": 3656.3,
120
+ "valid_targets_min": 398
121
+ },
122
+ {
123
+ "epoch": 0.4954954954954955,
124
+ "grad_norm": 0.29451984543253784,
125
+ "learning_rate": 2.7692307692307694e-05,
126
+ "loss": 0.4003,
127
+ "loss_nan_ranks": 0,
128
+ "loss_rank_avg": 0.11262520402669907,
129
+ "step": 55,
130
+ "valid_targets_mean": 4485.2,
131
+ "valid_targets_min": 606
132
+ },
133
+ {
134
+ "epoch": 0.5405405405405406,
135
+ "grad_norm": 0.29803110540835986,
136
+ "learning_rate": 3.0256410256410257e-05,
137
+ "loss": 0.3955,
138
+ "loss_nan_ranks": 0,
139
+ "loss_rank_avg": 0.14588972926139832,
140
+ "step": 60,
141
+ "valid_targets_mean": 4701.9,
142
+ "valid_targets_min": 470
143
+ },
144
+ {
145
+ "epoch": 0.5855855855855856,
146
+ "grad_norm": 0.8391493968662852,
147
+ "learning_rate": 3.282051282051282e-05,
148
+ "loss": 0.3757,
149
+ "loss_nan_ranks": 0,
150
+ "loss_rank_avg": 0.09588639438152313,
151
+ "step": 65,
152
+ "valid_targets_mean": 4384.8,
153
+ "valid_targets_min": 664
154
+ },
155
+ {
156
+ "epoch": 0.6306306306306306,
157
+ "grad_norm": 0.2886198856062646,
158
+ "learning_rate": 3.538461538461539e-05,
159
+ "loss": 0.3825,
160
+ "loss_nan_ranks": 0,
161
+ "loss_rank_avg": 0.11931118369102478,
162
+ "step": 70,
163
+ "valid_targets_mean": 3792.6,
164
+ "valid_targets_min": 529
165
+ },
166
+ {
167
+ "epoch": 0.6756756756756757,
168
+ "grad_norm": 0.26848114245238813,
169
+ "learning_rate": 3.794871794871795e-05,
170
+ "loss": 0.3694,
171
+ "loss_nan_ranks": 0,
172
+ "loss_rank_avg": 0.11248503625392914,
173
+ "step": 75,
174
+ "valid_targets_mean": 4532.7,
175
+ "valid_targets_min": 536
176
+ },
177
+ {
178
+ "epoch": 0.7207207207207207,
179
+ "grad_norm": 0.2710447890224146,
180
+ "learning_rate": 3.999979800311937e-05,
181
+ "loss": 0.356,
182
+ "loss_nan_ranks": 0,
183
+ "loss_rank_avg": 0.10272569209337234,
184
+ "step": 80,
185
+ "valid_targets_mean": 3715.0,
186
+ "valid_targets_min": 404
187
+ },
188
+ {
189
+ "epoch": 0.7657657657657657,
190
+ "grad_norm": 0.26845203304003346,
191
+ "learning_rate": 3.999272854071669e-05,
192
+ "loss": 0.3409,
193
+ "loss_nan_ranks": 0,
194
+ "loss_rank_avg": 0.08947348594665527,
195
+ "step": 85,
196
+ "valid_targets_mean": 4057.3,
197
+ "valid_targets_min": 562
198
+ },
199
+ {
200
+ "epoch": 0.8108108108108109,
201
+ "grad_norm": 0.28500381948167564,
202
+ "learning_rate": 3.99755633141858e-05,
203
+ "loss": 0.347,
204
+ "loss_nan_ranks": 0,
205
+ "loss_rank_avg": 0.11878690123558044,
206
+ "step": 90,
207
+ "valid_targets_mean": 3781.4,
208
+ "valid_targets_min": 513
209
+ },
210
+ {
211
+ "epoch": 0.8558558558558559,
212
+ "grad_norm": 0.2897579470253114,
213
+ "learning_rate": 3.994831099148205e-05,
214
+ "loss": 0.3339,
215
+ "loss_nan_ranks": 0,
216
+ "loss_rank_avg": 0.09589523077011108,
217
+ "step": 95,
218
+ "valid_targets_mean": 3537.8,
219
+ "valid_targets_min": 410
220
+ },
221
+ {
222
+ "epoch": 0.9009009009009009,
223
+ "grad_norm": 0.37166782552169725,
224
+ "learning_rate": 3.991098533425988e-05,
225
+ "loss": 0.3304,
226
+ "loss_nan_ranks": 0,
227
+ "loss_rank_avg": 0.12742844223976135,
228
+ "step": 100,
229
+ "valid_targets_mean": 4120.6,
230
+ "valid_targets_min": 625
231
+ },
232
+ {
233
+ "epoch": 0.9459459459459459,
234
+ "grad_norm": 0.2721758202080061,
235
+ "learning_rate": 3.9863605190923655e-05,
236
+ "loss": 0.3353,
237
+ "loss_nan_ranks": 0,
238
+ "loss_rank_avg": 0.1229485422372818,
239
+ "step": 105,
240
+ "valid_targets_mean": 4571.3,
241
+ "valid_targets_min": 536
242
+ },
243
+ {
244
+ "epoch": 0.990990990990991,
245
+ "grad_norm": 0.2716104773435794,
246
+ "learning_rate": 3.9806194487109636e-05,
247
+ "loss": 0.3293,
248
+ "loss_nan_ranks": 0,
249
+ "loss_rank_avg": 0.1011948436498642,
250
+ "step": 110,
251
+ "valid_targets_mean": 3915.6,
252
+ "valid_targets_min": 536
253
+ },
254
+ {
255
+ "epoch": 1.0360360360360361,
256
+ "grad_norm": 0.4113397770099122,
257
+ "learning_rate": 3.9738782213604305e-05,
258
+ "loss": 0.3461,
259
+ "loss_nan_ranks": 0,
260
+ "loss_rank_avg": 0.12486414611339569,
261
+ "step": 115,
262
+ "valid_targets_mean": 4712.1,
263
+ "valid_targets_min": 663
264
+ },
265
+ {
266
+ "epoch": 1.0810810810810811,
267
+ "grad_norm": 0.2775525865510845,
268
+ "learning_rate": 3.9661402411704794e-05,
269
+ "loss": 0.3399,
270
+ "loss_nan_ranks": 0,
271
+ "loss_rank_avg": 0.09220485389232635,
272
+ "step": 120,
273
+ "valid_targets_mean": 3749.4,
274
+ "valid_targets_min": 381
275
+ },
276
+ {
277
+ "epoch": 1.1261261261261262,
278
+ "grad_norm": 0.26661128827223074,
279
+ "learning_rate": 3.957409415602899e-05,
280
+ "loss": 0.3244,
281
+ "loss_nan_ranks": 0,
282
+ "loss_rank_avg": 0.11258970201015472,
283
+ "step": 125,
284
+ "valid_targets_mean": 4166.4,
285
+ "valid_targets_min": 395
286
+ },
287
+ {
288
+ "epoch": 1.1711711711711712,
289
+ "grad_norm": 0.27815633052261163,
290
+ "learning_rate": 3.947690153478396e-05,
291
+ "loss": 0.3128,
292
+ "loss_nan_ranks": 0,
293
+ "loss_rank_avg": 0.1220335066318512,
294
+ "step": 130,
295
+ "valid_targets_mean": 4006.0,
296
+ "valid_targets_min": 525
297
+ },
298
+ {
299
+ "epoch": 1.2162162162162162,
300
+ "grad_norm": 0.2758450156551077,
301
+ "learning_rate": 3.936987362750266e-05,
302
+ "loss": 0.3319,
303
+ "loss_nan_ranks": 0,
304
+ "loss_rank_avg": 0.10510491579771042,
305
+ "step": 135,
306
+ "valid_targets_mean": 4167.9,
307
+ "valid_targets_min": 576
308
+ },
309
+ {
310
+ "epoch": 1.2612612612612613,
311
+ "grad_norm": 0.251293693452417,
312
+ "learning_rate": 3.925306448026011e-05,
313
+ "loss": 0.3272,
314
+ "loss_nan_ranks": 0,
315
+ "loss_rank_avg": 0.0853872001171112,
316
+ "step": 140,
317
+ "valid_targets_mean": 2816.8,
318
+ "valid_targets_min": 627
319
+ },
320
+ {
321
+ "epoch": 1.3063063063063063,
322
+ "grad_norm": 0.25769295208742854,
323
+ "learning_rate": 3.912653307838173e-05,
324
+ "loss": 0.3191,
325
+ "loss_nan_ranks": 0,
326
+ "loss_rank_avg": 0.11158967018127441,
327
+ "step": 145,
328
+ "valid_targets_mean": 4418.5,
329
+ "valid_targets_min": 688
330
+ },
331
+ {
332
+ "epoch": 1.3513513513513513,
333
+ "grad_norm": 0.28387795710075575,
334
+ "learning_rate": 3.899034331665733e-05,
335
+ "loss": 0.3132,
336
+ "loss_nan_ranks": 0,
337
+ "loss_rank_avg": 0.13492226600646973,
338
+ "step": 150,
339
+ "valid_targets_mean": 4884.4,
340
+ "valid_targets_min": 273
341
+ },
342
+ {
343
+ "epoch": 1.3963963963963963,
344
+ "grad_norm": 0.25474866914888333,
345
+ "learning_rate": 3.884456396707611e-05,
346
+ "loss": 0.3172,
347
+ "loss_nan_ranks": 0,
348
+ "loss_rank_avg": 0.1127757877111435,
349
+ "step": 155,
350
+ "valid_targets_mean": 5368.9,
351
+ "valid_targets_min": 563
352
+ },
353
+ {
354
+ "epoch": 1.4414414414414414,
355
+ "grad_norm": 0.2801065026702884,
356
+ "learning_rate": 3.8689268644098715e-05,
357
+ "loss": 0.3205,
358
+ "loss_nan_ranks": 0,
359
+ "loss_rank_avg": 0.11982893943786621,
360
+ "step": 160,
361
+ "valid_targets_mean": 4552.2,
362
+ "valid_targets_min": 487
363
+ },
364
+ {
365
+ "epoch": 1.4864864864864864,
366
+ "grad_norm": 0.26487929604501453,
367
+ "learning_rate": 3.852453576748397e-05,
368
+ "loss": 0.3006,
369
+ "loss_nan_ranks": 0,
370
+ "loss_rank_avg": 0.09722155332565308,
371
+ "step": 165,
372
+ "valid_targets_mean": 4420.7,
373
+ "valid_targets_min": 489
374
+ },
375
+ {
376
+ "epoch": 1.5315315315315314,
377
+ "grad_norm": 0.2755186974342944,
378
+ "learning_rate": 3.835044852268921e-05,
379
+ "loss": 0.3053,
380
+ "loss_nan_ranks": 0,
381
+ "loss_rank_avg": 0.09628415107727051,
382
+ "step": 170,
383
+ "valid_targets_mean": 3981.4,
384
+ "valid_targets_min": 521
385
+ },
386
+ {
387
+ "epoch": 1.5765765765765765,
388
+ "grad_norm": 0.2779173348107798,
389
+ "learning_rate": 3.816709481886386e-05,
390
+ "loss": 0.3197,
391
+ "loss_nan_ranks": 0,
392
+ "loss_rank_avg": 0.10215558111667633,
393
+ "step": 175,
394
+ "valid_targets_mean": 4846.7,
395
+ "valid_targets_min": 957
396
+ },
397
+ {
398
+ "epoch": 1.6216216216216215,
399
+ "grad_norm": 0.27126966265543495,
400
+ "learning_rate": 3.7974567244457886e-05,
401
+ "loss": 0.3112,
402
+ "loss_nan_ranks": 0,
403
+ "loss_rank_avg": 0.1354570835828781,
404
+ "step": 180,
405
+ "valid_targets_mean": 4403.5,
406
+ "valid_targets_min": 648
407
+ },
408
+ {
409
+ "epoch": 1.6666666666666665,
410
+ "grad_norm": 0.2880963639114082,
411
+ "learning_rate": 3.777296302046719e-05,
412
+ "loss": 0.3096,
413
+ "loss_nan_ranks": 0,
414
+ "loss_rank_avg": 0.08432003855705261,
415
+ "step": 185,
416
+ "valid_targets_mean": 3485.8,
417
+ "valid_targets_min": 367
418
+ },
419
+ {
420
+ "epoch": 1.7117117117117115,
421
+ "grad_norm": 0.2706373394484155,
422
+ "learning_rate": 3.756238395133972e-05,
423
+ "loss": 0.302,
424
+ "loss_nan_ranks": 0,
425
+ "loss_rank_avg": 0.11862757056951523,
426
+ "step": 190,
427
+ "valid_targets_mean": 4555.7,
428
+ "valid_targets_min": 693
429
+ },
430
+ {
431
+ "epoch": 1.7567567567567568,
432
+ "grad_norm": 0.3075395709355726,
433
+ "learning_rate": 3.734293637356719e-05,
434
+ "loss": 0.3059,
435
+ "loss_nan_ranks": 0,
436
+ "loss_rank_avg": 0.12262945622205734,
437
+ "step": 195,
438
+ "valid_targets_mean": 4144.0,
439
+ "valid_targets_min": 434
440
+ },
441
+ {
442
+ "epoch": 1.8018018018018018,
443
+ "grad_norm": 0.25683284776009463,
444
+ "learning_rate": 3.711473110198805e-05,
445
+ "loss": 0.3168,
446
+ "loss_nan_ranks": 0,
447
+ "loss_rank_avg": 0.13503742218017578,
448
+ "step": 200,
449
+ "valid_targets_mean": 4794.2,
450
+ "valid_targets_min": 418
451
+ },
452
+ {
453
+ "epoch": 1.8468468468468469,
454
+ "grad_norm": 0.2652406976146456,
455
+ "learning_rate": 3.687788337382918e-05,
456
+ "loss": 0.2959,
457
+ "loss_nan_ranks": 0,
458
+ "loss_rank_avg": 0.06610264629125595,
459
+ "step": 205,
460
+ "valid_targets_mean": 3298.5,
461
+ "valid_targets_min": 534
462
+ },
463
+ {
464
+ "epoch": 1.8918918918918919,
465
+ "grad_norm": 0.2913009925285956,
466
+ "learning_rate": 3.663251279051431e-05,
467
+ "loss": 0.2993,
468
+ "loss_nan_ranks": 0,
469
+ "loss_rank_avg": 0.09436690807342529,
470
+ "step": 210,
471
+ "valid_targets_mean": 3334.3,
472
+ "valid_targets_min": 367
473
+ },
474
+ {
475
+ "epoch": 1.936936936936937,
476
+ "grad_norm": 0.31970742341653646,
477
+ "learning_rate": 3.6378743257268696e-05,
478
+ "loss": 0.2999,
479
+ "loss_nan_ranks": 0,
480
+ "loss_rank_avg": 0.07659884542226791,
481
+ "step": 215,
482
+ "valid_targets_mean": 3069.9,
483
+ "valid_targets_min": 432
484
+ },
485
+ {
486
+ "epoch": 1.981981981981982,
487
+ "grad_norm": 0.2562136000194939,
488
+ "learning_rate": 3.6116702920550445e-05,
489
+ "loss": 0.3047,
490
+ "loss_nan_ranks": 0,
491
+ "loss_rank_avg": 0.10911617428064346,
492
+ "step": 220,
493
+ "valid_targets_mean": 4779.1,
494
+ "valid_targets_min": 561
495
+ },
496
+ {
497
+ "epoch": 2.027027027027027,
498
+ "grad_norm": 0.2724809389688923,
499
+ "learning_rate": 3.58465241033402e-05,
500
+ "loss": 0.2942,
501
+ "loss_nan_ranks": 0,
502
+ "loss_rank_avg": 0.09413851797580719,
503
+ "step": 225,
504
+ "valid_targets_mean": 5175.0,
505
+ "valid_targets_min": 611
506
+ },
507
+ {
508
+ "epoch": 2.0720720720720722,
509
+ "grad_norm": 0.37889132983663676,
510
+ "learning_rate": 3.556834323832174e-05,
511
+ "loss": 0.2942,
512
+ "loss_nan_ranks": 0,
513
+ "loss_rank_avg": 0.12257418036460876,
514
+ "step": 230,
515
+ "valid_targets_mean": 4611.3,
516
+ "valid_targets_min": 656
517
+ },
518
+ {
519
+ "epoch": 2.1171171171171173,
520
+ "grad_norm": 0.2788335973338977,
521
+ "learning_rate": 3.528230079898734e-05,
522
+ "loss": 0.2918,
523
+ "loss_nan_ranks": 0,
524
+ "loss_rank_avg": 0.11126174032688141,
525
+ "step": 235,
526
+ "valid_targets_mean": 4446.8,
527
+ "valid_targets_min": 563
528
+ },
529
+ {
530
+ "epoch": 2.1621621621621623,
531
+ "grad_norm": 0.2808879591424535,
532
+ "learning_rate": 3.498854122870263e-05,
533
+ "loss": 0.2903,
534
+ "loss_nan_ranks": 0,
535
+ "loss_rank_avg": 0.09562689065933228,
536
+ "step": 240,
537
+ "valid_targets_mean": 4357.7,
538
+ "valid_targets_min": 482
539
+ },
540
+ {
541
+ "epoch": 2.2072072072072073,
542
+ "grad_norm": 0.254732860948126,
543
+ "learning_rate": 3.4687212867766696e-05,
544
+ "loss": 0.2968,
545
+ "loss_nan_ranks": 0,
546
+ "loss_rank_avg": 0.10171202570199966,
547
+ "step": 245,
548
+ "valid_targets_mean": 4400.3,
549
+ "valid_targets_min": 583
550
+ },
551
+ {
552
+ "epoch": 2.2522522522522523,
553
+ "grad_norm": 0.25383907196480765,
554
+ "learning_rate": 3.437846787850454e-05,
555
+ "loss": 0.3055,
556
+ "loss_nan_ranks": 0,
557
+ "loss_rank_avg": 0.11306051164865494,
558
+ "step": 250,
559
+ "valid_targets_mean": 5253.9,
560
+ "valid_targets_min": 529
561
+ },
562
+ {
563
+ "epoch": 2.2972972972972974,
564
+ "grad_norm": 0.2651689069419142,
565
+ "learning_rate": 3.4062462168429267e-05,
566
+ "loss": 0.2857,
567
+ "loss_nan_ranks": 0,
568
+ "loss_rank_avg": 0.10121257603168488,
569
+ "step": 255,
570
+ "valid_targets_mean": 4426.2,
571
+ "valid_targets_min": 565
572
+ },
573
+ {
574
+ "epoch": 2.3423423423423424,
575
+ "grad_norm": 0.26957637460385697,
576
+ "learning_rate": 3.373935531151326e-05,
577
+ "loss": 0.2905,
578
+ "loss_nan_ranks": 0,
579
+ "loss_rank_avg": 0.10117916762828827,
580
+ "step": 260,
581
+ "valid_targets_mean": 3801.0,
582
+ "valid_targets_min": 452
583
+ },
584
+ {
585
+ "epoch": 2.3873873873873874,
586
+ "grad_norm": 0.2656592182447648,
587
+ "learning_rate": 3.3409310467607824e-05,
588
+ "loss": 0.2858,
589
+ "loss_nan_ranks": 0,
590
+ "loss_rank_avg": 0.08973188698291779,
591
+ "step": 265,
592
+ "valid_targets_mean": 4057.0,
593
+ "valid_targets_min": 576
594
+ },
595
+ {
596
+ "epoch": 2.4324324324324325,
597
+ "grad_norm": 0.2580157565140095,
598
+ "learning_rate": 3.307249430005203e-05,
599
+ "loss": 0.2982,
600
+ "loss_nan_ranks": 0,
601
+ "loss_rank_avg": 0.09819403290748596,
602
+ "step": 270,
603
+ "valid_targets_mean": 4610.7,
604
+ "valid_targets_min": 368
605
+ },
606
+ {
607
+ "epoch": 2.4774774774774775,
608
+ "grad_norm": 0.2755474257162277,
609
+ "learning_rate": 3.272907689151245e-05,
610
+ "loss": 0.3072,
611
+ "loss_nan_ranks": 0,
612
+ "loss_rank_avg": 0.10150294005870819,
613
+ "step": 275,
614
+ "valid_targets_mean": 4629.2,
615
+ "valid_targets_min": 323
616
+ },
617
+ {
618
+ "epoch": 2.5225225225225225,
619
+ "grad_norm": 0.27610850469305503,
620
+ "learning_rate": 3.237923165809619e-05,
621
+ "loss": 0.2958,
622
+ "loss_nan_ranks": 0,
623
+ "loss_rank_avg": 0.0950014516711235,
624
+ "step": 280,
625
+ "valid_targets_mean": 3554.4,
626
+ "valid_targets_min": 381
627
+ },
628
+ {
629
+ "epoch": 2.5675675675675675,
630
+ "grad_norm": 0.2611418490003288,
631
+ "learning_rate": 3.202313526178067e-05,
632
+ "loss": 0.2995,
633
+ "loss_nan_ranks": 0,
634
+ "loss_rank_avg": 0.1143759936094284,
635
+ "step": 285,
636
+ "valid_targets_mean": 4839.2,
637
+ "valid_targets_min": 446
638
+ },
639
+ {
640
+ "epoch": 2.6126126126126126,
641
+ "grad_norm": 0.2477097409751127,
642
+ "learning_rate": 3.16609675212043e-05,
643
+ "loss": 0.2818,
644
+ "loss_nan_ranks": 0,
645
+ "loss_rank_avg": 0.07015109807252884,
646
+ "step": 290,
647
+ "valid_targets_mean": 4588.3,
648
+ "valid_targets_min": 700
649
+ },
650
+ {
651
+ "epoch": 2.6576576576576576,
652
+ "grad_norm": 0.2974710482372013,
653
+ "learning_rate": 3.1292911320863104e-05,
654
+ "loss": 0.2959,
655
+ "loss_nan_ranks": 0,
656
+ "loss_rank_avg": 0.10549891740083694,
657
+ "step": 295,
658
+ "valid_targets_mean": 3873.7,
659
+ "valid_targets_min": 747
660
+ },
661
+ {
662
+ "epoch": 2.7027027027027026,
663
+ "grad_norm": 0.2385129443001723,
664
+ "learning_rate": 3.091915251875928e-05,
665
+ "loss": 0.2936,
666
+ "loss_nan_ranks": 0,
667
+ "loss_rank_avg": 0.08176982402801514,
668
+ "step": 300,
669
+ "valid_targets_mean": 4635.4,
670
+ "valid_targets_min": 640
671
+ },
672
+ {
673
+ "epoch": 2.7477477477477477,
674
+ "grad_norm": 0.2690939226544654,
675
+ "learning_rate": 3.053987985254806e-05,
676
+ "loss": 0.2841,
677
+ "loss_nan_ranks": 0,
678
+ "loss_rank_avg": 0.0824771448969841,
679
+ "step": 305,
680
+ "valid_targets_mean": 4034.1,
681
+ "valid_targets_min": 612
682
+ },
683
+ {
684
+ "epoch": 2.7927927927927927,
685
+ "grad_norm": 0.23489549980956795,
686
+ "learning_rate": 3.015528484423059e-05,
687
+ "loss": 0.2828,
688
+ "loss_nan_ranks": 0,
689
+ "loss_rank_avg": 0.07864581048488617,
690
+ "step": 310,
691
+ "valid_targets_mean": 3631.1,
692
+ "valid_targets_min": 612
693
+ },
694
+ {
695
+ "epoch": 2.8378378378378377,
696
+ "grad_norm": 0.25255671921419315,
697
+ "learning_rate": 2.9765561703440688e-05,
698
+ "loss": 0.293,
699
+ "loss_nan_ranks": 0,
700
+ "loss_rank_avg": 0.07940647006034851,
701
+ "step": 315,
702
+ "valid_targets_mean": 4242.5,
703
+ "valid_targets_min": 713
704
+ },
705
+ {
706
+ "epoch": 2.8828828828828827,
707
+ "grad_norm": 0.25230980048173096,
708
+ "learning_rate": 2.937090722937446e-05,
709
+ "loss": 0.2753,
710
+ "loss_nan_ranks": 0,
711
+ "loss_rank_avg": 0.10888753086328506,
712
+ "step": 320,
713
+ "valid_targets_mean": 4590.7,
714
+ "valid_targets_min": 547
715
+ },
716
+ {
717
+ "epoch": 2.9279279279279278,
718
+ "grad_norm": 0.24800878753672767,
719
+ "learning_rate": 2.897152071141225e-05,
720
+ "loss": 0.2873,
721
+ "loss_nan_ranks": 0,
722
+ "loss_rank_avg": 0.07101110368967056,
723
+ "step": 325,
724
+ "valid_targets_mean": 3616.2,
725
+ "valid_targets_min": 550
726
+ },
727
+ {
728
+ "epoch": 2.972972972972973,
729
+ "grad_norm": 0.2616860912144822,
730
+ "learning_rate": 2.8567603828483125e-05,
731
+ "loss": 0.2975,
732
+ "loss_nan_ranks": 0,
733
+ "loss_rank_avg": 0.133597731590271,
734
+ "step": 330,
735
+ "valid_targets_mean": 5871.1,
736
+ "valid_targets_min": 439
737
+ },
738
+ {
739
+ "epoch": 3.018018018018018,
740
+ "grad_norm": 0.25331466603093555,
741
+ "learning_rate": 2.8159360547222716e-05,
742
+ "loss": 0.2851,
743
+ "loss_nan_ranks": 0,
744
+ "loss_rank_avg": 0.07654231786727905,
745
+ "step": 335,
746
+ "valid_targets_mean": 4105.6,
747
+ "valid_targets_min": 515
748
+ },
749
+ {
750
+ "epoch": 3.063063063063063,
751
+ "grad_norm": 0.2657531594205501,
752
+ "learning_rate": 2.7746997018975804e-05,
753
+ "loss": 0.2825,
754
+ "loss_nan_ranks": 0,
755
+ "loss_rank_avg": 0.08719119429588318,
756
+ "step": 340,
757
+ "valid_targets_mean": 3864.1,
758
+ "valid_targets_min": 595
759
+ },
760
+ {
761
+ "epoch": 3.108108108108108,
762
+ "grad_norm": 0.6735205939178378,
763
+ "learning_rate": 2.733072147569572e-05,
764
+ "loss": 0.2749,
765
+ "loss_nan_ranks": 0,
766
+ "loss_rank_avg": 0.08523587137460709,
767
+ "step": 345,
768
+ "valid_targets_mean": 4730.9,
769
+ "valid_targets_min": 659
770
+ },
771
+ {
772
+ "epoch": 3.153153153153153,
773
+ "grad_norm": 0.2591942491892,
774
+ "learning_rate": 2.6910744124793046e-05,
775
+ "loss": 0.2788,
776
+ "loss_nan_ranks": 0,
777
+ "loss_rank_avg": 0.10307951271533966,
778
+ "step": 350,
779
+ "valid_targets_mean": 4142.2,
780
+ "valid_targets_min": 664
781
+ },
782
+ {
783
+ "epoch": 3.1981981981981984,
784
+ "grad_norm": 0.24596341545878944,
785
+ "learning_rate": 2.648727704298685e-05,
786
+ "loss": 0.2794,
787
+ "loss_nan_ranks": 0,
788
+ "loss_rank_avg": 0.08397462218999863,
789
+ "step": 355,
790
+ "valid_targets_mean": 4468.1,
791
+ "valid_targets_min": 815
792
+ },
793
+ {
794
+ "epoch": 3.2432432432432434,
795
+ "grad_norm": 0.28207232487891526,
796
+ "learning_rate": 2.6060534069211877e-05,
797
+ "loss": 0.2876,
798
+ "loss_nan_ranks": 0,
799
+ "loss_rank_avg": 0.11069733649492264,
800
+ "step": 360,
801
+ "valid_targets_mean": 4422.8,
802
+ "valid_targets_min": 703
803
+ },
804
+ {
805
+ "epoch": 3.2882882882882885,
806
+ "grad_norm": 0.2512548681824469,
807
+ "learning_rate": 2.5630730696635953e-05,
808
+ "loss": 0.2773,
809
+ "loss_nan_ranks": 0,
810
+ "loss_rank_avg": 0.09579600393772125,
811
+ "step": 365,
812
+ "valid_targets_mean": 4744.6,
813
+ "valid_targets_min": 580
814
+ },
815
+ {
816
+ "epoch": 3.3333333333333335,
817
+ "grad_norm": 0.26070640673282175,
818
+ "learning_rate": 2.5198083963841988e-05,
819
+ "loss": 0.2824,
820
+ "loss_nan_ranks": 0,
821
+ "loss_rank_avg": 0.09562422335147858,
822
+ "step": 370,
823
+ "valid_targets_mean": 4330.3,
824
+ "valid_targets_min": 503
825
+ },
826
+ {
827
+ "epoch": 3.3783783783783785,
828
+ "grad_norm": 0.2568330889976537,
829
+ "learning_rate": 2.4762812345229622e-05,
830
+ "loss": 0.2846,
831
+ "loss_nan_ranks": 0,
832
+ "loss_rank_avg": 0.07404516637325287,
833
+ "step": 375,
834
+ "valid_targets_mean": 3901.9,
835
+ "valid_targets_min": 724
836
+ },
837
+ {
838
+ "epoch": 3.4234234234234235,
839
+ "grad_norm": 0.25175456004756475,
840
+ "learning_rate": 2.4325135640691823e-05,
841
+ "loss": 0.2821,
842
+ "loss_nan_ranks": 0,
843
+ "loss_rank_avg": 0.10944640636444092,
844
+ "step": 380,
845
+ "valid_targets_mean": 5086.4,
846
+ "valid_targets_min": 583
847
+ },
848
+ {
849
+ "epoch": 3.4684684684684686,
850
+ "grad_norm": 0.26905322183252883,
851
+ "learning_rate": 2.388527486462212e-05,
852
+ "loss": 0.2693,
853
+ "loss_nan_ranks": 0,
854
+ "loss_rank_avg": 0.1297810822725296,
855
+ "step": 385,
856
+ "valid_targets_mean": 5236.8,
857
+ "valid_targets_min": 552
858
+ },
859
+ {
860
+ "epoch": 3.5135135135135136,
861
+ "grad_norm": 0.24924455050919309,
862
+ "learning_rate": 2.3443452134308565e-05,
863
+ "loss": 0.2775,
864
+ "loss_nan_ranks": 0,
865
+ "loss_rank_avg": 0.07704746723175049,
866
+ "step": 390,
867
+ "valid_targets_mean": 4304.3,
868
+ "valid_targets_min": 716
869
+ },
870
+ {
871
+ "epoch": 3.5585585585585586,
872
+ "grad_norm": 0.2800927179852231,
873
+ "learning_rate": 2.299989055777079e-05,
874
+ "loss": 0.276,
875
+ "loss_nan_ranks": 0,
876
+ "loss_rank_avg": 0.11317361146211624,
877
+ "step": 395,
878
+ "valid_targets_mean": 4685.0,
879
+ "valid_targets_min": 581
880
+ },
881
+ {
882
+ "epoch": 3.6036036036036037,
883
+ "grad_norm": 0.27986795372322626,
884
+ "learning_rate": 2.2554814121096748e-05,
885
+ "loss": 0.2924,
886
+ "loss_nan_ranks": 0,
887
+ "loss_rank_avg": 0.12253560870885849,
888
+ "step": 400,
889
+ "valid_targets_mean": 5609.9,
890
+ "valid_targets_min": 755
891
+ },
892
+ {
893
+ "epoch": 3.6486486486486487,
894
+ "grad_norm": 0.5289053951258326,
895
+ "learning_rate": 2.2108447575336015e-05,
896
+ "loss": 0.2783,
897
+ "loss_nan_ranks": 0,
898
+ "loss_rank_avg": 0.08305458724498749,
899
+ "step": 405,
900
+ "valid_targets_mean": 4169.9,
901
+ "valid_targets_min": 469
902
+ },
903
+ {
904
+ "epoch": 3.6936936936936937,
905
+ "grad_norm": 0.2849928276867334,
906
+ "learning_rate": 2.16610163230069e-05,
907
+ "loss": 0.2841,
908
+ "loss_nan_ranks": 0,
909
+ "loss_rank_avg": 0.08885318040847778,
910
+ "step": 410,
911
+ "valid_targets_mean": 3384.0,
912
+ "valid_targets_min": 624
913
+ },
914
+ {
915
+ "epoch": 3.7387387387387387,
916
+ "grad_norm": 0.2592566881525863,
917
+ "learning_rate": 2.1212746304274482e-05,
918
+ "loss": 0.2709,
919
+ "loss_nan_ranks": 0,
920
+ "loss_rank_avg": 0.10845595598220825,
921
+ "step": 415,
922
+ "valid_targets_mean": 4444.3,
923
+ "valid_targets_min": 443
924
+ },
925
+ {
926
+ "epoch": 3.7837837837837838,
927
+ "grad_norm": 0.25283053340082196,
928
+ "learning_rate": 2.0763863882857242e-05,
929
+ "loss": 0.2802,
930
+ "loss_nan_ranks": 0,
931
+ "loss_rank_avg": 0.08974164724349976,
932
+ "step": 420,
933
+ "valid_targets_mean": 4321.2,
934
+ "valid_targets_min": 750
935
+ },
936
+ {
937
+ "epoch": 3.828828828828829,
938
+ "grad_norm": 0.26724652513260666,
939
+ "learning_rate": 2.031459573171973e-05,
940
+ "loss": 0.2764,
941
+ "loss_nan_ranks": 0,
942
+ "loss_rank_avg": 0.10250722616910934,
943
+ "step": 425,
944
+ "valid_targets_mean": 4252.0,
945
+ "valid_targets_min": 437
946
+ },
947
+ {
948
+ "epoch": 3.873873873873874,
949
+ "grad_norm": 0.2741788640534842,
950
+ "learning_rate": 1.9865168718609142e-05,
951
+ "loss": 0.2753,
952
+ "loss_nan_ranks": 0,
953
+ "loss_rank_avg": 0.0931130200624466,
954
+ "step": 430,
955
+ "valid_targets_mean": 3780.3,
956
+ "valid_targets_min": 626
957
+ },
958
+ {
959
+ "epoch": 3.918918918918919,
960
+ "grad_norm": 0.26804478752024036,
961
+ "learning_rate": 1.9415809791493484e-05,
962
+ "loss": 0.2599,
963
+ "loss_nan_ranks": 0,
964
+ "loss_rank_avg": 0.09308881312608719,
965
+ "step": 435,
966
+ "valid_targets_mean": 4676.5,
967
+ "valid_targets_min": 504
968
+ },
969
+ {
970
+ "epoch": 3.963963963963964,
971
+ "grad_norm": 0.2641063176140694,
972
+ "learning_rate": 1.8966745863959256e-05,
973
+ "loss": 0.29,
974
+ "loss_nan_ranks": 0,
975
+ "loss_rank_avg": 0.08381877839565277,
976
+ "step": 440,
977
+ "valid_targets_mean": 4378.8,
978
+ "valid_targets_min": 464
979
+ },
980
+ {
981
+ "epoch": 4.009009009009009,
982
+ "grad_norm": 0.27400842575376944,
983
+ "learning_rate": 1.851820370062648e-05,
984
+ "loss": 0.2694,
985
+ "loss_nan_ranks": 0,
986
+ "loss_rank_avg": 0.11071999371051788,
987
+ "step": 445,
988
+ "valid_targets_mean": 4126.1,
989
+ "valid_targets_min": 424
990
+ },
991
+ {
992
+ "epoch": 4.054054054054054,
993
+ "grad_norm": 0.252436623647466,
994
+ "learning_rate": 1.8070409802638985e-05,
995
+ "loss": 0.2819,
996
+ "loss_nan_ranks": 0,
997
+ "loss_rank_avg": 0.08786597847938538,
998
+ "step": 450,
999
+ "valid_targets_mean": 3961.2,
1000
+ "valid_targets_min": 252
1001
+ },
1002
+ {
1003
+ "epoch": 4.099099099099099,
1004
+ "grad_norm": 0.24872749257249535,
1005
+ "learning_rate": 1.762359029328768e-05,
1006
+ "loss": 0.2679,
1007
+ "loss_nan_ranks": 0,
1008
+ "loss_rank_avg": 0.09103064984083176,
1009
+ "step": 455,
1010
+ "valid_targets_mean": 4177.6,
1011
+ "valid_targets_min": 666
1012
+ },
1013
+ {
1014
+ "epoch": 4.1441441441441444,
1015
+ "grad_norm": 0.27264806865130753,
1016
+ "learning_rate": 1.7177970803824714e-05,
1017
+ "loss": 0.2655,
1018
+ "loss_nan_ranks": 0,
1019
+ "loss_rank_avg": 0.14946812391281128,
1020
+ "step": 460,
1021
+ "valid_targets_mean": 5840.9,
1022
+ "valid_targets_min": 609
1023
+ },
1024
+ {
1025
+ "epoch": 4.1891891891891895,
1026
+ "grad_norm": 0.2577412462406186,
1027
+ "learning_rate": 1.6733776359526024e-05,
1028
+ "loss": 0.273,
1029
+ "loss_nan_ranks": 0,
1030
+ "loss_rank_avg": 0.08085731416940689,
1031
+ "step": 465,
1032
+ "valid_targets_mean": 3977.6,
1033
+ "valid_targets_min": 409
1034
+ },
1035
+ {
1036
+ "epoch": 4.2342342342342345,
1037
+ "grad_norm": 0.2792589372950184,
1038
+ "learning_rate": 1.6291231266059912e-05,
1039
+ "loss": 0.2758,
1040
+ "loss_nan_ranks": 0,
1041
+ "loss_rank_avg": 0.11503882706165314,
1042
+ "step": 470,
1043
+ "valid_targets_mean": 4669.9,
1044
+ "valid_targets_min": 508
1045
+ },
1046
+ {
1047
+ "epoch": 4.2792792792792795,
1048
+ "grad_norm": 0.32367217230222545,
1049
+ "learning_rate": 1.585055899621904e-05,
1050
+ "loss": 0.2771,
1051
+ "loss_nan_ranks": 0,
1052
+ "loss_rank_avg": 0.11892901360988617,
1053
+ "step": 475,
1054
+ "valid_targets_mean": 3773.2,
1055
+ "valid_targets_min": 474
1056
+ },
1057
+ {
1058
+ "epoch": 4.324324324324325,
1059
+ "grad_norm": 0.258336978621668,
1060
+ "learning_rate": 1.5411982077072925e-05,
1061
+ "loss": 0.2736,
1062
+ "loss_nan_ranks": 0,
1063
+ "loss_rank_avg": 0.07667964696884155,
1064
+ "step": 480,
1065
+ "valid_targets_mean": 3893.4,
1066
+ "valid_targets_min": 697
1067
+ },
1068
+ {
1069
+ "epoch": 4.36936936936937,
1070
+ "grad_norm": 0.24609619451384765,
1071
+ "learning_rate": 1.497572197759811e-05,
1072
+ "loss": 0.2636,
1073
+ "loss_nan_ranks": 0,
1074
+ "loss_rank_avg": 0.06371718645095825,
1075
+ "step": 485,
1076
+ "valid_targets_mean": 3523.3,
1077
+ "valid_targets_min": 452
1078
+ },
1079
+ {
1080
+ "epoch": 4.414414414414415,
1081
+ "grad_norm": 0.23835609022118848,
1082
+ "learning_rate": 1.4541998996842503e-05,
1083
+ "loss": 0.2721,
1084
+ "loss_nan_ranks": 0,
1085
+ "loss_rank_avg": 0.07387564331293106,
1086
+ "step": 490,
1087
+ "valid_targets_mean": 4369.7,
1088
+ "valid_targets_min": 847
1089
+ },
1090
+ {
1091
+ "epoch": 4.45945945945946,
1092
+ "grad_norm": 0.28159658575417745,
1093
+ "learning_rate": 1.4111032152680621e-05,
1094
+ "loss": 0.2669,
1095
+ "loss_nan_ranks": 0,
1096
+ "loss_rank_avg": 0.09198597073554993,
1097
+ "step": 495,
1098
+ "valid_targets_mean": 4141.6,
1099
+ "valid_targets_min": 374
1100
+ },
1101
+ {
1102
+ "epoch": 4.504504504504505,
1103
+ "grad_norm": 0.2433049555484407,
1104
+ "learning_rate": 1.3683039071215717e-05,
1105
+ "loss": 0.2704,
1106
+ "loss_nan_ranks": 0,
1107
+ "loss_rank_avg": 0.0909474790096283,
1108
+ "step": 500,
1109
+ "valid_targets_mean": 5381.2,
1110
+ "valid_targets_min": 605
1111
+ },
1112
+ {
1113
+ "epoch": 4.54954954954955,
1114
+ "grad_norm": 0.24437578427894577,
1115
+ "learning_rate": 1.3258235876884735e-05,
1116
+ "loss": 0.2746,
1117
+ "loss_nan_ranks": 0,
1118
+ "loss_rank_avg": 0.10796620696783066,
1119
+ "step": 505,
1120
+ "valid_targets_mean": 5468.9,
1121
+ "valid_targets_min": 414
1122
+ },
1123
+ {
1124
+ "epoch": 4.594594594594595,
1125
+ "grad_norm": 0.26846184374905985,
1126
+ "learning_rate": 1.283683708332159e-05,
1127
+ "loss": 0.2572,
1128
+ "loss_nan_ranks": 0,
1129
+ "loss_rank_avg": 0.09838269650936127,
1130
+ "step": 510,
1131
+ "valid_targets_mean": 4829.6,
1132
+ "valid_targets_min": 691
1133
+ },
1134
+ {
1135
+ "epoch": 4.63963963963964,
1136
+ "grad_norm": 0.252099525432998,
1137
+ "learning_rate": 1.2419055485033788e-05,
1138
+ "loss": 0.2538,
1139
+ "loss_nan_ranks": 0,
1140
+ "loss_rank_avg": 0.08270560204982758,
1141
+ "step": 515,
1142
+ "valid_targets_mean": 4539.7,
1143
+ "valid_targets_min": 758
1144
+ },
1145
+ {
1146
+ "epoch": 4.684684684684685,
1147
+ "grad_norm": 0.22995117395664008,
1148
+ "learning_rate": 1.200510204994724e-05,
1149
+ "loss": 0.2671,
1150
+ "loss_nan_ranks": 0,
1151
+ "loss_rank_avg": 0.07044124603271484,
1152
+ "step": 520,
1153
+ "valid_targets_mean": 4077.8,
1154
+ "valid_targets_min": 368
1155
+ },
1156
+ {
1157
+ "epoch": 4.72972972972973,
1158
+ "grad_norm": 0.25606569592040784,
1159
+ "learning_rate": 1.1595185812873382e-05,
1160
+ "loss": 0.283,
1161
+ "loss_nan_ranks": 0,
1162
+ "loss_rank_avg": 0.09904761612415314,
1163
+ "step": 525,
1164
+ "valid_targets_mean": 4964.8,
1165
+ "valid_targets_min": 534
1166
+ },
1167
+ {
1168
+ "epoch": 4.774774774774775,
1169
+ "grad_norm": 0.2421726915390166,
1170
+ "learning_rate": 1.118951376995251e-05,
1171
+ "loss": 0.269,
1172
+ "loss_nan_ranks": 0,
1173
+ "loss_rank_avg": 0.0894804447889328,
1174
+ "step": 530,
1175
+ "valid_targets_mean": 4745.5,
1176
+ "valid_targets_min": 347
1177
+ },
1178
+ {
1179
+ "epoch": 4.81981981981982,
1180
+ "grad_norm": 0.2408175858115054,
1181
+ "learning_rate": 1.0788290774126549e-05,
1182
+ "loss": 0.2757,
1183
+ "loss_nan_ranks": 0,
1184
+ "loss_rank_avg": 0.0949220210313797,
1185
+ "step": 535,
1186
+ "valid_targets_mean": 4688.9,
1187
+ "valid_targets_min": 558
1188
+ },
1189
+ {
1190
+ "epoch": 4.864864864864865,
1191
+ "grad_norm": 0.28683879615194635,
1192
+ "learning_rate": 1.039171943169411e-05,
1193
+ "loss": 0.2697,
1194
+ "loss_nan_ranks": 0,
1195
+ "loss_rank_avg": 0.08911705017089844,
1196
+ "step": 540,
1197
+ "valid_targets_mean": 4208.7,
1198
+ "valid_targets_min": 301
1199
+ },
1200
+ {
1201
+ "epoch": 4.90990990990991,
1202
+ "grad_norm": 0.23515882176666528,
1203
+ "learning_rate": 1.0000000000000006e-05,
1204
+ "loss": 0.2582,
1205
+ "loss_nan_ranks": 0,
1206
+ "loss_rank_avg": 0.06455663591623306,
1207
+ "step": 545,
1208
+ "valid_targets_mean": 4094.1,
1209
+ "valid_targets_min": 574
1210
+ },
1211
+ {
1212
+ "epoch": 4.954954954954955,
1213
+ "grad_norm": 0.25099139871892956,
1214
+ "learning_rate": 9.613330286310952e-06,
1215
+ "loss": 0.2702,
1216
+ "loss_nan_ranks": 0,
1217
+ "loss_rank_avg": 0.08976513147354126,
1218
+ "step": 550,
1219
+ "valid_targets_mean": 3831.1,
1220
+ "valid_targets_min": 484
1221
+ },
1222
+ {
1223
+ "epoch": 5.0,
1224
+ "grad_norm": 0.23080263557748837,
1225
+ "learning_rate": 9.23190554792847e-06,
1226
+ "loss": 0.2727,
1227
+ "loss_nan_ranks": 0,
1228
+ "loss_rank_avg": 0.0830158069729805,
1229
+ "step": 555,
1230
+ "valid_targets_mean": 5210.4,
1231
+ "valid_targets_min": 1005
1232
+ },
1233
+ {
1234
+ "epoch": 5.045045045045045,
1235
+ "grad_norm": 0.25916115934759043,
1236
+ "learning_rate": 8.855918393589462e-06,
1237
+ "loss": 0.2649,
1238
+ "loss_nan_ranks": 0,
1239
+ "loss_rank_avg": 0.08410818129777908,
1240
+ "step": 560,
1241
+ "valid_targets_mean": 4458.4,
1242
+ "valid_targets_min": 510
1243
+ },
1244
+ {
1245
+ "epoch": 5.09009009009009,
1246
+ "grad_norm": 0.2564850081349879,
1247
+ "learning_rate": 8.485558686204215e-06,
1248
+ "loss": 0.2687,
1249
+ "loss_nan_ranks": 0,
1250
+ "loss_rank_avg": 0.11359532922506332,
1251
+ "step": 565,
1252
+ "valid_targets_mean": 4616.7,
1253
+ "valid_targets_min": 627
1254
+ },
1255
+ {
1256
+ "epoch": 5.135135135135135,
1257
+ "grad_norm": 0.2510056064640416,
1258
+ "learning_rate": 8.121013446981004e-06,
1259
+ "loss": 0.2547,
1260
+ "loss_nan_ranks": 0,
1261
+ "loss_rank_avg": 0.08746031671762466,
1262
+ "step": 570,
1263
+ "valid_targets_mean": 4663.1,
1264
+ "valid_targets_min": 624
1265
+ },
1266
+ {
1267
+ "epoch": 5.18018018018018,
1268
+ "grad_norm": 0.2625475326627896,
1269
+ "learning_rate": 7.762466760985651e-06,
1270
+ "loss": 0.2613,
1271
+ "loss_nan_ranks": 0,
1272
+ "loss_rank_avg": 0.07597170025110245,
1273
+ "step": 575,
1274
+ "valid_targets_mean": 4726.4,
1275
+ "valid_targets_min": 460
1276
+ },
1277
+ {
1278
+ "epoch": 5.225225225225225,
1279
+ "grad_norm": 0.2734042593938881,
1280
+ "learning_rate": 7.410099684183738e-06,
1281
+ "loss": 0.2692,
1282
+ "loss_nan_ranks": 0,
1283
+ "loss_rank_avg": 0.09236225485801697,
1284
+ "step": 580,
1285
+ "valid_targets_mean": 4880.7,
1286
+ "valid_targets_min": 622
1287
+ },
1288
+ {
1289
+ "epoch": 5.27027027027027,
1290
+ "grad_norm": 0.2561238491798988,
1291
+ "learning_rate": 7.064090152012488e-06,
1292
+ "loss": 0.2625,
1293
+ "loss_nan_ranks": 0,
1294
+ "loss_rank_avg": 0.07333845645189285,
1295
+ "step": 585,
1296
+ "valid_targets_mean": 4421.5,
1297
+ "valid_targets_min": 464
1298
+ },
1299
+ {
1300
+ "epoch": 5.315315315315315,
1301
+ "grad_norm": 0.26430414248927997,
1302
+ "learning_rate": 6.72461288952835e-06,
1303
+ "loss": 0.2592,
1304
+ "loss_nan_ranks": 0,
1305
+ "loss_rank_avg": 0.11116979271173477,
1306
+ "step": 590,
1307
+ "valid_targets_mean": 4556.4,
1308
+ "valid_targets_min": 486
1309
+ },
1310
+ {
1311
+ "epoch": 5.36036036036036,
1312
+ "grad_norm": 0.23575468631305518,
1313
+ "learning_rate": 6.391839323175788e-06,
1314
+ "loss": 0.263,
1315
+ "loss_nan_ranks": 0,
1316
+ "loss_rank_avg": 0.09228559583425522,
1317
+ "step": 595,
1318
+ "valid_targets_mean": 5292.0,
1319
+ "valid_targets_min": 637
1320
+ },
1321
+ {
1322
+ "epoch": 5.405405405405405,
1323
+ "grad_norm": 0.24008357776737607,
1324
+ "learning_rate": 6.065937494221763e-06,
1325
+ "loss": 0.277,
1326
+ "loss_nan_ranks": 0,
1327
+ "loss_rank_avg": 0.07229343056678772,
1328
+ "step": 600,
1329
+ "valid_targets_mean": 3771.4,
1330
+ "valid_targets_min": 441
1331
+ },
1332
+ {
1333
+ "epoch": 5.45045045045045,
1334
+ "grad_norm": 0.2804930790244736,
1335
+ "learning_rate": 5.747071973899634e-06,
1336
+ "loss": 0.2712,
1337
+ "loss_nan_ranks": 0,
1338
+ "loss_rank_avg": 0.10028033703565598,
1339
+ "step": 605,
1340
+ "valid_targets_mean": 4424.5,
1341
+ "valid_targets_min": 666
1342
+ },
1343
+ {
1344
+ "epoch": 5.495495495495495,
1345
+ "grad_norm": 0.2854127484831119,
1346
+ "learning_rate": 5.4354037803053124e-06,
1347
+ "loss": 0.2633,
1348
+ "loss_nan_ranks": 0,
1349
+ "loss_rank_avg": 0.09467929601669312,
1350
+ "step": 610,
1351
+ "valid_targets_mean": 4428.1,
1352
+ "valid_targets_min": 581
1353
+ },
1354
+ {
1355
+ "epoch": 5.54054054054054,
1356
+ "grad_norm": 0.23697817143658992,
1357
+ "learning_rate": 5.131090297087682e-06,
1358
+ "loss": 0.2467,
1359
+ "loss_nan_ranks": 0,
1360
+ "loss_rank_avg": 0.06766346096992493,
1361
+ "step": 615,
1362
+ "valid_targets_mean": 4754.2,
1363
+ "valid_targets_min": 820
1364
+ },
1365
+ {
1366
+ "epoch": 5.585585585585585,
1367
+ "grad_norm": 0.22448330366536967,
1368
+ "learning_rate": 4.834285193974277e-06,
1369
+ "loss": 0.2828,
1370
+ "loss_nan_ranks": 0,
1371
+ "loss_rank_avg": 0.08224675059318542,
1372
+ "step": 620,
1373
+ "valid_targets_mean": 4920.5,
1374
+ "valid_targets_min": 761
1375
+ },
1376
+ {
1377
+ "epoch": 5.63063063063063,
1378
+ "grad_norm": 0.2636611878403353,
1379
+ "learning_rate": 4.545138349172418e-06,
1380
+ "loss": 0.267,
1381
+ "loss_nan_ranks": 0,
1382
+ "loss_rank_avg": 0.08178374171257019,
1383
+ "step": 625,
1384
+ "valid_targets_mean": 4456.0,
1385
+ "valid_targets_min": 536
1386
+ },
1387
+ {
1388
+ "epoch": 5.675675675675675,
1389
+ "grad_norm": 0.2622213694212647,
1390
+ "learning_rate": 4.263795773684929e-06,
1391
+ "loss": 0.2685,
1392
+ "loss_nan_ranks": 0,
1393
+ "loss_rank_avg": 0.09543284773826599,
1394
+ "step": 630,
1395
+ "valid_targets_mean": 5318.9,
1396
+ "valid_targets_min": 681
1397
+ },
1398
+ {
1399
+ "epoch": 5.7207207207207205,
1400
+ "grad_norm": 0.2697167973214109,
1401
+ "learning_rate": 3.9903995375787245e-06,
1402
+ "loss": 0.2682,
1403
+ "loss_nan_ranks": 0,
1404
+ "loss_rank_avg": 0.06491408497095108,
1405
+ "step": 635,
1406
+ "valid_targets_mean": 2849.9,
1407
+ "valid_targets_min": 542
1408
+ },
1409
+ {
1410
+ "epoch": 5.7657657657657655,
1411
+ "grad_norm": 0.25988309126817144,
1412
+ "learning_rate": 3.7250876982433947e-06,
1413
+ "loss": 0.2712,
1414
+ "loss_nan_ranks": 0,
1415
+ "loss_rank_avg": 0.07970383018255234,
1416
+ "step": 640,
1417
+ "valid_targets_mean": 4716.7,
1418
+ "valid_targets_min": 747
1419
+ },
1420
+ {
1421
+ "epoch": 5.8108108108108105,
1422
+ "grad_norm": 0.25075025233615533,
1423
+ "learning_rate": 3.4679942306761484e-06,
1424
+ "loss": 0.2673,
1425
+ "loss_nan_ranks": 0,
1426
+ "loss_rank_avg": 0.11483700573444366,
1427
+ "step": 645,
1428
+ "valid_targets_mean": 4390.4,
1429
+ "valid_targets_min": 564
1430
+ },
1431
+ {
1432
+ "epoch": 5.8558558558558556,
1433
+ "grad_norm": 0.2284807711852441,
1434
+ "learning_rate": 3.219248959828196e-06,
1435
+ "loss": 0.2626,
1436
+ "loss_nan_ranks": 0,
1437
+ "loss_rank_avg": 0.07720620930194855,
1438
+ "step": 650,
1439
+ "valid_targets_mean": 4076.5,
1440
+ "valid_targets_min": 391
1441
+ },
1442
+ {
1443
+ "epoch": 5.900900900900901,
1444
+ "grad_norm": 0.2427431505988026,
1445
+ "learning_rate": 2.9789774950468265e-06,
1446
+ "loss": 0.2614,
1447
+ "loss_nan_ranks": 0,
1448
+ "loss_rank_avg": 0.07379475235939026,
1449
+ "step": 655,
1450
+ "valid_targets_mean": 3298.6,
1451
+ "valid_targets_min": 569
1452
+ },
1453
+ {
1454
+ "epoch": 5.945945945945946,
1455
+ "grad_norm": 0.25718430981692053,
1456
+ "learning_rate": 2.747301166646221e-06,
1457
+ "loss": 0.2732,
1458
+ "loss_nan_ranks": 0,
1459
+ "loss_rank_avg": 0.11568117141723633,
1460
+ "step": 660,
1461
+ "valid_targets_mean": 4579.6,
1462
+ "valid_targets_min": 686
1463
+ },
1464
+ {
1465
+ "epoch": 5.990990990990991,
1466
+ "grad_norm": 0.2654642551899208,
1467
+ "learning_rate": 2.524336964639067e-06,
1468
+ "loss": 0.2579,
1469
+ "loss_nan_ranks": 0,
1470
+ "loss_rank_avg": 0.08498381823301315,
1471
+ "step": 665,
1472
+ "valid_targets_mean": 3435.5,
1473
+ "valid_targets_min": 584
1474
+ },
1475
+ {
1476
+ "epoch": 6.036036036036036,
1477
+ "grad_norm": 0.24038944183599942,
1478
+ "learning_rate": 2.3101974796599015e-06,
1479
+ "loss": 0.2628,
1480
+ "loss_nan_ranks": 0,
1481
+ "loss_rank_avg": 0.10556922852993011,
1482
+ "step": 670,
1483
+ "valid_targets_mean": 5025.0,
1484
+ "valid_targets_min": 734
1485
+ },
1486
+ {
1487
+ "epoch": 6.081081081081081,
1488
+ "grad_norm": 0.30520401217062976,
1489
+ "learning_rate": 2.1049908461100086e-06,
1490
+ "loss": 0.2705,
1491
+ "loss_nan_ranks": 0,
1492
+ "loss_rank_avg": 0.06116008758544922,
1493
+ "step": 675,
1494
+ "valid_targets_mean": 3497.3,
1495
+ "valid_targets_min": 439
1496
+ },
1497
+ {
1498
+ "epoch": 6.126126126126126,
1499
+ "grad_norm": 0.2323895979226439,
1500
+ "learning_rate": 1.9088206875526128e-06,
1501
+ "loss": 0.2652,
1502
+ "loss_nan_ranks": 0,
1503
+ "loss_rank_avg": 0.07196008414030075,
1504
+ "step": 680,
1505
+ "valid_targets_mean": 5198.0,
1506
+ "valid_targets_min": 513
1507
+ },
1508
+ {
1509
+ "epoch": 6.171171171171171,
1510
+ "grad_norm": 0.21965113590368227,
1511
+ "learning_rate": 1.7217860643858797e-06,
1512
+ "loss": 0.2655,
1513
+ "loss_nan_ranks": 0,
1514
+ "loss_rank_avg": 0.06284235417842865,
1515
+ "step": 685,
1516
+ "valid_targets_mean": 3905.0,
1517
+ "valid_targets_min": 322
1518
+ },
1519
+ {
1520
+ "epoch": 6.216216216216216,
1521
+ "grad_norm": 0.2768660919779248,
1522
+ "learning_rate": 1.5439814238202356e-06,
1523
+ "loss": 0.2625,
1524
+ "loss_nan_ranks": 0,
1525
+ "loss_rank_avg": 0.09718060493469238,
1526
+ "step": 690,
1527
+ "valid_targets_mean": 4281.0,
1528
+ "valid_targets_min": 498
1529
+ },
1530
+ {
1531
+ "epoch": 6.261261261261261,
1532
+ "grad_norm": 0.2534131835087623,
1533
+ "learning_rate": 1.3754965521851582e-06,
1534
+ "loss": 0.2657,
1535
+ "loss_nan_ranks": 0,
1536
+ "loss_rank_avg": 0.06957890093326569,
1537
+ "step": 695,
1538
+ "valid_targets_mean": 3135.7,
1539
+ "valid_targets_min": 473
1540
+ },
1541
+ {
1542
+ "epoch": 6.306306306306306,
1543
+ "grad_norm": 0.2763941898176929,
1544
+ "learning_rate": 1.2164165295896392e-06,
1545
+ "loss": 0.2658,
1546
+ "loss_nan_ranks": 0,
1547
+ "loss_rank_avg": 0.12905296683311462,
1548
+ "step": 700,
1549
+ "valid_targets_mean": 5124.8,
1550
+ "valid_targets_min": 480
1551
+ },
1552
+ {
1553
+ "epoch": 6.351351351351352,
1554
+ "grad_norm": 0.29676241873336406,
1555
+ "learning_rate": 1.0668216869591098e-06,
1556
+ "loss": 0.268,
1557
+ "loss_nan_ranks": 0,
1558
+ "loss_rank_avg": 0.07291467487812042,
1559
+ "step": 705,
1560
+ "valid_targets_mean": 3921.7,
1561
+ "valid_targets_min": 550
1562
+ },
1563
+ {
1564
+ "epoch": 6.396396396396397,
1565
+ "grad_norm": 0.2396542772968403,
1566
+ "learning_rate": 9.267875654706015e-07,
1567
+ "loss": 0.2607,
1568
+ "loss_nan_ranks": 0,
1569
+ "loss_rank_avg": 0.06727921962738037,
1570
+ "step": 710,
1571
+ "valid_targets_mean": 4327.9,
1572
+ "valid_targets_min": 540
1573
+ },
1574
+ {
1575
+ "epoch": 6.441441441441442,
1576
+ "grad_norm": 0.2477130333886694,
1577
+ "learning_rate": 7.963848784065753e-07,
1578
+ "loss": 0.2523,
1579
+ "loss_nan_ranks": 0,
1580
+ "loss_rank_avg": 0.10727176815271378,
1581
+ "step": 715,
1582
+ "valid_targets_mean": 4858.2,
1583
+ "valid_targets_min": 460
1584
+ },
1585
+ {
1586
+ "epoch": 6.486486486486487,
1587
+ "grad_norm": 0.23785509289793144,
1588
+ "learning_rate": 6.756794754467045e-07,
1589
+ "loss": 0.2642,
1590
+ "loss_nan_ranks": 0,
1591
+ "loss_rank_avg": 0.06506279110908508,
1592
+ "step": 720,
1593
+ "valid_targets_mean": 3841.0,
1594
+ "valid_targets_min": 486
1595
+ },
1596
+ {
1597
+ "epoch": 6.531531531531532,
1598
+ "grad_norm": 0.2599446216714078,
1599
+ "learning_rate": 5.647323094156565e-07,
1600
+ "loss": 0.2579,
1601
+ "loss_nan_ranks": 0,
1602
+ "loss_rank_avg": 0.06333674490451813,
1603
+ "step": 725,
1604
+ "valid_targets_mean": 3253.1,
1605
+ "valid_targets_min": 527
1606
+ },
1607
+ {
1608
+ "epoch": 6.576576576576577,
1609
+ "grad_norm": 0.23275171125994232,
1610
+ "learning_rate": 4.635994055036208e-07,
1611
+ "loss": 0.2618,
1612
+ "loss_nan_ranks": 0,
1613
+ "loss_rank_avg": 0.09324885904788971,
1614
+ "step": 730,
1615
+ "valid_targets_mean": 4914.8,
1616
+ "valid_targets_min": 643
1617
+ },
1618
+ {
1619
+ "epoch": 6.621621621621622,
1620
+ "grad_norm": 0.2700560927072675,
1621
+ "learning_rate": 3.723318329751746e-07,
1622
+ "loss": 0.264,
1623
+ "loss_nan_ranks": 0,
1624
+ "loss_rank_avg": 0.08801683783531189,
1625
+ "step": 735,
1626
+ "valid_targets_mean": 3425.6,
1627
+ "valid_targets_min": 443
1628
+ },
1629
+ {
1630
+ "epoch": 6.666666666666667,
1631
+ "grad_norm": 0.2856364748943094,
1632
+ "learning_rate": 2.9097567938074943e-07,
1633
+ "loss": 0.2668,
1634
+ "loss_nan_ranks": 0,
1635
+ "loss_rank_avg": 0.06195676326751709,
1636
+ "step": 740,
1637
+ "valid_targets_mean": 2569.1,
1638
+ "valid_targets_min": 396
1639
+ },
1640
+ {
1641
+ "epoch": 6.711711711711712,
1642
+ "grad_norm": 0.2660876149226891,
1643
+ "learning_rate": 2.1957202728370542e-07,
1644
+ "loss": 0.2649,
1645
+ "loss_nan_ranks": 0,
1646
+ "loss_rank_avg": 0.08706497400999069,
1647
+ "step": 745,
1648
+ "valid_targets_mean": 3522.3,
1649
+ "valid_targets_min": 613
1650
+ },
1651
+ {
1652
+ "epoch": 6.756756756756757,
1653
+ "grad_norm": 0.2516831997354625,
1654
+ "learning_rate": 1.5815693351480587e-07,
1655
+ "loss": 0.2483,
1656
+ "loss_nan_ranks": 0,
1657
+ "loss_rank_avg": 0.081519715487957,
1658
+ "step": 750,
1659
+ "valid_targets_mean": 4058.4,
1660
+ "valid_targets_min": 594
1661
+ },
1662
+ {
1663
+ "epoch": 6.801801801801802,
1664
+ "grad_norm": 0.25213554356755763,
1665
+ "learning_rate": 1.0676141096453097e-07,
1666
+ "loss": 0.2714,
1667
+ "loss_nan_ranks": 0,
1668
+ "loss_rank_avg": 0.1169489175081253,
1669
+ "step": 755,
1670
+ "valid_targets_mean": 4910.7,
1671
+ "valid_targets_min": 311
1672
+ },
1673
+ {
1674
+ "epoch": 6.846846846846847,
1675
+ "grad_norm": 0.24647297685340094,
1676
+ "learning_rate": 6.541141292243814e-08,
1677
+ "loss": 0.269,
1678
+ "loss_nan_ranks": 0,
1679
+ "loss_rank_avg": 0.09742185473442078,
1680
+ "step": 760,
1681
+ "valid_targets_mean": 4796.6,
1682
+ "valid_targets_min": 371
1683
+ },
1684
+ {
1685
+ "epoch": 6.891891891891892,
1686
+ "grad_norm": 0.2655695590930414,
1687
+ "learning_rate": 3.412781997148784e-08,
1688
+ "loss": 0.2628,
1689
+ "loss_nan_ranks": 0,
1690
+ "loss_rank_avg": 0.07396084070205688,
1691
+ "step": 765,
1692
+ "valid_targets_mean": 3007.1,
1693
+ "valid_targets_min": 323
1694
+ },
1695
+ {
1696
+ "epoch": 6.936936936936937,
1697
+ "grad_norm": 0.24263597192661657,
1698
+ "learning_rate": 1.29264294439424e-08,
1699
+ "loss": 0.2532,
1700
+ "loss_nan_ranks": 0,
1701
+ "loss_rank_avg": 0.07865872979164124,
1702
+ "step": 770,
1703
+ "valid_targets_mean": 3486.3,
1704
+ "valid_targets_min": 709
1705
+ },
1706
+ {
1707
+ "epoch": 6.981981981981982,
1708
+ "grad_norm": 0.2461657123510328,
1709
+ "learning_rate": 1.817947444149315e-09,
1710
+ "loss": 0.2576,
1711
+ "loss_nan_ranks": 0,
1712
+ "loss_rank_avg": 0.07983484864234924,
1713
+ "step": 775,
1714
+ "valid_targets_mean": 4066.2,
1715
+ "valid_targets_min": 659
1716
+ },
1717
+ {
1718
+ "epoch": 7.0,
1719
+ "step": 777,
1720
+ "total_flos": 1.9140418922369516e+18,
1721
+ "train_loss": 0.0,
1722
+ "train_runtime": 1.4065,
1723
+ "train_samples_per_second": 53019.754,
1724
+ "train_steps_per_second": 552.445
1725
+ }
1726
+ ],
1727
+ "logging_steps": 5,
1728
+ "max_steps": 777,
1729
+ "num_input_tokens_seen": 0,
1730
+ "num_train_epochs": 7,
1731
+ "save_steps": 300,
1732
+ "stateful_callbacks": {
1733
+ "TrainerControl": {
1734
+ "args": {
1735
+ "should_epoch_stop": false,
1736
+ "should_evaluate": false,
1737
+ "should_log": false,
1738
+ "should_save": true,
1739
+ "should_training_stop": true
1740
+ },
1741
+ "attributes": {}
1742
+ }
1743
+ },
1744
+ "total_flos": 1.9140418922369516e+18,
1745
+ "train_batch_size": 1,
1746
+ "trial_name": null,
1747
+ "trial_params": null
1748
+ }
training_loss.png ADDED
vocab.json ADDED
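For reference, a minimal sketch (not part of this commit) of how the log history above could be parsed to reproduce a loss curve like `training_loss.png`. It assumes the JSON shown is the standard Hugging Face `trainer_state.json` layout, with a `log_history` list whose periodic entries carry `step`, `loss`, `learning_rate`, and `grad_norm`; the input path and output filename are illustrative assumptions.

```python
# Sketch: plot training loss from the Trainer state logged every 5 steps.
# Assumes the standard trainer_state.json schema shown in the diff above.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:  # hypothetical local path
    state = json.load(f)

# Keep only the periodic logging entries; the final summary entry has
# "train_loss"/"train_runtime" instead of a per-step "loss".
logs = [e for e in state["log_history"] if "loss" in e and "step" in e]

steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("SFT training loss (logged every 5 steps)")
plt.savefig("training_loss_reproduced.png")  # illustrative output name
```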