diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..52373fe24473b1aa44333d318f578ae6bf04b49b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..a93f51d7d689fe64a86a34916102880826a8f9c6
--- /dev/null
+++ b/config.json
@@ -0,0 +1,29 @@
+{
+ "_name_or_path": "/cpfs02/user/liurunze/hf_models/models--deepseek-ai--DeepSeek-R1-Distill-Qwen-32B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "intermediate_size": 27648,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 64,
+ "model_type": "qwen2",
+ "num_attention_heads": 40,
+ "num_hidden_layers": 64,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.47.1",
+ "use_cache": false,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+}
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/latest b/latest
new file mode 100644
index 0000000000000000000000000000000000000000..3eecd55d34b8457560df599420fbfbe3ab57b3b8
--- /dev/null
+++ b/latest
@@ -0,0 +1 @@
+global_step2527
\ No newline at end of file
diff --git a/model-00001-of-00014.safetensors b/model-00001-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a5223ef99eb9a6c9b61968c0b7e08423580579cb
--- /dev/null
+++ b/model-00001-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcec9b7c302950321d6b9f6200305c56f2e7dc7ca827e410ef7262052294b93e
+size 4891730992
diff --git a/model-00002-of-00014.safetensors b/model-00002-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c6be20f1b25acd49d8e0d93230159d392cbab4f4
--- /dev/null
+++ b/model-00002-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3009b154b6edfca1945d40664547697b76382ae6eaa96cf879696ccb85b96d1
+size 4876059352
diff --git a/model-00003-of-00014.safetensors b/model-00003-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8692cf4b3ae43529be17acf0c28b0ff48aa1e852
--- /dev/null
+++ b/model-00003-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34f54186efea605024b956f1a90704460a928c6aa5784d77f846a81c740c7f8e
+size 4876059384
diff --git a/model-00004-of-00014.safetensors b/model-00004-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..785f924d1e727f579da08c6daf5bc875c2febfe4
--- /dev/null
+++ b/model-00004-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:591564c17ae2b53eeddff06dbc537afe5ae7bee87267e2357a78dae7cbd26c39
+size 4876059416
diff --git a/model-00005-of-00014.safetensors b/model-00005-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..97ecf790c089afafcd05074b22693e8438baed1e
--- /dev/null
+++ b/model-00005-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06809d0c34afe756099a68bd245881fe7ac18cfb04f49610eba8a705a581dfdb
+size 4876059416
diff --git a/model-00006-of-00014.safetensors b/model-00006-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0313cbca2e792eb4cdb71957329d0eb9667a44d9
--- /dev/null
+++ b/model-00006-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:552df193096b569463001f66cd8d6b219bfa11dcf7b283e9625f1e8df3b8652d
+size 4876059416
diff --git a/model-00007-of-00014.safetensors b/model-00007-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ab920be30883a0dbb9e3ded02dd6c3fea6f9fb0b
--- /dev/null
+++ b/model-00007-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28a3a81a3f5b03dda6b39ecb6af8ba4fc3a5773770a1d38a13a1bbc4f75bfb1d
+size 4876059416
diff --git a/model-00008-of-00014.safetensors b/model-00008-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1d577d405a3ad01e8f86bcb3eb3cafec33c83c4b
--- /dev/null
+++ b/model-00008-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62e3bc99b5facd13da92244883055ab8cb587d043d6765f5a03c20e96d77c16b
+size 4876059416
diff --git a/model-00009-of-00014.safetensors b/model-00009-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..21851bafc33c67295dac8b91a10155c9e68d462c
--- /dev/null
+++ b/model-00009-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3bc6ae900d00068ba60ab5d5815175e3452bd318a4b4e7de58f4691662d79264
+size 4876059416
diff --git a/model-00010-of-00014.safetensors b/model-00010-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9413387af97f3e62dc07cc323551eac12ad9c2ce
--- /dev/null
+++ b/model-00010-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f0fcbeb8fdad5441b5f1f1ec996e258959ab1096c9502b10bc1e281997ee45c
+size 4876059416
diff --git a/model-00011-of-00014.safetensors b/model-00011-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1d739a79cd7433f96190f5bf922294a00802a86a
--- /dev/null
+++ b/model-00011-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:926a3676e3e2f8bc21e1e09d343bcf05b59a7aa13e202234154842dba350de2e
+size 4876059416
diff --git a/model-00012-of-00014.safetensors b/model-00012-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dde1861cc82a812efc7d45210d45365e9e95b8a2
--- /dev/null
+++ b/model-00012-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:984b74f6a669a9e95962a1304e113741d1e58d4a36e4f34a46d3020fca458fe3
+size 4876059416
diff --git a/model-00013-of-00014.safetensors b/model-00013-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..03e906885abc528592488769a8770f7a067dbb9e
--- /dev/null
+++ b/model-00013-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fdf64212068ec59078a98735e623aebe988ed94bca37d1d911622d471dc512a
+size 4876059416
diff --git a/model-00014-of-00014.safetensors b/model-00014-of-00014.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3c8f9a52f9fb266759fc0b2d99d564236ddc631f
--- /dev/null
+++ b/model-00014-of-00014.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e1ccc3712205bf38056c834c2ad62dc470bfb988953f47a57ea12729f318f6f
+size 2123397800
diff --git a/model.safetensors.index.json b/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..5001e730d08cefeeb49c4e431f0ede2c48f84d8c
--- /dev/null
+++ b/model.safetensors.index.json
@@ -0,0 +1,778 @@
+{
+ "metadata": {
+ "total_size": 65527752704
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00014-of-00014.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00014.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00014.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00014.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00014.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00014.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00014.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00014.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00014.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00014.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00014.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00014.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00003-of-00014.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00003-of-00014.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00003-of-00014.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00003-of-00014.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00003-of-00014.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00003-of-00014.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00004-of-00014.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00004-of-00014.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00004-of-00014.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00004-of-00014.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00004-of-00014.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00004-of-00014.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00004-of-00014.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00004-of-00014.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00004-of-00014.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00004-of-00014.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00005-of-00014.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00005-of-00014.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00004-of-00014.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00004-of-00014.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00005-of-00014.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00005-of-00014.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00014.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00014.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00014.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00014.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00014.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00005-of-00014.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00005-of-00014.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00005-of-00014.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00005-of-00014.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00005-of-00014.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00005-of-00014.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00006-of-00014.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00006-of-00014.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00005-of-00014.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00005-of-00014.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00006-of-00014.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00006-of-00014.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00006-of-00014.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00006-of-00014.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00006-of-00014.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00006-of-00014.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00006-of-00014.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00006-of-00014.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00007-of-00014.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00007-of-00014.safetensors",
+ "model.layers.28.self_attn.k_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.28.self_attn.q_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.28.self_attn.v_proj.bias": "model-00006-of-00014.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00006-of-00014.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00007-of-00014.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00007-of-00014.safetensors",
+ "model.layers.29.self_attn.k_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.29.self_attn.q_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.29.self_attn.v_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00002-of-00014.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00002-of-00014.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00014.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00014.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00014.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00014.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00007-of-00014.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00007-of-00014.safetensors",
+ "model.layers.30.self_attn.k_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.30.self_attn.q_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.30.self_attn.v_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00007-of-00014.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00007-of-00014.safetensors",
+ "model.layers.31.self_attn.k_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.31.self_attn.q_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.31.self_attn.v_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.32.input_layernorm.weight": "model-00007-of-00014.safetensors",
+ "model.layers.32.mlp.down_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.32.mlp.gate_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.32.mlp.up_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.32.post_attention_layernorm.weight": "model-00007-of-00014.safetensors",
+ "model.layers.32.self_attn.k_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.32.self_attn.k_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.32.self_attn.o_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.32.self_attn.q_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.32.self_attn.q_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.32.self_attn.v_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.32.self_attn.v_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.33.input_layernorm.weight": "model-00008-of-00014.safetensors",
+ "model.layers.33.mlp.down_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.33.mlp.gate_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.33.mlp.up_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.33.post_attention_layernorm.weight": "model-00008-of-00014.safetensors",
+ "model.layers.33.self_attn.k_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.33.self_attn.k_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.33.self_attn.o_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.33.self_attn.q_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.33.self_attn.q_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.33.self_attn.v_proj.bias": "model-00007-of-00014.safetensors",
+ "model.layers.33.self_attn.v_proj.weight": "model-00007-of-00014.safetensors",
+ "model.layers.34.input_layernorm.weight": "model-00008-of-00014.safetensors",
+ "model.layers.34.mlp.down_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.34.mlp.gate_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.34.mlp.up_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.34.post_attention_layernorm.weight": "model-00008-of-00014.safetensors",
+ "model.layers.34.self_attn.k_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.34.self_attn.k_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.34.self_attn.o_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.34.self_attn.q_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.34.self_attn.q_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.34.self_attn.v_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.34.self_attn.v_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.35.input_layernorm.weight": "model-00008-of-00014.safetensors",
+ "model.layers.35.mlp.down_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.35.mlp.gate_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.35.mlp.up_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.35.post_attention_layernorm.weight": "model-00008-of-00014.safetensors",
+ "model.layers.35.self_attn.k_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.35.self_attn.k_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.35.self_attn.o_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.35.self_attn.q_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.35.self_attn.q_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.35.self_attn.v_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.35.self_attn.v_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.36.input_layernorm.weight": "model-00008-of-00014.safetensors",
+ "model.layers.36.mlp.down_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.36.mlp.gate_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.36.mlp.up_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.36.post_attention_layernorm.weight": "model-00008-of-00014.safetensors",
+ "model.layers.36.self_attn.k_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.36.self_attn.k_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.36.self_attn.o_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.36.self_attn.q_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.36.self_attn.q_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.36.self_attn.v_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.36.self_attn.v_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.37.input_layernorm.weight": "model-00008-of-00014.safetensors",
+ "model.layers.37.mlp.down_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.37.mlp.gate_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.37.mlp.up_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.37.post_attention_layernorm.weight": "model-00008-of-00014.safetensors",
+ "model.layers.37.self_attn.k_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.37.self_attn.k_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.37.self_attn.o_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.37.self_attn.q_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.37.self_attn.q_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.37.self_attn.v_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.37.self_attn.v_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.38.input_layernorm.weight": "model-00009-of-00014.safetensors",
+ "model.layers.38.mlp.down_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.38.mlp.gate_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.38.mlp.up_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.38.post_attention_layernorm.weight": "model-00009-of-00014.safetensors",
+ "model.layers.38.self_attn.k_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.38.self_attn.k_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.38.self_attn.o_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.38.self_attn.q_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.38.self_attn.q_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.38.self_attn.v_proj.bias": "model-00008-of-00014.safetensors",
+ "model.layers.38.self_attn.v_proj.weight": "model-00008-of-00014.safetensors",
+ "model.layers.39.input_layernorm.weight": "model-00009-of-00014.safetensors",
+ "model.layers.39.mlp.down_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.39.mlp.gate_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.39.mlp.up_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.39.post_attention_layernorm.weight": "model-00009-of-00014.safetensors",
+ "model.layers.39.self_attn.k_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.39.self_attn.k_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.39.self_attn.o_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.39.self_attn.q_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.39.self_attn.q_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.39.self_attn.v_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.39.self_attn.v_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00002-of-00014.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00014.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.40.input_layernorm.weight": "model-00009-of-00014.safetensors",
+ "model.layers.40.mlp.down_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.40.mlp.gate_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.40.mlp.up_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.40.post_attention_layernorm.weight": "model-00009-of-00014.safetensors",
+ "model.layers.40.self_attn.k_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.40.self_attn.k_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.40.self_attn.o_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.40.self_attn.q_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.40.self_attn.q_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.40.self_attn.v_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.40.self_attn.v_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.41.input_layernorm.weight": "model-00009-of-00014.safetensors",
+ "model.layers.41.mlp.down_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.41.mlp.gate_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.41.mlp.up_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.41.post_attention_layernorm.weight": "model-00009-of-00014.safetensors",
+ "model.layers.41.self_attn.k_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.41.self_attn.k_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.41.self_attn.o_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.41.self_attn.q_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.41.self_attn.q_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.41.self_attn.v_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.41.self_attn.v_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.42.input_layernorm.weight": "model-00009-of-00014.safetensors",
+ "model.layers.42.mlp.down_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.42.mlp.gate_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.42.mlp.up_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.42.post_attention_layernorm.weight": "model-00009-of-00014.safetensors",
+ "model.layers.42.self_attn.k_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.42.self_attn.k_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.42.self_attn.o_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.42.self_attn.q_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.42.self_attn.q_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.42.self_attn.v_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.42.self_attn.v_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.43.input_layernorm.weight": "model-00010-of-00014.safetensors",
+ "model.layers.43.mlp.down_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.43.mlp.gate_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.43.mlp.up_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.43.post_attention_layernorm.weight": "model-00010-of-00014.safetensors",
+ "model.layers.43.self_attn.k_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.43.self_attn.k_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.43.self_attn.o_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.43.self_attn.q_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.43.self_attn.q_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.43.self_attn.v_proj.bias": "model-00009-of-00014.safetensors",
+ "model.layers.43.self_attn.v_proj.weight": "model-00009-of-00014.safetensors",
+ "model.layers.44.input_layernorm.weight": "model-00010-of-00014.safetensors",
+ "model.layers.44.mlp.down_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.44.mlp.gate_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.44.mlp.up_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.44.post_attention_layernorm.weight": "model-00010-of-00014.safetensors",
+ "model.layers.44.self_attn.k_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.44.self_attn.k_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.44.self_attn.o_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.44.self_attn.q_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.44.self_attn.q_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.44.self_attn.v_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.44.self_attn.v_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.45.input_layernorm.weight": "model-00010-of-00014.safetensors",
+ "model.layers.45.mlp.down_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.45.mlp.gate_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.45.mlp.up_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.45.post_attention_layernorm.weight": "model-00010-of-00014.safetensors",
+ "model.layers.45.self_attn.k_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.45.self_attn.k_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.45.self_attn.o_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.45.self_attn.q_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.45.self_attn.q_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.45.self_attn.v_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.45.self_attn.v_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.46.input_layernorm.weight": "model-00010-of-00014.safetensors",
+ "model.layers.46.mlp.down_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.46.mlp.gate_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.46.mlp.up_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.46.post_attention_layernorm.weight": "model-00010-of-00014.safetensors",
+ "model.layers.46.self_attn.k_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.46.self_attn.k_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.46.self_attn.o_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.46.self_attn.q_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.46.self_attn.q_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.46.self_attn.v_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.46.self_attn.v_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.47.input_layernorm.weight": "model-00010-of-00014.safetensors",
+ "model.layers.47.mlp.down_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.47.mlp.gate_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.47.mlp.up_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.47.post_attention_layernorm.weight": "model-00010-of-00014.safetensors",
+ "model.layers.47.self_attn.k_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.47.self_attn.k_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.47.self_attn.o_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.47.self_attn.q_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.47.self_attn.q_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.47.self_attn.v_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.47.self_attn.v_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.48.input_layernorm.weight": "model-00011-of-00014.safetensors",
+ "model.layers.48.mlp.down_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.48.mlp.gate_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.48.mlp.up_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.48.post_attention_layernorm.weight": "model-00011-of-00014.safetensors",
+ "model.layers.48.self_attn.k_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.48.self_attn.k_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.48.self_attn.o_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.48.self_attn.q_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.48.self_attn.q_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.48.self_attn.v_proj.bias": "model-00010-of-00014.safetensors",
+ "model.layers.48.self_attn.v_proj.weight": "model-00010-of-00014.safetensors",
+ "model.layers.49.input_layernorm.weight": "model-00011-of-00014.safetensors",
+ "model.layers.49.mlp.down_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.49.mlp.gate_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.49.mlp.up_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.49.post_attention_layernorm.weight": "model-00011-of-00014.safetensors",
+ "model.layers.49.self_attn.k_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.49.self_attn.k_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.49.self_attn.o_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.49.self_attn.q_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.49.self_attn.q_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.49.self_attn.v_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.49.self_attn.v_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00002-of-00014.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00014.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.50.input_layernorm.weight": "model-00011-of-00014.safetensors",
+ "model.layers.50.mlp.down_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.50.mlp.gate_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.50.mlp.up_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.50.post_attention_layernorm.weight": "model-00011-of-00014.safetensors",
+ "model.layers.50.self_attn.k_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.50.self_attn.k_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.50.self_attn.o_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.50.self_attn.q_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.50.self_attn.q_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.50.self_attn.v_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.50.self_attn.v_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.51.input_layernorm.weight": "model-00011-of-00014.safetensors",
+ "model.layers.51.mlp.down_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.51.mlp.gate_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.51.mlp.up_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.51.post_attention_layernorm.weight": "model-00011-of-00014.safetensors",
+ "model.layers.51.self_attn.k_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.51.self_attn.k_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.51.self_attn.o_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.51.self_attn.q_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.51.self_attn.q_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.51.self_attn.v_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.51.self_attn.v_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.52.input_layernorm.weight": "model-00011-of-00014.safetensors",
+ "model.layers.52.mlp.down_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.52.mlp.gate_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.52.mlp.up_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.52.post_attention_layernorm.weight": "model-00011-of-00014.safetensors",
+ "model.layers.52.self_attn.k_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.52.self_attn.k_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.52.self_attn.o_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.52.self_attn.q_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.52.self_attn.q_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.52.self_attn.v_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.52.self_attn.v_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.53.input_layernorm.weight": "model-00012-of-00014.safetensors",
+ "model.layers.53.mlp.down_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.53.mlp.gate_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.53.mlp.up_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.53.post_attention_layernorm.weight": "model-00012-of-00014.safetensors",
+ "model.layers.53.self_attn.k_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.53.self_attn.k_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.53.self_attn.o_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.53.self_attn.q_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.53.self_attn.q_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.53.self_attn.v_proj.bias": "model-00011-of-00014.safetensors",
+ "model.layers.53.self_attn.v_proj.weight": "model-00011-of-00014.safetensors",
+ "model.layers.54.input_layernorm.weight": "model-00012-of-00014.safetensors",
+ "model.layers.54.mlp.down_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.54.mlp.gate_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.54.mlp.up_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.54.post_attention_layernorm.weight": "model-00012-of-00014.safetensors",
+ "model.layers.54.self_attn.k_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.54.self_attn.k_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.54.self_attn.o_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.54.self_attn.q_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.54.self_attn.q_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.54.self_attn.v_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.54.self_attn.v_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.55.input_layernorm.weight": "model-00012-of-00014.safetensors",
+ "model.layers.55.mlp.down_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.55.mlp.gate_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.55.mlp.up_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.55.post_attention_layernorm.weight": "model-00012-of-00014.safetensors",
+ "model.layers.55.self_attn.k_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.55.self_attn.k_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.55.self_attn.o_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.55.self_attn.q_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.55.self_attn.q_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.55.self_attn.v_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.55.self_attn.v_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.56.input_layernorm.weight": "model-00012-of-00014.safetensors",
+ "model.layers.56.mlp.down_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.56.mlp.gate_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.56.mlp.up_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.56.post_attention_layernorm.weight": "model-00012-of-00014.safetensors",
+ "model.layers.56.self_attn.k_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.56.self_attn.k_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.56.self_attn.o_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.56.self_attn.q_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.56.self_attn.q_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.56.self_attn.v_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.56.self_attn.v_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.57.input_layernorm.weight": "model-00012-of-00014.safetensors",
+ "model.layers.57.mlp.down_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.57.mlp.gate_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.57.mlp.up_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.57.post_attention_layernorm.weight": "model-00012-of-00014.safetensors",
+ "model.layers.57.self_attn.k_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.57.self_attn.k_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.57.self_attn.o_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.57.self_attn.q_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.57.self_attn.q_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.57.self_attn.v_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.57.self_attn.v_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.58.input_layernorm.weight": "model-00013-of-00014.safetensors",
+ "model.layers.58.mlp.down_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.58.mlp.gate_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.58.mlp.up_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.58.post_attention_layernorm.weight": "model-00013-of-00014.safetensors",
+ "model.layers.58.self_attn.k_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.58.self_attn.k_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.58.self_attn.o_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.58.self_attn.q_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.58.self_attn.q_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.58.self_attn.v_proj.bias": "model-00012-of-00014.safetensors",
+ "model.layers.58.self_attn.v_proj.weight": "model-00012-of-00014.safetensors",
+ "model.layers.59.input_layernorm.weight": "model-00013-of-00014.safetensors",
+ "model.layers.59.mlp.down_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.59.mlp.gate_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.59.mlp.up_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.59.post_attention_layernorm.weight": "model-00013-of-00014.safetensors",
+ "model.layers.59.self_attn.k_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.59.self_attn.k_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.59.self_attn.o_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.59.self_attn.q_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.59.self_attn.q_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.59.self_attn.v_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.59.self_attn.v_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00002-of-00014.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00014.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.60.input_layernorm.weight": "model-00013-of-00014.safetensors",
+ "model.layers.60.mlp.down_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.60.mlp.gate_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.60.mlp.up_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.60.post_attention_layernorm.weight": "model-00013-of-00014.safetensors",
+ "model.layers.60.self_attn.k_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.60.self_attn.k_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.60.self_attn.o_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.60.self_attn.q_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.60.self_attn.q_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.60.self_attn.v_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.60.self_attn.v_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.61.input_layernorm.weight": "model-00013-of-00014.safetensors",
+ "model.layers.61.mlp.down_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.61.mlp.gate_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.61.mlp.up_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.61.post_attention_layernorm.weight": "model-00013-of-00014.safetensors",
+ "model.layers.61.self_attn.k_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.61.self_attn.k_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.61.self_attn.o_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.61.self_attn.q_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.61.self_attn.q_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.61.self_attn.v_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.61.self_attn.v_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.62.input_layernorm.weight": "model-00013-of-00014.safetensors",
+ "model.layers.62.mlp.down_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.62.mlp.gate_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.62.mlp.up_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.62.post_attention_layernorm.weight": "model-00013-of-00014.safetensors",
+ "model.layers.62.self_attn.k_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.62.self_attn.k_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.62.self_attn.o_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.62.self_attn.q_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.62.self_attn.q_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.62.self_attn.v_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.62.self_attn.v_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.63.input_layernorm.weight": "model-00014-of-00014.safetensors",
+ "model.layers.63.mlp.down_proj.weight": "model-00014-of-00014.safetensors",
+ "model.layers.63.mlp.gate_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.63.mlp.up_proj.weight": "model-00014-of-00014.safetensors",
+ "model.layers.63.post_attention_layernorm.weight": "model-00014-of-00014.safetensors",
+ "model.layers.63.self_attn.k_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.63.self_attn.k_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.63.self_attn.o_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.63.self_attn.q_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.63.self_attn.q_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.63.self_attn.v_proj.bias": "model-00013-of-00014.safetensors",
+ "model.layers.63.self_attn.v_proj.weight": "model-00013-of-00014.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00002-of-00014.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00014.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00003-of-00014.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00003-of-00014.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00002-of-00014.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00014.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00003-of-00014.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00003-of-00014.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00003-of-00014.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00003-of-00014.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00003-of-00014.safetensors",
+ "model.norm.weight": "model-00014-of-00014.safetensors"
+ }
+}
diff --git a/rng_state_0.pth b/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..46068b4fc6097787f30523e11417e4a113e27883
--- /dev/null
+++ b/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e335f544caa857d11abf9cb09880353f64949e255a62dc5453b96847c561f0ac
+size 15984
diff --git a/rng_state_1.pth b/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d7092c9972e785c5da458db9d5c880beb4a856a4
--- /dev/null
+++ b/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3386b1e39d8e02e3e871adae85560cf234b867a3c992efc33f75936ee1f2d78
+size 15984
diff --git a/rng_state_10.pth b/rng_state_10.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9be16cd55cc9768a6f9b5163df776f52ffc03b9c
--- /dev/null
+++ b/rng_state_10.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15e65e76a59c96de4c86f07894dae30ba72e43712e51b60ee0252c9eca8cff8a
+size 15997
diff --git a/rng_state_11.pth b/rng_state_11.pth
new file mode 100644
index 0000000000000000000000000000000000000000..5fff806f8d7db85842234ecec50e86426f4da703
--- /dev/null
+++ b/rng_state_11.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:905970d8f8cc31dbfba5bba06e6840ba7af623eff6119e6222e4bb45f9741c82
+size 15997
diff --git a/rng_state_12.pth b/rng_state_12.pth
new file mode 100644
index 0000000000000000000000000000000000000000..885665be262b4ed828952f295d608041356ea827
--- /dev/null
+++ b/rng_state_12.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26935a49181087389f9799801be503e8f54c810b1f2ee76a91c3cff98c6e3860
+size 15997
diff --git a/rng_state_13.pth b/rng_state_13.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e41b3b0fbac93937b7abd8e014589ee776b84c82
--- /dev/null
+++ b/rng_state_13.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f2aa1bbac9fd89cb076394addab817f8dbdfebbe42db79cbdb3a6db402e3e93
+size 15997
diff --git a/rng_state_14.pth b/rng_state_14.pth
new file mode 100644
index 0000000000000000000000000000000000000000..79c66468f8801afaecfd987e0c2263e93cc59fa6
--- /dev/null
+++ b/rng_state_14.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5434266b1c1767aaaeac296f20e1158f0a2705db0a8a041faf257a0996da3e0
+size 15997
diff --git a/rng_state_15.pth b/rng_state_15.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3f2a1237f462df8948f771cbba58bbbcd826e3a0
--- /dev/null
+++ b/rng_state_15.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf70262d30f04edb74f1c70a11e12da43055fd4e7906d609275cc3c2f73a7a0a
+size 15997
diff --git a/rng_state_16.pth b/rng_state_16.pth
new file mode 100644
index 0000000000000000000000000000000000000000..448e52e8790b4da0efba1efb3f28e813d3717872
--- /dev/null
+++ b/rng_state_16.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c52cfac0373c845e6586ef800b29a39af51cd10b3daa72ddac6e253e0ca14feb
+size 15997
diff --git a/rng_state_17.pth b/rng_state_17.pth
new file mode 100644
index 0000000000000000000000000000000000000000..21a7122e47cfa761739c8e9b66758d64c313518d
--- /dev/null
+++ b/rng_state_17.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46df6b8f5a1fe1f5903da63aee47a8a4c56b8eab221656d85e08611b67d8e89b
+size 15997
diff --git a/rng_state_18.pth b/rng_state_18.pth
new file mode 100644
index 0000000000000000000000000000000000000000..de256ce042476a54fa53aa36a7793a9e7aecdd3d
--- /dev/null
+++ b/rng_state_18.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f9f5d62083fd9d0da74154f8ad6c5b0a832f40782a8d5a455dec4c37bd16d41
+size 15997
diff --git a/rng_state_19.pth b/rng_state_19.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ef3917591261106eadcc4f411c6595a418552f5e
--- /dev/null
+++ b/rng_state_19.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e719f5bf34f0595f87cc53c4eceac6602486254d622eef76b3dc6e463efce2d
+size 15997
diff --git a/rng_state_2.pth b/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0d0a6dfe5bbf8e662bf730780e5625c728c22e57
--- /dev/null
+++ b/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9cdc3bc69c0cdbf403bd33fe473b728c8b289a359ceccbc0a9aa47bbd9d17b5
+size 15984
diff --git a/rng_state_20.pth b/rng_state_20.pth
new file mode 100644
index 0000000000000000000000000000000000000000..41d4db8a2ffcbe4ad542cec132838de0f012e5b3
--- /dev/null
+++ b/rng_state_20.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5af05efedf3722d068e7d3dd11f54f06d7ba258d66b1c760a301e8fcdd84403
+size 15997
diff --git a/rng_state_21.pth b/rng_state_21.pth
new file mode 100644
index 0000000000000000000000000000000000000000..38604994d5beb25ba0562ae1498b0a7948b6a903
--- /dev/null
+++ b/rng_state_21.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd51c1d11241fc18a3ccebbb28323a290be6e1e77f64e8714cf7d1030959865e
+size 15997
diff --git a/rng_state_22.pth b/rng_state_22.pth
new file mode 100644
index 0000000000000000000000000000000000000000..17b7402c24d08e34c118aa11fb63800fc7a08d74
--- /dev/null
+++ b/rng_state_22.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:070eacef7d71ed313d6659b86e6d704d2012c274b6c8800c312750bfb3c922a5
+size 15997
diff --git a/rng_state_23.pth b/rng_state_23.pth
new file mode 100644
index 0000000000000000000000000000000000000000..93e70fcf291956bb29f11eadda1456fe25c0ca4a
--- /dev/null
+++ b/rng_state_23.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a80d93c8fd464eb5b769038b80fd4a6ecdb7641ccc90f9c5af4a315c07d3987f
+size 15997
diff --git a/rng_state_24.pth b/rng_state_24.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f1d492b1c8fc8a41b4ca46c56537b951be3527b6
--- /dev/null
+++ b/rng_state_24.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e23e9b367dbac6b297d438a26180c7fc93a11bba8cbf2f37a11cacb009c1322
+size 15997
diff --git a/rng_state_25.pth b/rng_state_25.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fa0370e675cc04ee5ab8dc09e3876092c7a5c56c
--- /dev/null
+++ b/rng_state_25.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e51c3fd06eb3c5c481815ec15572044e5a3330788f7eb5ed067febe72a07ba9e
+size 15997
diff --git a/rng_state_26.pth b/rng_state_26.pth
new file mode 100644
index 0000000000000000000000000000000000000000..14c0224fbdf801fb9b46fdb6bb238a06c00d4976
--- /dev/null
+++ b/rng_state_26.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b0894d1b21858f18c461036239063b792b33d89a33a4c9c2c84c7546b5f399a
+size 15997
diff --git a/rng_state_27.pth b/rng_state_27.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8a788c98e02a74ef74252ee5b15801e03e201a46
--- /dev/null
+++ b/rng_state_27.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:710d6e2cafcfcfe35fff0344fdda019a5e0d451c159ed5435814dbcda731beeb
+size 15997
diff --git a/rng_state_28.pth b/rng_state_28.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a6220b3716f6e1b0bfea577f70814a9ef7a68d09
--- /dev/null
+++ b/rng_state_28.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:777ef408657b39dd3077693f0d665fa722f9aca7dfb4afd89addd316fb0916bb
+size 15997
diff --git a/rng_state_29.pth b/rng_state_29.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9762eff542079675bc488e9a04b7cee50c7d4f80
--- /dev/null
+++ b/rng_state_29.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4018fa6c37cd43d0a752a4014ea9afe2e4b672cea6121f457c147db04f75eeaf
+size 15997
diff --git a/rng_state_3.pth b/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..07ac7a01796476c31aa6677bacad25014b7db7be
--- /dev/null
+++ b/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8633ec6c78fdc9b764706dc4a108d9778e4be03084ed075204c83816bfb0b594
+size 15984
diff --git a/rng_state_30.pth b/rng_state_30.pth
new file mode 100644
index 0000000000000000000000000000000000000000..49aca6a7de7fbb46d730859d976cae5cd1e00693
--- /dev/null
+++ b/rng_state_30.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d86502640b6039f2222f78077c96b6daa375f1cec79c73d8db150fb1d1aff063
+size 15997
diff --git a/rng_state_31.pth b/rng_state_31.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4d5513d0f9bf1f0922455890f05a5f443942d3a4
--- /dev/null
+++ b/rng_state_31.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e71db23ccc526fc810d4a1b7edd23bdc728f655b465d304999d7c23f21056ff
+size 15997
diff --git a/rng_state_4.pth b/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..af1d7f32e3fa47e3a752b3424eaf61f874cc24e8
--- /dev/null
+++ b/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e266d26e5111e7ee3fd6b754955dcc6e942824318775a3d91f136d08aff277d
+size 15984
diff --git a/rng_state_5.pth b/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ca7527b667b3034830e3778a78144bca02c84c22
--- /dev/null
+++ b/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:506ce4f9f78a95adeea3bdf6fd3c37022550c880c097f9a5097be36b0a685ef8
+size 15984
diff --git a/rng_state_6.pth b/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..dcb4c32ab693b0c78c5fda872ffd70bac0a8a060
--- /dev/null
+++ b/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:475cd26f1278f1ce58936112317402ff5ce4e15a7fad5d733299e542617bb52b
+size 15984
diff --git a/rng_state_7.pth b/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..270ddd9c5010cfa53de5af14705d684b31aa6551
--- /dev/null
+++ b/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cf3ddb2fd167ef380ad3ca8a0fa303c8e0476c4aab0f31bb9a6945306d21e50
+size 15984
diff --git a/rng_state_8.pth b/rng_state_8.pth
new file mode 100644
index 0000000000000000000000000000000000000000..713bbddff7c1e190b9c9a54c782a821d3537e41e
--- /dev/null
+++ b/rng_state_8.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b97da1018fae3e5c078c125d71c4e808fef9b2c7ee68d84492ac4f68eab5901
+size 15984
diff --git a/rng_state_9.pth b/rng_state_9.pth
new file mode 100644
index 0000000000000000000000000000000000000000..cd7488616c391beaf1e52b32e664c4cc5de8d0d3
--- /dev/null
+++ b/rng_state_9.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44dfd219f0dba4a3e6e24a07f59c8cf9512b107a87276ce458c2478f2b8ff921
+size 15984
diff --git a/scheduler.pt b/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0e02b0f6f051a212e2802ff511283ef9f7899c05
--- /dev/null
+++ b/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ab25ee6fd5d6a6b74cf8caac100f20349c6ea49961cdcfa21eb4e85ef53de66
+size 1064
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..6bd9d66021dd663b22e84f26c1788e26a88cc22e
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end_of_text|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..dc1953ef221005a7d58ae5bf737d693f393ca40c
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a69acec4e98f98777b2cf45d384dd34efb45f65a75ce02b13ffbb15f318ac1ff
+size 11422970
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2bc5c64406bd47d00ee487e7b65c2d9d53ca7517
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,203 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151665": {
+ "content": "<|end_of_text|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool 
%}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end_of_text|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/trainer_state.json b/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..3e7a17140cc47f01eb192c560961002d03d37c95
--- /dev/null
+++ b/trainer_state.json
@@ -0,0 +1,17882 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 19.0,
+ "eval_steps": 133,
+ "global_step": 2527,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.007518796992481203,
+ "grad_norm": 3.0947007522786545,
+ "learning_rate": 1.5151515151515152e-08,
+ "loss": 0.6385,
+ "step": 1
+ },
+ {
+ "epoch": 0.007518796992481203,
+ "eval_loss": 0.6617226004600525,
+ "eval_runtime": 36.2097,
+ "eval_samples_per_second": 12.345,
+ "eval_steps_per_second": 0.193,
+ "step": 1
+ },
+ {
+ "epoch": 0.015037593984962405,
+ "grad_norm": 3.0901832870211545,
+ "learning_rate": 3.0303030303030305e-08,
+ "loss": 0.6435,
+ "step": 2
+ },
+ {
+ "epoch": 0.022556390977443608,
+ "grad_norm": 3.0017586226585764,
+ "learning_rate": 4.545454545454545e-08,
+ "loss": 0.6302,
+ "step": 3
+ },
+ {
+ "epoch": 0.03007518796992481,
+ "grad_norm": 3.069672985149119,
+ "learning_rate": 6.060606060606061e-08,
+ "loss": 0.6138,
+ "step": 4
+ },
+ {
+ "epoch": 0.03759398496240601,
+ "grad_norm": 2.9288456669302523,
+ "learning_rate": 7.575757575757576e-08,
+ "loss": 0.6202,
+ "step": 5
+ },
+ {
+ "epoch": 0.045112781954887216,
+ "grad_norm": 3.0971837945349803,
+ "learning_rate": 9.09090909090909e-08,
+ "loss": 0.6348,
+ "step": 6
+ },
+ {
+ "epoch": 0.05263157894736842,
+ "grad_norm": 3.0272849858343798,
+ "learning_rate": 1.0606060606060605e-07,
+ "loss": 0.6274,
+ "step": 7
+ },
+ {
+ "epoch": 0.06015037593984962,
+ "grad_norm": 3.06526733180596,
+ "learning_rate": 1.2121212121212122e-07,
+ "loss": 0.6334,
+ "step": 8
+ },
+ {
+ "epoch": 0.06766917293233082,
+ "grad_norm": 3.1217050520444727,
+ "learning_rate": 1.3636363636363635e-07,
+ "loss": 0.6357,
+ "step": 9
+ },
+ {
+ "epoch": 0.07518796992481203,
+ "grad_norm": 3.010541943465656,
+ "learning_rate": 1.5151515151515152e-07,
+ "loss": 0.634,
+ "step": 10
+ },
+ {
+ "epoch": 0.08270676691729323,
+ "grad_norm": 3.0562404173101605,
+ "learning_rate": 1.6666666666666665e-07,
+ "loss": 0.634,
+ "step": 11
+ },
+ {
+ "epoch": 0.09022556390977443,
+ "grad_norm": 3.100270675465523,
+ "learning_rate": 1.818181818181818e-07,
+ "loss": 0.631,
+ "step": 12
+ },
+ {
+ "epoch": 0.09774436090225563,
+ "grad_norm": 3.153839735295146,
+ "learning_rate": 1.9696969696969696e-07,
+ "loss": 0.6401,
+ "step": 13
+ },
+ {
+ "epoch": 0.10526315789473684,
+ "grad_norm": 3.0366806584378883,
+ "learning_rate": 2.121212121212121e-07,
+ "loss": 0.6176,
+ "step": 14
+ },
+ {
+ "epoch": 0.11278195488721804,
+ "grad_norm": 3.1629250911154108,
+ "learning_rate": 2.2727272727272726e-07,
+ "loss": 0.6257,
+ "step": 15
+ },
+ {
+ "epoch": 0.12030075187969924,
+ "grad_norm": 3.1229905261936506,
+ "learning_rate": 2.4242424242424244e-07,
+ "loss": 0.6326,
+ "step": 16
+ },
+ {
+ "epoch": 0.12781954887218044,
+ "grad_norm": 3.001167909959741,
+ "learning_rate": 2.5757575757575754e-07,
+ "loss": 0.6124,
+ "step": 17
+ },
+ {
+ "epoch": 0.13533834586466165,
+ "grad_norm": 3.16541700973941,
+ "learning_rate": 2.727272727272727e-07,
+ "loss": 0.6321,
+ "step": 18
+ },
+ {
+ "epoch": 0.14285714285714285,
+ "grad_norm": 3.010964193866785,
+ "learning_rate": 2.878787878787879e-07,
+ "loss": 0.6143,
+ "step": 19
+ },
+ {
+ "epoch": 0.15037593984962405,
+ "grad_norm": 3.163021954141743,
+ "learning_rate": 3.0303030303030305e-07,
+ "loss": 0.628,
+ "step": 20
+ },
+ {
+ "epoch": 0.15789473684210525,
+ "grad_norm": 3.1384769742446323,
+ "learning_rate": 3.1818181818181815e-07,
+ "loss": 0.6238,
+ "step": 21
+ },
+ {
+ "epoch": 0.16541353383458646,
+ "grad_norm": 3.1380200657337696,
+ "learning_rate": 3.333333333333333e-07,
+ "loss": 0.6186,
+ "step": 22
+ },
+ {
+ "epoch": 0.17293233082706766,
+ "grad_norm": 2.978314325061019,
+ "learning_rate": 3.484848484848485e-07,
+ "loss": 0.6114,
+ "step": 23
+ },
+ {
+ "epoch": 0.18045112781954886,
+ "grad_norm": 3.1434934635990794,
+ "learning_rate": 3.636363636363636e-07,
+ "loss": 0.5882,
+ "step": 24
+ },
+ {
+ "epoch": 0.18796992481203006,
+ "grad_norm": 2.996029155136013,
+ "learning_rate": 3.7878787878787876e-07,
+ "loss": 0.5789,
+ "step": 25
+ },
+ {
+ "epoch": 0.19548872180451127,
+ "grad_norm": 2.9499054488620198,
+ "learning_rate": 3.939393939393939e-07,
+ "loss": 0.5885,
+ "step": 26
+ },
+ {
+ "epoch": 0.20300751879699247,
+ "grad_norm": 2.7791354395728787,
+ "learning_rate": 4.090909090909091e-07,
+ "loss": 0.5627,
+ "step": 27
+ },
+ {
+ "epoch": 0.21052631578947367,
+ "grad_norm": 2.862368255255815,
+ "learning_rate": 4.242424242424242e-07,
+ "loss": 0.5785,
+ "step": 28
+ },
+ {
+ "epoch": 0.21804511278195488,
+ "grad_norm": 2.7023209697303123,
+ "learning_rate": 4.3939393939393937e-07,
+ "loss": 0.5726,
+ "step": 29
+ },
+ {
+ "epoch": 0.22556390977443608,
+ "grad_norm": 2.6977160294245923,
+ "learning_rate": 4.545454545454545e-07,
+ "loss": 0.574,
+ "step": 30
+ },
+ {
+ "epoch": 0.23308270676691728,
+ "grad_norm": 2.6439820897036608,
+ "learning_rate": 4.696969696969697e-07,
+ "loss": 0.5651,
+ "step": 31
+ },
+ {
+ "epoch": 0.24060150375939848,
+ "grad_norm": 2.6910609972416357,
+ "learning_rate": 4.848484848484849e-07,
+ "loss": 0.5727,
+ "step": 32
+ },
+ {
+ "epoch": 0.24812030075187969,
+ "grad_norm": 2.1298258019433116,
+ "learning_rate": 5e-07,
+ "loss": 0.5383,
+ "step": 33
+ },
+ {
+ "epoch": 0.2556390977443609,
+ "grad_norm": 1.9327652708359986,
+ "learning_rate": 5.151515151515151e-07,
+ "loss": 0.5111,
+ "step": 34
+ },
+ {
+ "epoch": 0.2631578947368421,
+ "grad_norm": 1.9639944765005037,
+ "learning_rate": 5.303030303030303e-07,
+ "loss": 0.5229,
+ "step": 35
+ },
+ {
+ "epoch": 0.2706766917293233,
+ "grad_norm": 1.900798905934905,
+ "learning_rate": 5.454545454545454e-07,
+ "loss": 0.5143,
+ "step": 36
+ },
+ {
+ "epoch": 0.2781954887218045,
+ "grad_norm": 1.9405565207869795,
+ "learning_rate": 5.606060606060605e-07,
+ "loss": 0.5067,
+ "step": 37
+ },
+ {
+ "epoch": 0.2857142857142857,
+ "grad_norm": 1.9877453517397594,
+ "learning_rate": 5.757575757575758e-07,
+ "loss": 0.5236,
+ "step": 38
+ },
+ {
+ "epoch": 0.2932330827067669,
+ "grad_norm": 1.811016844285034,
+ "learning_rate": 5.909090909090909e-07,
+ "loss": 0.4901,
+ "step": 39
+ },
+ {
+ "epoch": 0.3007518796992481,
+ "grad_norm": 1.8678454531184236,
+ "learning_rate": 6.060606060606061e-07,
+ "loss": 0.503,
+ "step": 40
+ },
+ {
+ "epoch": 0.3082706766917293,
+ "grad_norm": 1.8321835136284517,
+ "learning_rate": 6.212121212121212e-07,
+ "loss": 0.4882,
+ "step": 41
+ },
+ {
+ "epoch": 0.3157894736842105,
+ "grad_norm": 1.7419079239553439,
+ "learning_rate": 6.363636363636363e-07,
+ "loss": 0.4905,
+ "step": 42
+ },
+ {
+ "epoch": 0.3233082706766917,
+ "grad_norm": 1.574988102908275,
+ "learning_rate": 6.515151515151515e-07,
+ "loss": 0.4799,
+ "step": 43
+ },
+ {
+ "epoch": 0.3308270676691729,
+ "grad_norm": 1.530484241752006,
+ "learning_rate": 6.666666666666666e-07,
+ "loss": 0.4609,
+ "step": 44
+ },
+ {
+ "epoch": 0.3383458646616541,
+ "grad_norm": 1.477391031263001,
+ "learning_rate": 6.818181818181817e-07,
+ "loss": 0.4591,
+ "step": 45
+ },
+ {
+ "epoch": 0.3458646616541353,
+ "grad_norm": 1.4251095754183176,
+ "learning_rate": 6.96969696969697e-07,
+ "loss": 0.4548,
+ "step": 46
+ },
+ {
+ "epoch": 0.3533834586466165,
+ "grad_norm": 1.2960852948060615,
+ "learning_rate": 7.121212121212121e-07,
+ "loss": 0.4275,
+ "step": 47
+ },
+ {
+ "epoch": 0.3609022556390977,
+ "grad_norm": 1.3007235936338233,
+ "learning_rate": 7.272727272727272e-07,
+ "loss": 0.4264,
+ "step": 48
+ },
+ {
+ "epoch": 0.3684210526315789,
+ "grad_norm": 1.2598567595619259,
+ "learning_rate": 7.424242424242424e-07,
+ "loss": 0.4221,
+ "step": 49
+ },
+ {
+ "epoch": 0.37593984962406013,
+ "grad_norm": 1.2614826698236414,
+ "learning_rate": 7.575757575757575e-07,
+ "loss": 0.4243,
+ "step": 50
+ },
+ {
+ "epoch": 0.38345864661654133,
+ "grad_norm": 1.2131056120642278,
+ "learning_rate": 7.727272727272727e-07,
+ "loss": 0.41,
+ "step": 51
+ },
+ {
+ "epoch": 0.39097744360902253,
+ "grad_norm": 1.2301471635606012,
+ "learning_rate": 7.878787878787878e-07,
+ "loss": 0.4096,
+ "step": 52
+ },
+ {
+ "epoch": 0.39849624060150374,
+ "grad_norm": 1.1422058202227148,
+ "learning_rate": 8.030303030303029e-07,
+ "loss": 0.3949,
+ "step": 53
+ },
+ {
+ "epoch": 0.40601503759398494,
+ "grad_norm": 1.1544456640986898,
+ "learning_rate": 8.181818181818182e-07,
+ "loss": 0.4072,
+ "step": 54
+ },
+ {
+ "epoch": 0.41353383458646614,
+ "grad_norm": 1.0968886519936152,
+ "learning_rate": 8.333333333333333e-07,
+ "loss": 0.3963,
+ "step": 55
+ },
+ {
+ "epoch": 0.42105263157894735,
+ "grad_norm": 1.0792859115543776,
+ "learning_rate": 8.484848484848484e-07,
+ "loss": 0.3965,
+ "step": 56
+ },
+ {
+ "epoch": 0.42857142857142855,
+ "grad_norm": 1.0648568884379008,
+ "learning_rate": 8.636363636363636e-07,
+ "loss": 0.3884,
+ "step": 57
+ },
+ {
+ "epoch": 0.43609022556390975,
+ "grad_norm": 1.057568515707388,
+ "learning_rate": 8.787878787878787e-07,
+ "loss": 0.3888,
+ "step": 58
+ },
+ {
+ "epoch": 0.44360902255639095,
+ "grad_norm": 0.9346399159872492,
+ "learning_rate": 8.939393939393938e-07,
+ "loss": 0.376,
+ "step": 59
+ },
+ {
+ "epoch": 0.45112781954887216,
+ "grad_norm": 0.786354007216819,
+ "learning_rate": 9.09090909090909e-07,
+ "loss": 0.3491,
+ "step": 60
+ },
+ {
+ "epoch": 0.45864661654135336,
+ "grad_norm": 0.7278392951725713,
+ "learning_rate": 9.242424242424241e-07,
+ "loss": 0.3709,
+ "step": 61
+ },
+ {
+ "epoch": 0.46616541353383456,
+ "grad_norm": 0.6327040473902301,
+ "learning_rate": 9.393939393939395e-07,
+ "loss": 0.3566,
+ "step": 62
+ },
+ {
+ "epoch": 0.47368421052631576,
+ "grad_norm": 0.5625803265903285,
+ "learning_rate": 9.545454545454546e-07,
+ "loss": 0.3415,
+ "step": 63
+ },
+ {
+ "epoch": 0.48120300751879697,
+ "grad_norm": 0.5265140474450013,
+ "learning_rate": 9.696969696969698e-07,
+ "loss": 0.3441,
+ "step": 64
+ },
+ {
+ "epoch": 0.48872180451127817,
+ "grad_norm": 0.5046532795980826,
+ "learning_rate": 9.848484848484847e-07,
+ "loss": 0.3444,
+ "step": 65
+ },
+ {
+ "epoch": 0.49624060150375937,
+ "grad_norm": 0.4698808826606394,
+ "learning_rate": 1e-06,
+ "loss": 0.3367,
+ "step": 66
+ },
+ {
+ "epoch": 0.5037593984962406,
+ "grad_norm": 0.4617407333174884,
+ "learning_rate": 1.0151515151515152e-06,
+ "loss": 0.3327,
+ "step": 67
+ },
+ {
+ "epoch": 0.5112781954887218,
+ "grad_norm": 0.482687919087654,
+ "learning_rate": 1.0303030303030302e-06,
+ "loss": 0.3375,
+ "step": 68
+ },
+ {
+ "epoch": 0.518796992481203,
+ "grad_norm": 0.4413301861828575,
+ "learning_rate": 1.0454545454545454e-06,
+ "loss": 0.3293,
+ "step": 69
+ },
+ {
+ "epoch": 0.5263157894736842,
+ "grad_norm": 0.4290911250504892,
+ "learning_rate": 1.0606060606060606e-06,
+ "loss": 0.3298,
+ "step": 70
+ },
+ {
+ "epoch": 0.5338345864661654,
+ "grad_norm": 0.37597389221056043,
+ "learning_rate": 1.0757575757575756e-06,
+ "loss": 0.3252,
+ "step": 71
+ },
+ {
+ "epoch": 0.5413533834586466,
+ "grad_norm": 0.3902542237928673,
+ "learning_rate": 1.0909090909090908e-06,
+ "loss": 0.3288,
+ "step": 72
+ },
+ {
+ "epoch": 0.5488721804511278,
+ "grad_norm": 0.3972300534884243,
+ "learning_rate": 1.106060606060606e-06,
+ "loss": 0.327,
+ "step": 73
+ },
+ {
+ "epoch": 0.556390977443609,
+ "grad_norm": 0.37971034374033324,
+ "learning_rate": 1.121212121212121e-06,
+ "loss": 0.3284,
+ "step": 74
+ },
+ {
+ "epoch": 0.5639097744360902,
+ "grad_norm": 0.38664592413659205,
+ "learning_rate": 1.1363636363636364e-06,
+ "loss": 0.3142,
+ "step": 75
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 0.40972977534330796,
+ "learning_rate": 1.1515151515151516e-06,
+ "loss": 0.3221,
+ "step": 76
+ },
+ {
+ "epoch": 0.5789473684210527,
+ "grad_norm": 0.37766212929495485,
+ "learning_rate": 1.1666666666666668e-06,
+ "loss": 0.3143,
+ "step": 77
+ },
+ {
+ "epoch": 0.5864661654135338,
+ "grad_norm": 0.34567412490494304,
+ "learning_rate": 1.1818181818181818e-06,
+ "loss": 0.3208,
+ "step": 78
+ },
+ {
+ "epoch": 0.5939849624060151,
+ "grad_norm": 0.32912199821025606,
+ "learning_rate": 1.196969696969697e-06,
+ "loss": 0.3049,
+ "step": 79
+ },
+ {
+ "epoch": 0.6015037593984962,
+ "grad_norm": 0.3281408761403813,
+ "learning_rate": 1.2121212121212122e-06,
+ "loss": 0.3113,
+ "step": 80
+ },
+ {
+ "epoch": 0.6090225563909775,
+ "grad_norm": 0.30636133898523926,
+ "learning_rate": 1.2272727272727272e-06,
+ "loss": 0.3298,
+ "step": 81
+ },
+ {
+ "epoch": 0.6165413533834586,
+ "grad_norm": 0.2908108768710968,
+ "learning_rate": 1.2424242424242424e-06,
+ "loss": 0.3131,
+ "step": 82
+ },
+ {
+ "epoch": 0.6240601503759399,
+ "grad_norm": 0.28670755556500105,
+ "learning_rate": 1.2575757575757576e-06,
+ "loss": 0.2976,
+ "step": 83
+ },
+ {
+ "epoch": 0.631578947368421,
+ "grad_norm": 0.28528128086127563,
+ "learning_rate": 1.2727272727272726e-06,
+ "loss": 0.3193,
+ "step": 84
+ },
+ {
+ "epoch": 0.6390977443609023,
+ "grad_norm": 0.2792441071880718,
+ "learning_rate": 1.2878787878787878e-06,
+ "loss": 0.323,
+ "step": 85
+ },
+ {
+ "epoch": 0.6466165413533834,
+ "grad_norm": 0.27240192017196374,
+ "learning_rate": 1.303030303030303e-06,
+ "loss": 0.3177,
+ "step": 86
+ },
+ {
+ "epoch": 0.6541353383458647,
+ "grad_norm": 0.256995174992382,
+ "learning_rate": 1.318181818181818e-06,
+ "loss": 0.3056,
+ "step": 87
+ },
+ {
+ "epoch": 0.6616541353383458,
+ "grad_norm": 0.2798996527170617,
+ "learning_rate": 1.3333333333333332e-06,
+ "loss": 0.3029,
+ "step": 88
+ },
+ {
+ "epoch": 0.6691729323308271,
+ "grad_norm": 0.27532848112696073,
+ "learning_rate": 1.3484848484848484e-06,
+ "loss": 0.2988,
+ "step": 89
+ },
+ {
+ "epoch": 0.6766917293233082,
+ "grad_norm": 0.2478878836550549,
+ "learning_rate": 1.3636363636363634e-06,
+ "loss": 0.2969,
+ "step": 90
+ },
+ {
+ "epoch": 0.6842105263157895,
+ "grad_norm": 0.2405770932063179,
+ "learning_rate": 1.3787878787878788e-06,
+ "loss": 0.2984,
+ "step": 91
+ },
+ {
+ "epoch": 0.6917293233082706,
+ "grad_norm": 0.25506023834460945,
+ "learning_rate": 1.393939393939394e-06,
+ "loss": 0.3084,
+ "step": 92
+ },
+ {
+ "epoch": 0.6992481203007519,
+ "grad_norm": 0.23857771793716218,
+ "learning_rate": 1.409090909090909e-06,
+ "loss": 0.3091,
+ "step": 93
+ },
+ {
+ "epoch": 0.706766917293233,
+ "grad_norm": 0.24634399802629797,
+ "learning_rate": 1.4242424242424242e-06,
+ "loss": 0.2977,
+ "step": 94
+ },
+ {
+ "epoch": 0.7142857142857143,
+ "grad_norm": 0.2412996787277028,
+ "learning_rate": 1.4393939393939394e-06,
+ "loss": 0.2971,
+ "step": 95
+ },
+ {
+ "epoch": 0.7218045112781954,
+ "grad_norm": 0.24304016282646815,
+ "learning_rate": 1.4545454545454544e-06,
+ "loss": 0.3062,
+ "step": 96
+ },
+ {
+ "epoch": 0.7293233082706767,
+ "grad_norm": 0.2372744007698051,
+ "learning_rate": 1.4696969696969696e-06,
+ "loss": 0.2872,
+ "step": 97
+ },
+ {
+ "epoch": 0.7368421052631579,
+ "grad_norm": 0.2375607687669293,
+ "learning_rate": 1.4848484848484848e-06,
+ "loss": 0.307,
+ "step": 98
+ },
+ {
+ "epoch": 0.7443609022556391,
+ "grad_norm": 0.2343912627751333,
+ "learning_rate": 1.5e-06,
+ "loss": 0.3036,
+ "step": 99
+ },
+ {
+ "epoch": 0.7518796992481203,
+ "grad_norm": 0.22164293319980358,
+ "learning_rate": 1.515151515151515e-06,
+ "loss": 0.3115,
+ "step": 100
+ },
+ {
+ "epoch": 0.7593984962406015,
+ "grad_norm": 0.22581749991141575,
+ "learning_rate": 1.5303030303030302e-06,
+ "loss": 0.301,
+ "step": 101
+ },
+ {
+ "epoch": 0.7669172932330827,
+ "grad_norm": 0.2187418808772986,
+ "learning_rate": 1.5454545454545454e-06,
+ "loss": 0.3002,
+ "step": 102
+ },
+ {
+ "epoch": 0.7744360902255639,
+ "grad_norm": 0.21493852623423515,
+ "learning_rate": 1.5606060606060604e-06,
+ "loss": 0.293,
+ "step": 103
+ },
+ {
+ "epoch": 0.7819548872180451,
+ "grad_norm": 0.22130714069207072,
+ "learning_rate": 1.5757575757575756e-06,
+ "loss": 0.2842,
+ "step": 104
+ },
+ {
+ "epoch": 0.7894736842105263,
+ "grad_norm": 0.21185562685663265,
+ "learning_rate": 1.5909090909090908e-06,
+ "loss": 0.2947,
+ "step": 105
+ },
+ {
+ "epoch": 0.7969924812030075,
+ "grad_norm": 0.2270995660570705,
+ "learning_rate": 1.6060606060606058e-06,
+ "loss": 0.2959,
+ "step": 106
+ },
+ {
+ "epoch": 0.8045112781954887,
+ "grad_norm": 0.2046562848025141,
+ "learning_rate": 1.621212121212121e-06,
+ "loss": 0.2947,
+ "step": 107
+ },
+ {
+ "epoch": 0.8120300751879699,
+ "grad_norm": 0.2085628026404982,
+ "learning_rate": 1.6363636363636365e-06,
+ "loss": 0.289,
+ "step": 108
+ },
+ {
+ "epoch": 0.8195488721804511,
+ "grad_norm": 0.2054438017502729,
+ "learning_rate": 1.6515151515151515e-06,
+ "loss": 0.2892,
+ "step": 109
+ },
+ {
+ "epoch": 0.8270676691729323,
+ "grad_norm": 0.22086195009045176,
+ "learning_rate": 1.6666666666666667e-06,
+ "loss": 0.2873,
+ "step": 110
+ },
+ {
+ "epoch": 0.8345864661654135,
+ "grad_norm": 0.20621228393410063,
+ "learning_rate": 1.6818181818181819e-06,
+ "loss": 0.2887,
+ "step": 111
+ },
+ {
+ "epoch": 0.8421052631578947,
+ "grad_norm": 0.1960898721853398,
+ "learning_rate": 1.6969696969696969e-06,
+ "loss": 0.2795,
+ "step": 112
+ },
+ {
+ "epoch": 0.849624060150376,
+ "grad_norm": 0.1968588802710802,
+ "learning_rate": 1.712121212121212e-06,
+ "loss": 0.2815,
+ "step": 113
+ },
+ {
+ "epoch": 0.8571428571428571,
+ "grad_norm": 0.20349582334986419,
+ "learning_rate": 1.7272727272727273e-06,
+ "loss": 0.2812,
+ "step": 114
+ },
+ {
+ "epoch": 0.8646616541353384,
+ "grad_norm": 0.19844240229279997,
+ "learning_rate": 1.7424242424242423e-06,
+ "loss": 0.2812,
+ "step": 115
+ },
+ {
+ "epoch": 0.8721804511278195,
+ "grad_norm": 0.19282714203409276,
+ "learning_rate": 1.7575757575757575e-06,
+ "loss": 0.2794,
+ "step": 116
+ },
+ {
+ "epoch": 0.8796992481203008,
+ "grad_norm": 0.20577894275373879,
+ "learning_rate": 1.7727272727272727e-06,
+ "loss": 0.2934,
+ "step": 117
+ },
+ {
+ "epoch": 0.8872180451127819,
+ "grad_norm": 0.20008081630830774,
+ "learning_rate": 1.7878787878787877e-06,
+ "loss": 0.2864,
+ "step": 118
+ },
+ {
+ "epoch": 0.8947368421052632,
+ "grad_norm": 0.1893438647225705,
+ "learning_rate": 1.8030303030303029e-06,
+ "loss": 0.2902,
+ "step": 119
+ },
+ {
+ "epoch": 0.9022556390977443,
+ "grad_norm": 0.19307405764697674,
+ "learning_rate": 1.818181818181818e-06,
+ "loss": 0.2791,
+ "step": 120
+ },
+ {
+ "epoch": 0.9097744360902256,
+ "grad_norm": 0.1914751240650069,
+ "learning_rate": 1.833333333333333e-06,
+ "loss": 0.2753,
+ "step": 121
+ },
+ {
+ "epoch": 0.9172932330827067,
+ "grad_norm": 0.19377778023804626,
+ "learning_rate": 1.8484848484848483e-06,
+ "loss": 0.2919,
+ "step": 122
+ },
+ {
+ "epoch": 0.924812030075188,
+ "grad_norm": 0.18985565036895105,
+ "learning_rate": 1.8636363636363635e-06,
+ "loss": 0.2824,
+ "step": 123
+ },
+ {
+ "epoch": 0.9323308270676691,
+ "grad_norm": 0.20708786081030217,
+ "learning_rate": 1.878787878787879e-06,
+ "loss": 0.2901,
+ "step": 124
+ },
+ {
+ "epoch": 0.9398496240601504,
+ "grad_norm": 0.20717659687757517,
+ "learning_rate": 1.893939393939394e-06,
+ "loss": 0.2824,
+ "step": 125
+ },
+ {
+ "epoch": 0.9473684210526315,
+ "grad_norm": 0.19266762914690314,
+ "learning_rate": 1.909090909090909e-06,
+ "loss": 0.2804,
+ "step": 126
+ },
+ {
+ "epoch": 0.9548872180451128,
+ "grad_norm": 0.18819454076916833,
+ "learning_rate": 1.924242424242424e-06,
+ "loss": 0.2747,
+ "step": 127
+ },
+ {
+ "epoch": 0.9624060150375939,
+ "grad_norm": 0.2035416014589256,
+ "learning_rate": 1.9393939393939395e-06,
+ "loss": 0.2927,
+ "step": 128
+ },
+ {
+ "epoch": 0.9699248120300752,
+ "grad_norm": 0.18696617168124985,
+ "learning_rate": 1.9545454545454545e-06,
+ "loss": 0.2684,
+ "step": 129
+ },
+ {
+ "epoch": 0.9774436090225563,
+ "grad_norm": 0.19292005591343866,
+ "learning_rate": 1.9696969696969695e-06,
+ "loss": 0.2788,
+ "step": 130
+ },
+ {
+ "epoch": 0.9849624060150376,
+ "grad_norm": 0.18754061788648113,
+ "learning_rate": 1.984848484848485e-06,
+ "loss": 0.2812,
+ "step": 131
+ },
+ {
+ "epoch": 0.9924812030075187,
+ "grad_norm": 0.1967857206612375,
+ "learning_rate": 2e-06,
+ "loss": 0.2925,
+ "step": 132
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.19540137853667394,
+ "learning_rate": 1.9999992278253237e-06,
+ "loss": 0.2806,
+ "step": 133
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 0.26313385367393494,
+ "eval_runtime": 36.6138,
+ "eval_samples_per_second": 12.209,
+ "eval_steps_per_second": 0.191,
+ "step": 133
+ },
+ {
+ "epoch": 1.0075187969924813,
+ "grad_norm": 0.18770941174985312,
+ "learning_rate": 1.999996911302488e-06,
+ "loss": 0.2734,
+ "step": 134
+ },
+ {
+ "epoch": 1.0150375939849625,
+ "grad_norm": 0.18548777041915152,
+ "learning_rate": 1.99999305043507e-06,
+ "loss": 0.2614,
+ "step": 135
+ },
+ {
+ "epoch": 1.0225563909774436,
+ "grad_norm": 0.18434231902151554,
+ "learning_rate": 1.9999876452290317e-06,
+ "loss": 0.2697,
+ "step": 136
+ },
+ {
+ "epoch": 1.0300751879699248,
+ "grad_norm": 0.1867527291678806,
+ "learning_rate": 1.999980695692722e-06,
+ "loss": 0.2682,
+ "step": 137
+ },
+ {
+ "epoch": 1.037593984962406,
+ "grad_norm": 0.19815721943572842,
+ "learning_rate": 1.999972201836872e-06,
+ "loss": 0.2693,
+ "step": 138
+ },
+ {
+ "epoch": 1.045112781954887,
+ "grad_norm": 0.19470084518680814,
+ "learning_rate": 1.9999621636746e-06,
+ "loss": 0.2781,
+ "step": 139
+ },
+ {
+ "epoch": 1.0526315789473684,
+ "grad_norm": 0.19071397823533437,
+ "learning_rate": 1.999950581221408e-06,
+ "loss": 0.2854,
+ "step": 140
+ },
+ {
+ "epoch": 1.0601503759398496,
+ "grad_norm": 0.19155730461999457,
+ "learning_rate": 1.999937454495184e-06,
+ "loss": 0.2746,
+ "step": 141
+ },
+ {
+ "epoch": 1.0676691729323309,
+ "grad_norm": 0.193098865153624,
+ "learning_rate": 1.9999227835162e-06,
+ "loss": 0.2723,
+ "step": 142
+ },
+ {
+ "epoch": 1.0751879699248121,
+ "grad_norm": 0.18024121279664976,
+ "learning_rate": 1.9999065683071128e-06,
+ "loss": 0.2696,
+ "step": 143
+ },
+ {
+ "epoch": 1.0827067669172932,
+ "grad_norm": 0.18714846860152845,
+ "learning_rate": 1.9998888088929643e-06,
+ "loss": 0.2712,
+ "step": 144
+ },
+ {
+ "epoch": 1.0902255639097744,
+ "grad_norm": 0.19764827785324923,
+ "learning_rate": 1.9998695053011815e-06,
+ "loss": 0.285,
+ "step": 145
+ },
+ {
+ "epoch": 1.0977443609022557,
+ "grad_norm": 0.19250237182893865,
+ "learning_rate": 1.9998486575615758e-06,
+ "loss": 0.2664,
+ "step": 146
+ },
+ {
+ "epoch": 1.1052631578947367,
+ "grad_norm": 0.18350244998477347,
+ "learning_rate": 1.9998262657063435e-06,
+ "loss": 0.2648,
+ "step": 147
+ },
+ {
+ "epoch": 1.112781954887218,
+ "grad_norm": 0.18594616349634643,
+ "learning_rate": 1.9998023297700654e-06,
+ "loss": 0.2652,
+ "step": 148
+ },
+ {
+ "epoch": 1.1203007518796992,
+ "grad_norm": 0.18924497794129985,
+ "learning_rate": 1.999776849789707e-06,
+ "loss": 0.2624,
+ "step": 149
+ },
+ {
+ "epoch": 1.1278195488721805,
+ "grad_norm": 0.19654482978682783,
+ "learning_rate": 1.999749825804618e-06,
+ "loss": 0.2739,
+ "step": 150
+ },
+ {
+ "epoch": 1.1353383458646618,
+ "grad_norm": 0.1913597930566055,
+ "learning_rate": 1.9997212578565333e-06,
+ "loss": 0.2787,
+ "step": 151
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 0.189108908295423,
+ "learning_rate": 1.9996911459895713e-06,
+ "loss": 0.2752,
+ "step": 152
+ },
+ {
+ "epoch": 1.150375939849624,
+ "grad_norm": 0.1894247754612144,
+ "learning_rate": 1.999659490250236e-06,
+ "loss": 0.2717,
+ "step": 153
+ },
+ {
+ "epoch": 1.1578947368421053,
+ "grad_norm": 0.2089088736764278,
+ "learning_rate": 1.9996262906874136e-06,
+ "loss": 0.2638,
+ "step": 154
+ },
+ {
+ "epoch": 1.1654135338345863,
+ "grad_norm": 0.20425388029293756,
+ "learning_rate": 1.9995915473523774e-06,
+ "loss": 0.2682,
+ "step": 155
+ },
+ {
+ "epoch": 1.1729323308270676,
+ "grad_norm": 0.19852155595520504,
+ "learning_rate": 1.9995552602987826e-06,
+ "loss": 0.2632,
+ "step": 156
+ },
+ {
+ "epoch": 1.1804511278195489,
+ "grad_norm": 0.1821227436775735,
+ "learning_rate": 1.9995174295826686e-06,
+ "loss": 0.2618,
+ "step": 157
+ },
+ {
+ "epoch": 1.1879699248120301,
+ "grad_norm": 0.20366138533564454,
+ "learning_rate": 1.9994780552624593e-06,
+ "loss": 0.2695,
+ "step": 158
+ },
+ {
+ "epoch": 1.1954887218045114,
+ "grad_norm": 0.19652539193851176,
+ "learning_rate": 1.9994371373989633e-06,
+ "loss": 0.2726,
+ "step": 159
+ },
+ {
+ "epoch": 1.2030075187969924,
+ "grad_norm": 0.19152365052992962,
+ "learning_rate": 1.9993946760553714e-06,
+ "loss": 0.2581,
+ "step": 160
+ },
+ {
+ "epoch": 1.2105263157894737,
+ "grad_norm": 0.18817380753999768,
+ "learning_rate": 1.9993506712972588e-06,
+ "loss": 0.2751,
+ "step": 161
+ },
+ {
+ "epoch": 1.218045112781955,
+ "grad_norm": 0.21290117397284375,
+ "learning_rate": 1.9993051231925845e-06,
+ "loss": 0.2806,
+ "step": 162
+ },
+ {
+ "epoch": 1.225563909774436,
+ "grad_norm": 0.1904992346586108,
+ "learning_rate": 1.9992580318116905e-06,
+ "loss": 0.2668,
+ "step": 163
+ },
+ {
+ "epoch": 1.2330827067669172,
+ "grad_norm": 0.18921488122451685,
+ "learning_rate": 1.9992093972273017e-06,
+ "loss": 0.2775,
+ "step": 164
+ },
+ {
+ "epoch": 1.2406015037593985,
+ "grad_norm": 0.19181806559632314,
+ "learning_rate": 1.999159219514528e-06,
+ "loss": 0.282,
+ "step": 165
+ },
+ {
+ "epoch": 1.2481203007518797,
+ "grad_norm": 0.18813599585082966,
+ "learning_rate": 1.9991074987508608e-06,
+ "loss": 0.2489,
+ "step": 166
+ },
+ {
+ "epoch": 1.255639097744361,
+ "grad_norm": 0.199719961311488,
+ "learning_rate": 1.999054235016175e-06,
+ "loss": 0.2587,
+ "step": 167
+ },
+ {
+ "epoch": 1.263157894736842,
+ "grad_norm": 0.20836457004117756,
+ "learning_rate": 1.9989994283927284e-06,
+ "loss": 0.2695,
+ "step": 168
+ },
+ {
+ "epoch": 1.2706766917293233,
+ "grad_norm": 0.20039428534025827,
+ "learning_rate": 1.9989430789651617e-06,
+ "loss": 0.2634,
+ "step": 169
+ },
+ {
+ "epoch": 1.2781954887218046,
+ "grad_norm": 0.19159975850488167,
+ "learning_rate": 1.9988851868204982e-06,
+ "loss": 0.2531,
+ "step": 170
+ },
+ {
+ "epoch": 1.2857142857142856,
+ "grad_norm": 0.19088566263580148,
+ "learning_rate": 1.9988257520481433e-06,
+ "loss": 0.2611,
+ "step": 171
+ },
+ {
+ "epoch": 1.2932330827067668,
+ "grad_norm": 0.19442651406442973,
+ "learning_rate": 1.998764774739885e-06,
+ "loss": 0.2657,
+ "step": 172
+ },
+ {
+ "epoch": 1.300751879699248,
+ "grad_norm": 0.20916832426030357,
+ "learning_rate": 1.9987022549898943e-06,
+ "loss": 0.2745,
+ "step": 173
+ },
+ {
+ "epoch": 1.3082706766917294,
+ "grad_norm": 0.19118241036057385,
+ "learning_rate": 1.9986381928947225e-06,
+ "loss": 0.2697,
+ "step": 174
+ },
+ {
+ "epoch": 1.3157894736842106,
+ "grad_norm": 0.19569957805772634,
+ "learning_rate": 1.9985725885533043e-06,
+ "loss": 0.2705,
+ "step": 175
+ },
+ {
+ "epoch": 1.3233082706766917,
+ "grad_norm": 0.21954876812669122,
+ "learning_rate": 1.998505442066956e-06,
+ "loss": 0.2714,
+ "step": 176
+ },
+ {
+ "epoch": 1.330827067669173,
+ "grad_norm": 0.20360168953004992,
+ "learning_rate": 1.998436753539375e-06,
+ "loss": 0.2662,
+ "step": 177
+ },
+ {
+ "epoch": 1.3383458646616542,
+ "grad_norm": 0.20426117471662625,
+ "learning_rate": 1.9983665230766404e-06,
+ "loss": 0.2688,
+ "step": 178
+ },
+ {
+ "epoch": 1.3458646616541352,
+ "grad_norm": 0.1895009949521178,
+ "learning_rate": 1.9982947507872127e-06,
+ "loss": 0.2685,
+ "step": 179
+ },
+ {
+ "epoch": 1.3533834586466165,
+ "grad_norm": 0.1969313157254311,
+ "learning_rate": 1.998221436781933e-06,
+ "loss": 0.2573,
+ "step": 180
+ },
+ {
+ "epoch": 1.3609022556390977,
+ "grad_norm": 0.19925672751341714,
+ "learning_rate": 1.998146581174024e-06,
+ "loss": 0.264,
+ "step": 181
+ },
+ {
+ "epoch": 1.368421052631579,
+ "grad_norm": 0.19319251874839152,
+ "learning_rate": 1.998070184079089e-06,
+ "loss": 0.259,
+ "step": 182
+ },
+ {
+ "epoch": 1.3759398496240602,
+ "grad_norm": 0.1955617480381582,
+ "learning_rate": 1.9979922456151114e-06,
+ "loss": 0.2558,
+ "step": 183
+ },
+ {
+ "epoch": 1.3834586466165413,
+ "grad_norm": 0.20584720384505467,
+ "learning_rate": 1.997912765902456e-06,
+ "loss": 0.2639,
+ "step": 184
+ },
+ {
+ "epoch": 1.3909774436090225,
+ "grad_norm": 0.20557603221979076,
+ "learning_rate": 1.997831745063867e-06,
+ "loss": 0.2705,
+ "step": 185
+ },
+ {
+ "epoch": 1.3984962406015038,
+ "grad_norm": 0.19837714415058105,
+ "learning_rate": 1.9977491832244686e-06,
+ "loss": 0.2677,
+ "step": 186
+ },
+ {
+ "epoch": 1.4060150375939848,
+ "grad_norm": 0.2065107868978118,
+ "learning_rate": 1.9976650805117658e-06,
+ "loss": 0.2621,
+ "step": 187
+ },
+ {
+ "epoch": 1.413533834586466,
+ "grad_norm": 0.2054324951471292,
+ "learning_rate": 1.9975794370556416e-06,
+ "loss": 0.2603,
+ "step": 188
+ },
+ {
+ "epoch": 1.4210526315789473,
+ "grad_norm": 0.20038271951832962,
+ "learning_rate": 1.99749225298836e-06,
+ "loss": 0.2711,
+ "step": 189
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 0.19253836643575556,
+ "learning_rate": 1.9974035284445638e-06,
+ "loss": 0.2646,
+ "step": 190
+ },
+ {
+ "epoch": 1.4360902255639099,
+ "grad_norm": 0.21237757168252666,
+ "learning_rate": 1.997313263561275e-06,
+ "loss": 0.2655,
+ "step": 191
+ },
+ {
+ "epoch": 1.443609022556391,
+ "grad_norm": 0.20218693755691775,
+ "learning_rate": 1.9972214584778924e-06,
+ "loss": 0.2548,
+ "step": 192
+ },
+ {
+ "epoch": 1.4511278195488722,
+ "grad_norm": 0.19342746967706817,
+ "learning_rate": 1.9971281133361973e-06,
+ "loss": 0.276,
+ "step": 193
+ },
+ {
+ "epoch": 1.4586466165413534,
+ "grad_norm": 0.18329816762712028,
+ "learning_rate": 1.997033228280346e-06,
+ "loss": 0.2661,
+ "step": 194
+ },
+ {
+ "epoch": 1.4661654135338344,
+ "grad_norm": 0.19600924145004447,
+ "learning_rate": 1.996936803456874e-06,
+ "loss": 0.2636,
+ "step": 195
+ },
+ {
+ "epoch": 1.4736842105263157,
+ "grad_norm": 0.20121230536700305,
+ "learning_rate": 1.9968388390146957e-06,
+ "loss": 0.2556,
+ "step": 196
+ },
+ {
+ "epoch": 1.481203007518797,
+ "grad_norm": 0.19565021668401447,
+ "learning_rate": 1.996739335105102e-06,
+ "loss": 0.2572,
+ "step": 197
+ },
+ {
+ "epoch": 1.4887218045112782,
+ "grad_norm": 0.1802959086879156,
+ "learning_rate": 1.996638291881762e-06,
+ "loss": 0.2508,
+ "step": 198
+ },
+ {
+ "epoch": 1.4962406015037595,
+ "grad_norm": 0.18863582150492816,
+ "learning_rate": 1.996535709500721e-06,
+ "loss": 0.2674,
+ "step": 199
+ },
+ {
+ "epoch": 1.5037593984962405,
+ "grad_norm": 0.19507256384689756,
+ "learning_rate": 1.9964315881204026e-06,
+ "loss": 0.2678,
+ "step": 200
+ },
+ {
+ "epoch": 1.5112781954887218,
+ "grad_norm": 0.19180137265923144,
+ "learning_rate": 1.996325927901607e-06,
+ "loss": 0.2581,
+ "step": 201
+ },
+ {
+ "epoch": 1.518796992481203,
+ "grad_norm": 0.18908974331141806,
+ "learning_rate": 1.9962187290075095e-06,
+ "loss": 0.2568,
+ "step": 202
+ },
+ {
+ "epoch": 1.526315789473684,
+ "grad_norm": 0.19774642307992613,
+ "learning_rate": 1.996109991603663e-06,
+ "loss": 0.2591,
+ "step": 203
+ },
+ {
+ "epoch": 1.5338345864661656,
+ "grad_norm": 0.1906503658062754,
+ "learning_rate": 1.9959997158579965e-06,
+ "loss": 0.2631,
+ "step": 204
+ },
+ {
+ "epoch": 1.5413533834586466,
+ "grad_norm": 0.20052527698905925,
+ "learning_rate": 1.995887901940814e-06,
+ "loss": 0.267,
+ "step": 205
+ },
+ {
+ "epoch": 1.5488721804511278,
+ "grad_norm": 0.18826018369325126,
+ "learning_rate": 1.9957745500247954e-06,
+ "loss": 0.2626,
+ "step": 206
+ },
+ {
+ "epoch": 1.556390977443609,
+ "grad_norm": 0.19515220606797573,
+ "learning_rate": 1.995659660284995e-06,
+ "loss": 0.2631,
+ "step": 207
+ },
+ {
+ "epoch": 1.5639097744360901,
+ "grad_norm": 0.21308214298386363,
+ "learning_rate": 1.9955432328988433e-06,
+ "loss": 0.2604,
+ "step": 208
+ },
+ {
+ "epoch": 1.5714285714285714,
+ "grad_norm": 0.18491308963935976,
+ "learning_rate": 1.995425268046145e-06,
+ "loss": 0.2606,
+ "step": 209
+ },
+ {
+ "epoch": 1.5789473684210527,
+ "grad_norm": 0.1887996661716039,
+ "learning_rate": 1.9953057659090784e-06,
+ "loss": 0.261,
+ "step": 210
+ },
+ {
+ "epoch": 1.5864661654135337,
+ "grad_norm": 0.1877006119076644,
+ "learning_rate": 1.9951847266721967e-06,
+ "loss": 0.2556,
+ "step": 211
+ },
+ {
+ "epoch": 1.5939849624060152,
+ "grad_norm": 0.1941233301331481,
+ "learning_rate": 1.9950621505224274e-06,
+ "loss": 0.2628,
+ "step": 212
+ },
+ {
+ "epoch": 1.6015037593984962,
+ "grad_norm": 0.19059852116888013,
+ "learning_rate": 1.9949380376490703e-06,
+ "loss": 0.271,
+ "step": 213
+ },
+ {
+ "epoch": 1.6090225563909775,
+ "grad_norm": 0.19981972444605373,
+ "learning_rate": 1.9948123882437994e-06,
+ "loss": 0.267,
+ "step": 214
+ },
+ {
+ "epoch": 1.6165413533834587,
+ "grad_norm": 0.201964554328199,
+ "learning_rate": 1.9946852025006605e-06,
+ "loss": 0.2651,
+ "step": 215
+ },
+ {
+ "epoch": 1.6240601503759398,
+ "grad_norm": 0.19077961225888726,
+ "learning_rate": 1.994556480616074e-06,
+ "loss": 0.251,
+ "step": 216
+ },
+ {
+ "epoch": 1.631578947368421,
+ "grad_norm": 0.18515320294313534,
+ "learning_rate": 1.9944262227888307e-06,
+ "loss": 0.26,
+ "step": 217
+ },
+ {
+ "epoch": 1.6390977443609023,
+ "grad_norm": 0.1940684254505976,
+ "learning_rate": 1.9942944292200944e-06,
+ "loss": 0.2574,
+ "step": 218
+ },
+ {
+ "epoch": 1.6466165413533833,
+ "grad_norm": 0.19581059701027748,
+ "learning_rate": 1.9941611001134e-06,
+ "loss": 0.2627,
+ "step": 219
+ },
+ {
+ "epoch": 1.6541353383458648,
+ "grad_norm": 0.1897055167940306,
+ "learning_rate": 1.9940262356746553e-06,
+ "loss": 0.2636,
+ "step": 220
+ },
+ {
+ "epoch": 1.6616541353383458,
+ "grad_norm": 0.19221662900293143,
+ "learning_rate": 1.993889836112137e-06,
+ "loss": 0.2552,
+ "step": 221
+ },
+ {
+ "epoch": 1.669172932330827,
+ "grad_norm": 0.19418996595951837,
+ "learning_rate": 1.9937519016364938e-06,
+ "loss": 0.2622,
+ "step": 222
+ },
+ {
+ "epoch": 1.6766917293233083,
+ "grad_norm": 0.18292470827596977,
+ "learning_rate": 1.9936124324607453e-06,
+ "loss": 0.2498,
+ "step": 223
+ },
+ {
+ "epoch": 1.6842105263157894,
+ "grad_norm": 0.1901384608322056,
+ "learning_rate": 1.9934714288002807e-06,
+ "loss": 0.255,
+ "step": 224
+ },
+ {
+ "epoch": 1.6917293233082706,
+ "grad_norm": 0.19249386451607295,
+ "learning_rate": 1.9933288908728577e-06,
+ "loss": 0.2693,
+ "step": 225
+ },
+ {
+ "epoch": 1.699248120300752,
+ "grad_norm": 0.19790387250494224,
+ "learning_rate": 1.993184818898606e-06,
+ "loss": 0.2628,
+ "step": 226
+ },
+ {
+ "epoch": 1.706766917293233,
+ "grad_norm": 0.20288868771549234,
+ "learning_rate": 1.9930392131000224e-06,
+ "loss": 0.2657,
+ "step": 227
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 0.1886909275885503,
+ "learning_rate": 1.992892073701973e-06,
+ "loss": 0.2619,
+ "step": 228
+ },
+ {
+ "epoch": 1.7218045112781954,
+ "grad_norm": 0.18850836259369894,
+ "learning_rate": 1.9927434009316933e-06,
+ "loss": 0.2582,
+ "step": 229
+ },
+ {
+ "epoch": 1.7293233082706767,
+ "grad_norm": 0.19375218377633982,
+ "learning_rate": 1.9925931950187853e-06,
+ "loss": 0.2628,
+ "step": 230
+ },
+ {
+ "epoch": 1.736842105263158,
+ "grad_norm": 0.19423769562821355,
+ "learning_rate": 1.9924414561952193e-06,
+ "loss": 0.255,
+ "step": 231
+ },
+ {
+ "epoch": 1.744360902255639,
+ "grad_norm": 0.19322103193945556,
+ "learning_rate": 1.992288184695333e-06,
+ "loss": 0.2636,
+ "step": 232
+ },
+ {
+ "epoch": 1.7518796992481203,
+ "grad_norm": 0.20794323830484612,
+ "learning_rate": 1.9921333807558316e-06,
+ "loss": 0.2564,
+ "step": 233
+ },
+ {
+ "epoch": 1.7593984962406015,
+ "grad_norm": 0.20292807263799545,
+ "learning_rate": 1.9919770446157865e-06,
+ "loss": 0.2641,
+ "step": 234
+ },
+ {
+ "epoch": 1.7669172932330826,
+ "grad_norm": 0.2243911410044154,
+ "learning_rate": 1.991819176516635e-06,
+ "loss": 0.2718,
+ "step": 235
+ },
+ {
+ "epoch": 1.774436090225564,
+ "grad_norm": 0.19267285230536385,
+ "learning_rate": 1.9916597767021807e-06,
+ "loss": 0.2618,
+ "step": 236
+ },
+ {
+ "epoch": 1.781954887218045,
+ "grad_norm": 0.19476402522822148,
+ "learning_rate": 1.991498845418592e-06,
+ "loss": 0.243,
+ "step": 237
+ },
+ {
+ "epoch": 1.7894736842105263,
+ "grad_norm": 0.19977827774855292,
+ "learning_rate": 1.991336382914404e-06,
+ "loss": 0.2486,
+ "step": 238
+ },
+ {
+ "epoch": 1.7969924812030076,
+ "grad_norm": 0.20098843172102904,
+ "learning_rate": 1.9911723894405154e-06,
+ "loss": 0.2566,
+ "step": 239
+ },
+ {
+ "epoch": 1.8045112781954886,
+ "grad_norm": 0.19871809201390567,
+ "learning_rate": 1.991006865250189e-06,
+ "loss": 0.2541,
+ "step": 240
+ },
+ {
+ "epoch": 1.8120300751879699,
+ "grad_norm": 0.19458609127803517,
+ "learning_rate": 1.990839810599052e-06,
+ "loss": 0.254,
+ "step": 241
+ },
+ {
+ "epoch": 1.8195488721804511,
+ "grad_norm": 0.19337621471200064,
+ "learning_rate": 1.990671225745096e-06,
+ "loss": 0.2506,
+ "step": 242
+ },
+ {
+ "epoch": 1.8270676691729322,
+ "grad_norm": 0.19376484196390106,
+ "learning_rate": 1.9905011109486733e-06,
+ "loss": 0.2592,
+ "step": 243
+ },
+ {
+ "epoch": 1.8345864661654137,
+ "grad_norm": 0.21053239521057265,
+ "learning_rate": 1.990329466472502e-06,
+ "loss": 0.2535,
+ "step": 244
+ },
+ {
+ "epoch": 1.8421052631578947,
+ "grad_norm": 0.19424589046973262,
+ "learning_rate": 1.9901562925816604e-06,
+ "loss": 0.2585,
+ "step": 245
+ },
+ {
+ "epoch": 1.849624060150376,
+ "grad_norm": 0.20492287562877878,
+ "learning_rate": 1.9899815895435898e-06,
+ "loss": 0.2686,
+ "step": 246
+ },
+ {
+ "epoch": 1.8571428571428572,
+ "grad_norm": 0.1811665027080508,
+ "learning_rate": 1.9898053576280926e-06,
+ "loss": 0.2519,
+ "step": 247
+ },
+ {
+ "epoch": 1.8646616541353382,
+ "grad_norm": 0.18778991481804724,
+ "learning_rate": 1.9896275971073322e-06,
+ "loss": 0.2702,
+ "step": 248
+ },
+ {
+ "epoch": 1.8721804511278195,
+ "grad_norm": 0.1929686998333934,
+ "learning_rate": 1.9894483082558335e-06,
+ "loss": 0.2509,
+ "step": 249
+ },
+ {
+ "epoch": 1.8796992481203008,
+ "grad_norm": 0.19988897780462278,
+ "learning_rate": 1.9892674913504807e-06,
+ "loss": 0.2632,
+ "step": 250
+ },
+ {
+ "epoch": 1.8872180451127818,
+ "grad_norm": 0.2135272949499086,
+ "learning_rate": 1.9890851466705183e-06,
+ "loss": 0.2616,
+ "step": 251
+ },
+ {
+ "epoch": 1.8947368421052633,
+ "grad_norm": 0.1885097619872213,
+ "learning_rate": 1.9889012744975504e-06,
+ "loss": 0.2521,
+ "step": 252
+ },
+ {
+ "epoch": 1.9022556390977443,
+ "grad_norm": 0.19773142891867643,
+ "learning_rate": 1.98871587511554e-06,
+ "loss": 0.2567,
+ "step": 253
+ },
+ {
+ "epoch": 1.9097744360902256,
+ "grad_norm": 0.1934046132745007,
+ "learning_rate": 1.9885289488108084e-06,
+ "loss": 0.2625,
+ "step": 254
+ },
+ {
+ "epoch": 1.9172932330827068,
+ "grad_norm": 0.19211299974022242,
+ "learning_rate": 1.988340495872035e-06,
+ "loss": 0.2492,
+ "step": 255
+ },
+ {
+ "epoch": 1.9248120300751879,
+ "grad_norm": 0.18612405924697573,
+ "learning_rate": 1.9881505165902565e-06,
+ "loss": 0.2487,
+ "step": 256
+ },
+ {
+ "epoch": 1.9323308270676691,
+ "grad_norm": 0.19586456188438545,
+ "learning_rate": 1.987959011258868e-06,
+ "loss": 0.2626,
+ "step": 257
+ },
+ {
+ "epoch": 1.9398496240601504,
+ "grad_norm": 0.19750026688799172,
+ "learning_rate": 1.9877659801736203e-06,
+ "loss": 0.2506,
+ "step": 258
+ },
+ {
+ "epoch": 1.9473684210526314,
+ "grad_norm": 0.1977930124376195,
+ "learning_rate": 1.987571423632621e-06,
+ "loss": 0.2482,
+ "step": 259
+ },
+ {
+ "epoch": 1.954887218045113,
+ "grad_norm": 0.19755123509522227,
+ "learning_rate": 1.987375341936333e-06,
+ "loss": 0.2472,
+ "step": 260
+ },
+ {
+ "epoch": 1.962406015037594,
+ "grad_norm": 0.1934436185782905,
+ "learning_rate": 1.9871777353875756e-06,
+ "loss": 0.2534,
+ "step": 261
+ },
+ {
+ "epoch": 1.9699248120300752,
+ "grad_norm": 0.1905402158238489,
+ "learning_rate": 1.986978604291522e-06,
+ "loss": 0.248,
+ "step": 262
+ },
+ {
+ "epoch": 1.9774436090225564,
+ "grad_norm": 0.20646429528527394,
+ "learning_rate": 1.9867779489557003e-06,
+ "loss": 0.2581,
+ "step": 263
+ },
+ {
+ "epoch": 1.9849624060150375,
+ "grad_norm": 0.19810997312175682,
+ "learning_rate": 1.986575769689992e-06,
+ "loss": 0.2669,
+ "step": 264
+ },
+ {
+ "epoch": 1.9924812030075187,
+ "grad_norm": 0.19117815924623283,
+ "learning_rate": 1.9863720668066327e-06,
+ "loss": 0.245,
+ "step": 265
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 0.18477240621943006,
+ "learning_rate": 1.986166840620211e-06,
+ "loss": 0.2469,
+ "step": 266
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 0.24453890323638916,
+ "eval_runtime": 35.9095,
+ "eval_samples_per_second": 12.448,
+ "eval_steps_per_second": 0.195,
+ "step": 266
+ },
+ {
+ "epoch": 2.007518796992481,
+ "grad_norm": 0.1948008114554541,
+ "learning_rate": 1.985960091447668e-06,
+ "loss": 0.2493,
+ "step": 267
+ },
+ {
+ "epoch": 2.0150375939849625,
+ "grad_norm": 0.19797484169908672,
+ "learning_rate": 1.9857518196082962e-06,
+ "loss": 0.257,
+ "step": 268
+ },
+ {
+ "epoch": 2.0225563909774436,
+ "grad_norm": 0.2010564515546128,
+ "learning_rate": 1.9855420254237407e-06,
+ "loss": 0.2505,
+ "step": 269
+ },
+ {
+ "epoch": 2.030075187969925,
+ "grad_norm": 0.19774197949493058,
+ "learning_rate": 1.985330709217996e-06,
+ "loss": 0.2583,
+ "step": 270
+ },
+ {
+ "epoch": 2.037593984962406,
+ "grad_norm": 0.18911594143479074,
+ "learning_rate": 1.985117871317409e-06,
+ "loss": 0.2462,
+ "step": 271
+ },
+ {
+ "epoch": 2.045112781954887,
+ "grad_norm": 0.21714384131416362,
+ "learning_rate": 1.9849035120506753e-06,
+ "loss": 0.2494,
+ "step": 272
+ },
+ {
+ "epoch": 2.0526315789473686,
+ "grad_norm": 0.19890772117754288,
+ "learning_rate": 1.984687631748841e-06,
+ "loss": 0.2452,
+ "step": 273
+ },
+ {
+ "epoch": 2.0601503759398496,
+ "grad_norm": 0.1785093953920682,
+ "learning_rate": 1.9844702307453005e-06,
+ "loss": 0.2466,
+ "step": 274
+ },
+ {
+ "epoch": 2.0676691729323307,
+ "grad_norm": 0.18912053636888365,
+ "learning_rate": 1.9842513093757964e-06,
+ "loss": 0.2613,
+ "step": 275
+ },
+ {
+ "epoch": 2.075187969924812,
+ "grad_norm": 0.2013840799945818,
+ "learning_rate": 1.9840308679784207e-06,
+ "loss": 0.253,
+ "step": 276
+ },
+ {
+ "epoch": 2.082706766917293,
+ "grad_norm": 0.19786751459929638,
+ "learning_rate": 1.983808906893611e-06,
+ "loss": 0.241,
+ "step": 277
+ },
+ {
+ "epoch": 2.090225563909774,
+ "grad_norm": 0.1835596565103364,
+ "learning_rate": 1.9835854264641535e-06,
+ "loss": 0.2366,
+ "step": 278
+ },
+ {
+ "epoch": 2.0977443609022557,
+ "grad_norm": 0.19243452792707919,
+ "learning_rate": 1.9833604270351795e-06,
+ "loss": 0.2528,
+ "step": 279
+ },
+ {
+ "epoch": 2.1052631578947367,
+ "grad_norm": 0.18619148244759168,
+ "learning_rate": 1.983133908954167e-06,
+ "loss": 0.2404,
+ "step": 280
+ },
+ {
+ "epoch": 2.112781954887218,
+ "grad_norm": 0.19132976243530284,
+ "learning_rate": 1.982905872570939e-06,
+ "loss": 0.2434,
+ "step": 281
+ },
+ {
+ "epoch": 2.1203007518796992,
+ "grad_norm": 0.2092981245665805,
+ "learning_rate": 1.9826763182376634e-06,
+ "loss": 0.2546,
+ "step": 282
+ },
+ {
+ "epoch": 2.1278195488721803,
+ "grad_norm": 0.19548252067634406,
+ "learning_rate": 1.9824452463088522e-06,
+ "loss": 0.2591,
+ "step": 283
+ },
+ {
+ "epoch": 2.1353383458646618,
+ "grad_norm": 0.20077910225028664,
+ "learning_rate": 1.9822126571413612e-06,
+ "loss": 0.2467,
+ "step": 284
+ },
+ {
+ "epoch": 2.142857142857143,
+ "grad_norm": 0.19354333882554095,
+ "learning_rate": 1.9819785510943896e-06,
+ "loss": 0.2412,
+ "step": 285
+ },
+ {
+ "epoch": 2.1503759398496243,
+ "grad_norm": 0.18735997983531305,
+ "learning_rate": 1.981742928529478e-06,
+ "loss": 0.2462,
+ "step": 286
+ },
+ {
+ "epoch": 2.1578947368421053,
+ "grad_norm": 0.203412330559819,
+ "learning_rate": 1.9815057898105116e-06,
+ "loss": 0.2519,
+ "step": 287
+ },
+ {
+ "epoch": 2.1654135338345863,
+ "grad_norm": 0.1993182306648384,
+ "learning_rate": 1.9812671353037137e-06,
+ "loss": 0.2465,
+ "step": 288
+ },
+ {
+ "epoch": 2.172932330827068,
+ "grad_norm": 0.19498303197054473,
+ "learning_rate": 1.9810269653776514e-06,
+ "loss": 0.2442,
+ "step": 289
+ },
+ {
+ "epoch": 2.180451127819549,
+ "grad_norm": 0.18527310140716355,
+ "learning_rate": 1.98078528040323e-06,
+ "loss": 0.2355,
+ "step": 290
+ },
+ {
+ "epoch": 2.18796992481203,
+ "grad_norm": 0.20765673726844283,
+ "learning_rate": 1.980542080753697e-06,
+ "loss": 0.2556,
+ "step": 291
+ },
+ {
+ "epoch": 2.1954887218045114,
+ "grad_norm": 0.1966966125119359,
+ "learning_rate": 1.9802973668046363e-06,
+ "loss": 0.24,
+ "step": 292
+ },
+ {
+ "epoch": 2.2030075187969924,
+ "grad_norm": 0.19684047756908354,
+ "learning_rate": 1.980051138933972e-06,
+ "loss": 0.2463,
+ "step": 293
+ },
+ {
+ "epoch": 2.2105263157894735,
+ "grad_norm": 0.1903970339146079,
+ "learning_rate": 1.979803397521966e-06,
+ "loss": 0.2478,
+ "step": 294
+ },
+ {
+ "epoch": 2.218045112781955,
+ "grad_norm": 0.19086832503998216,
+ "learning_rate": 1.9795541429512175e-06,
+ "loss": 0.2443,
+ "step": 295
+ },
+ {
+ "epoch": 2.225563909774436,
+ "grad_norm": 0.1879361007770226,
+ "learning_rate": 1.979303375606663e-06,
+ "loss": 0.2492,
+ "step": 296
+ },
+ {
+ "epoch": 2.2330827067669174,
+ "grad_norm": 0.19569453226088854,
+ "learning_rate": 1.9790510958755754e-06,
+ "loss": 0.2421,
+ "step": 297
+ },
+ {
+ "epoch": 2.2406015037593985,
+ "grad_norm": 0.1998416826997302,
+ "learning_rate": 1.9787973041475616e-06,
+ "loss": 0.2503,
+ "step": 298
+ },
+ {
+ "epoch": 2.2481203007518795,
+ "grad_norm": 0.19230404290963685,
+ "learning_rate": 1.978542000814565e-06,
+ "loss": 0.2582,
+ "step": 299
+ },
+ {
+ "epoch": 2.255639097744361,
+ "grad_norm": 0.193150790009315,
+ "learning_rate": 1.9782851862708634e-06,
+ "loss": 0.2327,
+ "step": 300
+ },
+ {
+ "epoch": 2.263157894736842,
+ "grad_norm": 0.19532888545273452,
+ "learning_rate": 1.9780268609130676e-06,
+ "loss": 0.2384,
+ "step": 301
+ },
+ {
+ "epoch": 2.2706766917293235,
+ "grad_norm": 0.19133154060257193,
+ "learning_rate": 1.977767025140123e-06,
+ "loss": 0.2438,
+ "step": 302
+ },
+ {
+ "epoch": 2.2781954887218046,
+ "grad_norm": 0.19461738158177314,
+ "learning_rate": 1.9775056793533064e-06,
+ "loss": 0.2448,
+ "step": 303
+ },
+ {
+ "epoch": 2.2857142857142856,
+ "grad_norm": 0.19142197249688991,
+ "learning_rate": 1.9772428239562273e-06,
+ "loss": 0.2427,
+ "step": 304
+ },
+ {
+ "epoch": 2.293233082706767,
+ "grad_norm": 0.20033938933727843,
+ "learning_rate": 1.9769784593548257e-06,
+ "loss": 0.244,
+ "step": 305
+ },
+ {
+ "epoch": 2.300751879699248,
+ "grad_norm": 0.20310138750921547,
+ "learning_rate": 1.9767125859573733e-06,
+ "loss": 0.2425,
+ "step": 306
+ },
+ {
+ "epoch": 2.308270676691729,
+ "grad_norm": 0.20124765878284714,
+ "learning_rate": 1.9764452041744713e-06,
+ "loss": 0.2354,
+ "step": 307
+ },
+ {
+ "epoch": 2.3157894736842106,
+ "grad_norm": 0.20550655275651147,
+ "learning_rate": 1.976176314419051e-06,
+ "loss": 0.2511,
+ "step": 308
+ },
+ {
+ "epoch": 2.3233082706766917,
+ "grad_norm": 0.19326148461827464,
+ "learning_rate": 1.9759059171063714e-06,
+ "loss": 0.2478,
+ "step": 309
+ },
+ {
+ "epoch": 2.3308270676691727,
+ "grad_norm": 0.2068299338882467,
+ "learning_rate": 1.975634012654021e-06,
+ "loss": 0.2442,
+ "step": 310
+ },
+ {
+ "epoch": 2.338345864661654,
+ "grad_norm": 0.21252922752760434,
+ "learning_rate": 1.9753606014819155e-06,
+ "loss": 0.2648,
+ "step": 311
+ },
+ {
+ "epoch": 2.345864661654135,
+ "grad_norm": 0.2004766430243233,
+ "learning_rate": 1.9750856840122965e-06,
+ "loss": 0.244,
+ "step": 312
+ },
+ {
+ "epoch": 2.3533834586466167,
+ "grad_norm": 0.20506643860175405,
+ "learning_rate": 1.9748092606697327e-06,
+ "loss": 0.2596,
+ "step": 313
+ },
+ {
+ "epoch": 2.3609022556390977,
+ "grad_norm": 0.20379477268327423,
+ "learning_rate": 1.9745313318811194e-06,
+ "loss": 0.24,
+ "step": 314
+ },
+ {
+ "epoch": 2.3684210526315788,
+ "grad_norm": 0.19764901624853173,
+ "learning_rate": 1.974251898075674e-06,
+ "loss": 0.2396,
+ "step": 315
+ },
+ {
+ "epoch": 2.3759398496240602,
+ "grad_norm": 0.18802573792391444,
+ "learning_rate": 1.9739709596849416e-06,
+ "loss": 0.2487,
+ "step": 316
+ },
+ {
+ "epoch": 2.3834586466165413,
+ "grad_norm": 0.19211810044178282,
+ "learning_rate": 1.973688517142788e-06,
+ "loss": 0.2402,
+ "step": 317
+ },
+ {
+ "epoch": 2.3909774436090228,
+ "grad_norm": 0.20115776401859464,
+ "learning_rate": 1.9734045708854043e-06,
+ "loss": 0.2459,
+ "step": 318
+ },
+ {
+ "epoch": 2.398496240601504,
+ "grad_norm": 0.20558615879232137,
+ "learning_rate": 1.9731191213513014e-06,
+ "loss": 0.2398,
+ "step": 319
+ },
+ {
+ "epoch": 2.406015037593985,
+ "grad_norm": 0.1955905545602931,
+ "learning_rate": 1.9728321689813137e-06,
+ "loss": 0.2342,
+ "step": 320
+ },
+ {
+ "epoch": 2.4135338345864663,
+ "grad_norm": 0.2031263633373533,
+ "learning_rate": 1.9725437142185965e-06,
+ "loss": 0.2495,
+ "step": 321
+ },
+ {
+ "epoch": 2.4210526315789473,
+ "grad_norm": 0.19907026985984907,
+ "learning_rate": 1.972253757508624e-06,
+ "loss": 0.2442,
+ "step": 322
+ },
+ {
+ "epoch": 2.4285714285714284,
+ "grad_norm": 0.18587112837519565,
+ "learning_rate": 1.9719622992991907e-06,
+ "loss": 0.241,
+ "step": 323
+ },
+ {
+ "epoch": 2.43609022556391,
+ "grad_norm": 0.19983575891592678,
+ "learning_rate": 1.9716693400404097e-06,
+ "loss": 0.244,
+ "step": 324
+ },
+ {
+ "epoch": 2.443609022556391,
+ "grad_norm": 0.19733882492484098,
+ "learning_rate": 1.9713748801847136e-06,
+ "loss": 0.2459,
+ "step": 325
+ },
+ {
+ "epoch": 2.451127819548872,
+ "grad_norm": 0.19546242427017047,
+ "learning_rate": 1.97107892018685e-06,
+ "loss": 0.2432,
+ "step": 326
+ },
+ {
+ "epoch": 2.4586466165413534,
+ "grad_norm": 0.20384209666941736,
+ "learning_rate": 1.970781460503885e-06,
+ "loss": 0.2422,
+ "step": 327
+ },
+ {
+ "epoch": 2.4661654135338344,
+ "grad_norm": 0.19984975367786856,
+ "learning_rate": 1.9704825015952003e-06,
+ "loss": 0.243,
+ "step": 328
+ },
+ {
+ "epoch": 2.473684210526316,
+ "grad_norm": 0.19116945247018327,
+ "learning_rate": 1.970182043922493e-06,
+ "loss": 0.2508,
+ "step": 329
+ },
+ {
+ "epoch": 2.481203007518797,
+ "grad_norm": 0.1970731214269339,
+ "learning_rate": 1.9698800879497745e-06,
+ "loss": 0.2435,
+ "step": 330
+ },
+ {
+ "epoch": 2.488721804511278,
+ "grad_norm": 0.1853378428940598,
+ "learning_rate": 1.96957663414337e-06,
+ "loss": 0.2472,
+ "step": 331
+ },
+ {
+ "epoch": 2.4962406015037595,
+ "grad_norm": 0.19009533727475947,
+ "learning_rate": 1.9692716829719194e-06,
+ "loss": 0.2465,
+ "step": 332
+ },
+ {
+ "epoch": 2.5037593984962405,
+ "grad_norm": 0.20268063346879422,
+ "learning_rate": 1.9689652349063723e-06,
+ "loss": 0.2475,
+ "step": 333
+ },
+ {
+ "epoch": 2.511278195488722,
+ "grad_norm": 0.18865242070505836,
+ "learning_rate": 1.9686572904199926e-06,
+ "loss": 0.2359,
+ "step": 334
+ },
+ {
+ "epoch": 2.518796992481203,
+ "grad_norm": 0.19287057992216802,
+ "learning_rate": 1.9683478499883537e-06,
+ "loss": 0.2311,
+ "step": 335
+ },
+ {
+ "epoch": 2.526315789473684,
+ "grad_norm": 0.1967340604072739,
+ "learning_rate": 1.9680369140893403e-06,
+ "loss": 0.2445,
+ "step": 336
+ },
+ {
+ "epoch": 2.5338345864661656,
+ "grad_norm": 0.20654413738617766,
+ "learning_rate": 1.9677244832031454e-06,
+ "loss": 0.2562,
+ "step": 337
+ },
+ {
+ "epoch": 2.5413533834586466,
+ "grad_norm": 0.19186423256068216,
+ "learning_rate": 1.9674105578122716e-06,
+ "loss": 0.231,
+ "step": 338
+ },
+ {
+ "epoch": 2.548872180451128,
+ "grad_norm": 0.19552758394600714,
+ "learning_rate": 1.9670951384015297e-06,
+ "loss": 0.2338,
+ "step": 339
+ },
+ {
+ "epoch": 2.556390977443609,
+ "grad_norm": 0.182132993161689,
+ "learning_rate": 1.9667782254580374e-06,
+ "loss": 0.2479,
+ "step": 340
+ },
+ {
+ "epoch": 2.56390977443609,
+ "grad_norm": 0.1915633975237416,
+ "learning_rate": 1.966459819471218e-06,
+ "loss": 0.2355,
+ "step": 341
+ },
+ {
+ "epoch": 2.571428571428571,
+ "grad_norm": 0.19457984215684543,
+ "learning_rate": 1.9661399209328027e-06,
+ "loss": 0.2461,
+ "step": 342
+ },
+ {
+ "epoch": 2.5789473684210527,
+ "grad_norm": 0.1950235701772267,
+ "learning_rate": 1.965818530336827e-06,
+ "loss": 0.2397,
+ "step": 343
+ },
+ {
+ "epoch": 2.5864661654135337,
+ "grad_norm": 0.19445938265267243,
+ "learning_rate": 1.965495648179629e-06,
+ "loss": 0.2443,
+ "step": 344
+ },
+ {
+ "epoch": 2.593984962406015,
+ "grad_norm": 0.18992872952140813,
+ "learning_rate": 1.9651712749598523e-06,
+ "loss": 0.2356,
+ "step": 345
+ },
+ {
+ "epoch": 2.601503759398496,
+ "grad_norm": 0.21283317045968791,
+ "learning_rate": 1.9648454111784418e-06,
+ "loss": 0.252,
+ "step": 346
+ },
+ {
+ "epoch": 2.6090225563909772,
+ "grad_norm": 0.1958070954179829,
+ "learning_rate": 1.964518057338646e-06,
+ "loss": 0.241,
+ "step": 347
+ },
+ {
+ "epoch": 2.6165413533834587,
+ "grad_norm": 0.18985442853305448,
+ "learning_rate": 1.964189213946013e-06,
+ "loss": 0.2309,
+ "step": 348
+ },
+ {
+ "epoch": 2.6240601503759398,
+ "grad_norm": 0.19847111440281454,
+ "learning_rate": 1.963858881508392e-06,
+ "loss": 0.2319,
+ "step": 349
+ },
+ {
+ "epoch": 2.6315789473684212,
+ "grad_norm": 0.19725056461549087,
+ "learning_rate": 1.9635270605359315e-06,
+ "loss": 0.2414,
+ "step": 350
+ },
+ {
+ "epoch": 2.6390977443609023,
+ "grad_norm": 0.19130379959307953,
+ "learning_rate": 1.963193751541079e-06,
+ "loss": 0.2413,
+ "step": 351
+ },
+ {
+ "epoch": 2.6466165413533833,
+ "grad_norm": 0.19547347809242288,
+ "learning_rate": 1.962858955038581e-06,
+ "loss": 0.2427,
+ "step": 352
+ },
+ {
+ "epoch": 2.654135338345865,
+ "grad_norm": 0.19622877217607582,
+ "learning_rate": 1.9625226715454787e-06,
+ "loss": 0.25,
+ "step": 353
+ },
+ {
+ "epoch": 2.661654135338346,
+ "grad_norm": 0.19655766224710372,
+ "learning_rate": 1.9621849015811122e-06,
+ "loss": 0.2412,
+ "step": 354
+ },
+ {
+ "epoch": 2.6691729323308273,
+ "grad_norm": 0.20744169295367576,
+ "learning_rate": 1.9618456456671163e-06,
+ "loss": 0.2457,
+ "step": 355
+ },
+ {
+ "epoch": 2.6766917293233083,
+ "grad_norm": 0.1966150744959704,
+ "learning_rate": 1.9615049043274204e-06,
+ "loss": 0.2403,
+ "step": 356
+ },
+ {
+ "epoch": 2.6842105263157894,
+ "grad_norm": 0.1987303234032689,
+ "learning_rate": 1.9611626780882484e-06,
+ "loss": 0.2483,
+ "step": 357
+ },
+ {
+ "epoch": 2.6917293233082704,
+ "grad_norm": 0.19477501864212735,
+ "learning_rate": 1.960818967478117e-06,
+ "loss": 0.2336,
+ "step": 358
+ },
+ {
+ "epoch": 2.699248120300752,
+ "grad_norm": 0.20151474667032387,
+ "learning_rate": 1.9604737730278354e-06,
+ "loss": 0.2436,
+ "step": 359
+ },
+ {
+ "epoch": 2.706766917293233,
+ "grad_norm": 0.19492124914672437,
+ "learning_rate": 1.960127095270505e-06,
+ "loss": 0.2474,
+ "step": 360
+ },
+ {
+ "epoch": 2.7142857142857144,
+ "grad_norm": 0.2224145296805687,
+ "learning_rate": 1.9597789347415167e-06,
+ "loss": 0.2594,
+ "step": 361
+ },
+ {
+ "epoch": 2.7218045112781954,
+ "grad_norm": 0.2111889607814855,
+ "learning_rate": 1.959429291978552e-06,
+ "loss": 0.2419,
+ "step": 362
+ },
+ {
+ "epoch": 2.7293233082706765,
+ "grad_norm": 0.1989768479824121,
+ "learning_rate": 1.959078167521582e-06,
+ "loss": 0.2553,
+ "step": 363
+ },
+ {
+ "epoch": 2.736842105263158,
+ "grad_norm": 0.1942836722164412,
+ "learning_rate": 1.9587255619128646e-06,
+ "loss": 0.2379,
+ "step": 364
+ },
+ {
+ "epoch": 2.744360902255639,
+ "grad_norm": 0.2126741293179537,
+ "learning_rate": 1.9583714756969473e-06,
+ "loss": 0.2358,
+ "step": 365
+ },
+ {
+ "epoch": 2.7518796992481205,
+ "grad_norm": 0.2082438507030802,
+ "learning_rate": 1.9580159094206617e-06,
+ "loss": 0.2486,
+ "step": 366
+ },
+ {
+ "epoch": 2.7593984962406015,
+ "grad_norm": 0.19330234250031067,
+ "learning_rate": 1.9576588636331273e-06,
+ "loss": 0.2404,
+ "step": 367
+ },
+ {
+ "epoch": 2.7669172932330826,
+ "grad_norm": 0.19654323086419412,
+ "learning_rate": 1.9573003388857475e-06,
+ "loss": 0.24,
+ "step": 368
+ },
+ {
+ "epoch": 2.774436090225564,
+ "grad_norm": 0.22092551249845724,
+ "learning_rate": 1.956940335732209e-06,
+ "loss": 0.2389,
+ "step": 369
+ },
+ {
+ "epoch": 2.781954887218045,
+ "grad_norm": 0.18559889990850353,
+ "learning_rate": 1.9565788547284824e-06,
+ "loss": 0.2418,
+ "step": 370
+ },
+ {
+ "epoch": 2.7894736842105265,
+ "grad_norm": 0.20787538793216276,
+ "learning_rate": 1.956215896432822e-06,
+ "loss": 0.2361,
+ "step": 371
+ },
+ {
+ "epoch": 2.7969924812030076,
+ "grad_norm": 0.20375467225778632,
+ "learning_rate": 1.9558514614057607e-06,
+ "loss": 0.2312,
+ "step": 372
+ },
+ {
+ "epoch": 2.8045112781954886,
+ "grad_norm": 0.18715731048639028,
+ "learning_rate": 1.955485550210114e-06,
+ "loss": 0.2398,
+ "step": 373
+ },
+ {
+ "epoch": 2.8120300751879697,
+ "grad_norm": 0.20936755513939143,
+ "learning_rate": 1.955118163410977e-06,
+ "loss": 0.2384,
+ "step": 374
+ },
+ {
+ "epoch": 2.819548872180451,
+ "grad_norm": 0.21169834455256317,
+ "learning_rate": 1.9547493015757233e-06,
+ "loss": 0.2439,
+ "step": 375
+ },
+ {
+ "epoch": 2.827067669172932,
+ "grad_norm": 0.20946134256340637,
+ "learning_rate": 1.954378965274004e-06,
+ "loss": 0.2485,
+ "step": 376
+ },
+ {
+ "epoch": 2.8345864661654137,
+ "grad_norm": 0.20207397510844535,
+ "learning_rate": 1.9540071550777475e-06,
+ "loss": 0.2362,
+ "step": 377
+ },
+ {
+ "epoch": 2.8421052631578947,
+ "grad_norm": 0.21118588081226847,
+ "learning_rate": 1.9536338715611593e-06,
+ "loss": 0.2335,
+ "step": 378
+ },
+ {
+ "epoch": 2.8496240601503757,
+ "grad_norm": 0.20278139229070902,
+ "learning_rate": 1.953259115300719e-06,
+ "loss": 0.2422,
+ "step": 379
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 0.21158310631932312,
+ "learning_rate": 1.9528828868751815e-06,
+ "loss": 0.2353,
+ "step": 380
+ },
+ {
+ "epoch": 2.8646616541353382,
+ "grad_norm": 0.21819443430561322,
+ "learning_rate": 1.9525051868655753e-06,
+ "loss": 0.2531,
+ "step": 381
+ },
+ {
+ "epoch": 2.8721804511278197,
+ "grad_norm": 0.19927017841336564,
+ "learning_rate": 1.9521260158552004e-06,
+ "loss": 0.2485,
+ "step": 382
+ },
+ {
+ "epoch": 2.8796992481203008,
+ "grad_norm": 0.20137909059684517,
+ "learning_rate": 1.9517453744296294e-06,
+ "loss": 0.2369,
+ "step": 383
+ },
+ {
+ "epoch": 2.887218045112782,
+ "grad_norm": 0.20118967634633347,
+ "learning_rate": 1.9513632631767062e-06,
+ "loss": 0.2451,
+ "step": 384
+ },
+ {
+ "epoch": 2.8947368421052633,
+ "grad_norm": 0.19305958750055985,
+ "learning_rate": 1.9509796826865433e-06,
+ "loss": 0.2417,
+ "step": 385
+ },
+ {
+ "epoch": 2.9022556390977443,
+ "grad_norm": 0.19545854301379467,
+ "learning_rate": 1.950594633551524e-06,
+ "loss": 0.246,
+ "step": 386
+ },
+ {
+ "epoch": 2.909774436090226,
+ "grad_norm": 0.19511566760558136,
+ "learning_rate": 1.950208116366298e-06,
+ "loss": 0.2405,
+ "step": 387
+ },
+ {
+ "epoch": 2.917293233082707,
+ "grad_norm": 0.20379181287518927,
+ "learning_rate": 1.949820131727783e-06,
+ "loss": 0.2368,
+ "step": 388
+ },
+ {
+ "epoch": 2.924812030075188,
+ "grad_norm": 0.20261322659803302,
+ "learning_rate": 1.949430680235162e-06,
+ "loss": 0.2382,
+ "step": 389
+ },
+ {
+ "epoch": 2.932330827067669,
+ "grad_norm": 0.20292716905169822,
+ "learning_rate": 1.9490397624898857e-06,
+ "loss": 0.2391,
+ "step": 390
+ },
+ {
+ "epoch": 2.9398496240601504,
+ "grad_norm": 0.20543165871441496,
+ "learning_rate": 1.9486473790956668e-06,
+ "loss": 0.2527,
+ "step": 391
+ },
+ {
+ "epoch": 2.9473684210526314,
+ "grad_norm": 0.19761313315033452,
+ "learning_rate": 1.9482535306584824e-06,
+ "loss": 0.2455,
+ "step": 392
+ },
+ {
+ "epoch": 2.954887218045113,
+ "grad_norm": 0.18253307216617667,
+ "learning_rate": 1.947858217786572e-06,
+ "loss": 0.2382,
+ "step": 393
+ },
+ {
+ "epoch": 2.962406015037594,
+ "grad_norm": 0.2137243032720428,
+ "learning_rate": 1.947461441090437e-06,
+ "loss": 0.2439,
+ "step": 394
+ },
+ {
+ "epoch": 2.969924812030075,
+ "grad_norm": 0.20600501066171836,
+ "learning_rate": 1.9470632011828395e-06,
+ "loss": 0.2468,
+ "step": 395
+ },
+ {
+ "epoch": 2.9774436090225564,
+ "grad_norm": 0.2062922907756319,
+ "learning_rate": 1.9466634986788002e-06,
+ "loss": 0.2314,
+ "step": 396
+ },
+ {
+ "epoch": 2.9849624060150375,
+ "grad_norm": 0.19102357358556024,
+ "learning_rate": 1.9462623341956005e-06,
+ "loss": 0.2438,
+ "step": 397
+ },
+ {
+ "epoch": 2.992481203007519,
+ "grad_norm": 0.19527642295935607,
+ "learning_rate": 1.945859708352777e-06,
+ "loss": 0.2398,
+ "step": 398
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 0.21461114451132063,
+ "learning_rate": 1.945455621772126e-06,
+ "loss": 0.2449,
+ "step": 399
+ },
+ {
+ "epoch": 3.0,
+ "eval_loss": 0.23847579956054688,
+ "eval_runtime": 36.2594,
+ "eval_samples_per_second": 12.328,
+ "eval_steps_per_second": 0.193,
+ "step": 399
+ },
+ {
+ "epoch": 3.007518796992481,
+ "grad_norm": 0.22735719083546027,
+ "learning_rate": 1.9450500750776984e-06,
+ "loss": 0.2347,
+ "step": 400
+ },
+ {
+ "epoch": 3.0150375939849625,
+ "grad_norm": 0.18891434245058275,
+ "learning_rate": 1.9446430688957987e-06,
+ "loss": 0.2247,
+ "step": 401
+ },
+ {
+ "epoch": 3.0225563909774436,
+ "grad_norm": 0.21985837658809348,
+ "learning_rate": 1.944234603854988e-06,
+ "loss": 0.2302,
+ "step": 402
+ },
+ {
+ "epoch": 3.030075187969925,
+ "grad_norm": 0.21657735471044617,
+ "learning_rate": 1.9438246805860783e-06,
+ "loss": 0.2289,
+ "step": 403
+ },
+ {
+ "epoch": 3.037593984962406,
+ "grad_norm": 0.19615964239445943,
+ "learning_rate": 1.9434132997221345e-06,
+ "loss": 0.235,
+ "step": 404
+ },
+ {
+ "epoch": 3.045112781954887,
+ "grad_norm": 0.20453980754007275,
+ "learning_rate": 1.943000461898472e-06,
+ "loss": 0.2309,
+ "step": 405
+ },
+ {
+ "epoch": 3.0526315789473686,
+ "grad_norm": 0.2164744222059578,
+ "learning_rate": 1.9425861677526575e-06,
+ "loss": 0.2256,
+ "step": 406
+ },
+ {
+ "epoch": 3.0601503759398496,
+ "grad_norm": 0.19404821724439614,
+ "learning_rate": 1.942170417924505e-06,
+ "loss": 0.2439,
+ "step": 407
+ },
+ {
+ "epoch": 3.0676691729323307,
+ "grad_norm": 0.20948019576623994,
+ "learning_rate": 1.941753213056078e-06,
+ "loss": 0.2353,
+ "step": 408
+ },
+ {
+ "epoch": 3.075187969924812,
+ "grad_norm": 0.2206945217510342,
+ "learning_rate": 1.9413345537916864e-06,
+ "loss": 0.2249,
+ "step": 409
+ },
+ {
+ "epoch": 3.082706766917293,
+ "grad_norm": 0.19222656923802034,
+ "learning_rate": 1.9409144407778865e-06,
+ "loss": 0.2209,
+ "step": 410
+ },
+ {
+ "epoch": 3.090225563909774,
+ "grad_norm": 0.20914493940502554,
+ "learning_rate": 1.9404928746634793e-06,
+ "loss": 0.237,
+ "step": 411
+ },
+ {
+ "epoch": 3.0977443609022557,
+ "grad_norm": 0.2019554770052209,
+ "learning_rate": 1.94006985609951e-06,
+ "loss": 0.2394,
+ "step": 412
+ },
+ {
+ "epoch": 3.1052631578947367,
+ "grad_norm": 0.19776293093069106,
+ "learning_rate": 1.9396453857392677e-06,
+ "loss": 0.2371,
+ "step": 413
+ },
+ {
+ "epoch": 3.112781954887218,
+ "grad_norm": 0.19627698094833446,
+ "learning_rate": 1.9392194642382825e-06,
+ "loss": 0.2199,
+ "step": 414
+ },
+ {
+ "epoch": 3.1203007518796992,
+ "grad_norm": 0.1945281108418733,
+ "learning_rate": 1.938792092254326e-06,
+ "loss": 0.2229,
+ "step": 415
+ },
+ {
+ "epoch": 3.1278195488721803,
+ "grad_norm": 0.2092316132536349,
+ "learning_rate": 1.9383632704474103e-06,
+ "loss": 0.2249,
+ "step": 416
+ },
+ {
+ "epoch": 3.1353383458646618,
+ "grad_norm": 0.20211635419042479,
+ "learning_rate": 1.9379329994797854e-06,
+ "loss": 0.2311,
+ "step": 417
+ },
+ {
+ "epoch": 3.142857142857143,
+ "grad_norm": 0.2023756264068621,
+ "learning_rate": 1.9375012800159404e-06,
+ "loss": 0.2276,
+ "step": 418
+ },
+ {
+ "epoch": 3.1503759398496243,
+ "grad_norm": 0.22462651224010444,
+ "learning_rate": 1.9370681127226004e-06,
+ "loss": 0.2278,
+ "step": 419
+ },
+ {
+ "epoch": 3.1578947368421053,
+ "grad_norm": 0.19703184913444993,
+ "learning_rate": 1.936633498268728e-06,
+ "loss": 0.2409,
+ "step": 420
+ },
+ {
+ "epoch": 3.1654135338345863,
+ "grad_norm": 0.19238575687266218,
+ "learning_rate": 1.9361974373255187e-06,
+ "loss": 0.2308,
+ "step": 421
+ },
+ {
+ "epoch": 3.172932330827068,
+ "grad_norm": 0.21558877289384376,
+ "learning_rate": 1.935759930566404e-06,
+ "loss": 0.238,
+ "step": 422
+ },
+ {
+ "epoch": 3.180451127819549,
+ "grad_norm": 0.19182516643861944,
+ "learning_rate": 1.9353209786670465e-06,
+ "loss": 0.2287,
+ "step": 423
+ },
+ {
+ "epoch": 3.18796992481203,
+ "grad_norm": 0.1928675430940991,
+ "learning_rate": 1.934880582305341e-06,
+ "loss": 0.2371,
+ "step": 424
+ },
+ {
+ "epoch": 3.1954887218045114,
+ "grad_norm": 0.19786049021082694,
+ "learning_rate": 1.934438742161414e-06,
+ "loss": 0.2472,
+ "step": 425
+ },
+ {
+ "epoch": 3.2030075187969924,
+ "grad_norm": 0.20856651124858802,
+ "learning_rate": 1.933995458917621e-06,
+ "loss": 0.2377,
+ "step": 426
+ },
+ {
+ "epoch": 3.2105263157894735,
+ "grad_norm": 0.20006020643964978,
+ "learning_rate": 1.933550733258546e-06,
+ "loss": 0.2346,
+ "step": 427
+ },
+ {
+ "epoch": 3.218045112781955,
+ "grad_norm": 0.1965923809488627,
+ "learning_rate": 1.9331045658710007e-06,
+ "loss": 0.229,
+ "step": 428
+ },
+ {
+ "epoch": 3.225563909774436,
+ "grad_norm": 0.1940679620814447,
+ "learning_rate": 1.9326569574440237e-06,
+ "loss": 0.2308,
+ "step": 429
+ },
+ {
+ "epoch": 3.2330827067669174,
+ "grad_norm": 0.20133312649896476,
+ "learning_rate": 1.9322079086688784e-06,
+ "loss": 0.235,
+ "step": 430
+ },
+ {
+ "epoch": 3.2406015037593985,
+ "grad_norm": 0.2068441599247633,
+ "learning_rate": 1.931757420239053e-06,
+ "loss": 0.2302,
+ "step": 431
+ },
+ {
+ "epoch": 3.2481203007518795,
+ "grad_norm": 0.19669320941153315,
+ "learning_rate": 1.9313054928502594e-06,
+ "loss": 0.228,
+ "step": 432
+ },
+ {
+ "epoch": 3.255639097744361,
+ "grad_norm": 0.18777182229960168,
+ "learning_rate": 1.930852127200431e-06,
+ "loss": 0.2225,
+ "step": 433
+ },
+ {
+ "epoch": 3.263157894736842,
+ "grad_norm": 0.19464605167755833,
+ "learning_rate": 1.930397323989723e-06,
+ "loss": 0.2299,
+ "step": 434
+ },
+ {
+ "epoch": 3.2706766917293235,
+ "grad_norm": 0.1991922099054772,
+ "learning_rate": 1.9299410839205105e-06,
+ "loss": 0.2283,
+ "step": 435
+ },
+ {
+ "epoch": 3.2781954887218046,
+ "grad_norm": 0.20270645065732054,
+ "learning_rate": 1.9294834076973868e-06,
+ "loss": 0.2273,
+ "step": 436
+ },
+ {
+ "epoch": 3.2857142857142856,
+ "grad_norm": 0.2031929180409059,
+ "learning_rate": 1.929024296027165e-06,
+ "loss": 0.2224,
+ "step": 437
+ },
+ {
+ "epoch": 3.293233082706767,
+ "grad_norm": 0.2017846878027564,
+ "learning_rate": 1.9285637496188733e-06,
+ "loss": 0.2353,
+ "step": 438
+ },
+ {
+ "epoch": 3.300751879699248,
+ "grad_norm": 0.2041345857649578,
+ "learning_rate": 1.9281017691837564e-06,
+ "loss": 0.2386,
+ "step": 439
+ },
+ {
+ "epoch": 3.308270676691729,
+ "grad_norm": 0.20186285019685934,
+ "learning_rate": 1.927638355435273e-06,
+ "loss": 0.2269,
+ "step": 440
+ },
+ {
+ "epoch": 3.3157894736842106,
+ "grad_norm": 0.20068602052236392,
+ "learning_rate": 1.9271735090890967e-06,
+ "loss": 0.2187,
+ "step": 441
+ },
+ {
+ "epoch": 3.3233082706766917,
+ "grad_norm": 0.19504317681861305,
+ "learning_rate": 1.926707230863112e-06,
+ "loss": 0.2235,
+ "step": 442
+ },
+ {
+ "epoch": 3.3308270676691727,
+ "grad_norm": 0.19858327125938505,
+ "learning_rate": 1.9262395214774157e-06,
+ "loss": 0.2291,
+ "step": 443
+ },
+ {
+ "epoch": 3.338345864661654,
+ "grad_norm": 0.2015463700142236,
+ "learning_rate": 1.925770381654314e-06,
+ "loss": 0.2395,
+ "step": 444
+ },
+ {
+ "epoch": 3.345864661654135,
+ "grad_norm": 0.21147368037353234,
+ "learning_rate": 1.9252998121183235e-06,
+ "loss": 0.2373,
+ "step": 445
+ },
+ {
+ "epoch": 3.3533834586466167,
+ "grad_norm": 0.19037417110654814,
+ "learning_rate": 1.9248278135961674e-06,
+ "loss": 0.2303,
+ "step": 446
+ },
+ {
+ "epoch": 3.3609022556390977,
+ "grad_norm": 0.19974020168352363,
+ "learning_rate": 1.9243543868167766e-06,
+ "loss": 0.2325,
+ "step": 447
+ },
+ {
+ "epoch": 3.3684210526315788,
+ "grad_norm": 0.2028392144021246,
+ "learning_rate": 1.9238795325112867e-06,
+ "loss": 0.2292,
+ "step": 448
+ },
+ {
+ "epoch": 3.3759398496240602,
+ "grad_norm": 0.19213917980687195,
+ "learning_rate": 1.9234032514130392e-06,
+ "loss": 0.2327,
+ "step": 449
+ },
+ {
+ "epoch": 3.3834586466165413,
+ "grad_norm": 0.20049336717854135,
+ "learning_rate": 1.922925544257579e-06,
+ "loss": 0.2311,
+ "step": 450
+ },
+ {
+ "epoch": 3.3909774436090228,
+ "grad_norm": 0.19482211839870706,
+ "learning_rate": 1.922446411782652e-06,
+ "loss": 0.2382,
+ "step": 451
+ },
+ {
+ "epoch": 3.398496240601504,
+ "grad_norm": 0.20253576740427545,
+ "learning_rate": 1.9219658547282065e-06,
+ "loss": 0.2239,
+ "step": 452
+ },
+ {
+ "epoch": 3.406015037593985,
+ "grad_norm": 0.19026278826323273,
+ "learning_rate": 1.9214838738363904e-06,
+ "loss": 0.2287,
+ "step": 453
+ },
+ {
+ "epoch": 3.4135338345864663,
+ "grad_norm": 0.22130558597435887,
+ "learning_rate": 1.921000469851551e-06,
+ "loss": 0.2325,
+ "step": 454
+ },
+ {
+ "epoch": 3.4210526315789473,
+ "grad_norm": 0.1987435946782899,
+ "learning_rate": 1.920515643520232e-06,
+ "loss": 0.2246,
+ "step": 455
+ },
+ {
+ "epoch": 3.4285714285714284,
+ "grad_norm": 0.20238395083094632,
+ "learning_rate": 1.9200293955911755e-06,
+ "loss": 0.2265,
+ "step": 456
+ },
+ {
+ "epoch": 3.43609022556391,
+ "grad_norm": 0.20409263494664734,
+ "learning_rate": 1.919541726815318e-06,
+ "loss": 0.2265,
+ "step": 457
+ },
+ {
+ "epoch": 3.443609022556391,
+ "grad_norm": 0.20956264391253082,
+ "learning_rate": 1.91905263794579e-06,
+ "loss": 0.2289,
+ "step": 458
+ },
+ {
+ "epoch": 3.451127819548872,
+ "grad_norm": 0.19012930628084693,
+ "learning_rate": 1.9185621297379155e-06,
+ "loss": 0.2236,
+ "step": 459
+ },
+ {
+ "epoch": 3.4586466165413534,
+ "grad_norm": 0.21110107730979777,
+ "learning_rate": 1.9180702029492114e-06,
+ "loss": 0.2277,
+ "step": 460
+ },
+ {
+ "epoch": 3.4661654135338344,
+ "grad_norm": 0.19652367618135355,
+ "learning_rate": 1.9175768583393843e-06,
+ "loss": 0.2294,
+ "step": 461
+ },
+ {
+ "epoch": 3.473684210526316,
+ "grad_norm": 0.19566357753232413,
+ "learning_rate": 1.9170820966703297e-06,
+ "loss": 0.2301,
+ "step": 462
+ },
+ {
+ "epoch": 3.481203007518797,
+ "grad_norm": 0.1934754478735947,
+ "learning_rate": 1.9165859187061336e-06,
+ "loss": 0.2383,
+ "step": 463
+ },
+ {
+ "epoch": 3.488721804511278,
+ "grad_norm": 0.20625030926956103,
+ "learning_rate": 1.9160883252130674e-06,
+ "loss": 0.2307,
+ "step": 464
+ },
+ {
+ "epoch": 3.4962406015037595,
+ "grad_norm": 0.20522599066242414,
+ "learning_rate": 1.9155893169595898e-06,
+ "loss": 0.2267,
+ "step": 465
+ },
+ {
+ "epoch": 3.5037593984962405,
+ "grad_norm": 0.2024423545702658,
+ "learning_rate": 1.9150888947163436e-06,
+ "loss": 0.2318,
+ "step": 466
+ },
+ {
+ "epoch": 3.511278195488722,
+ "grad_norm": 0.1985134265274267,
+ "learning_rate": 1.914587059256155e-06,
+ "loss": 0.2253,
+ "step": 467
+ },
+ {
+ "epoch": 3.518796992481203,
+ "grad_norm": 0.20612236542406084,
+ "learning_rate": 1.9140838113540346e-06,
+ "loss": 0.2325,
+ "step": 468
+ },
+ {
+ "epoch": 3.526315789473684,
+ "grad_norm": 0.20814462686729437,
+ "learning_rate": 1.913579151787172e-06,
+ "loss": 0.2282,
+ "step": 469
+ },
+ {
+ "epoch": 3.5338345864661656,
+ "grad_norm": 0.21153779781802692,
+ "learning_rate": 1.913073081334938e-06,
+ "loss": 0.2338,
+ "step": 470
+ },
+ {
+ "epoch": 3.5413533834586466,
+ "grad_norm": 0.19712395892652476,
+ "learning_rate": 1.912565600778882e-06,
+ "loss": 0.2221,
+ "step": 471
+ },
+ {
+ "epoch": 3.548872180451128,
+ "grad_norm": 0.202510369785287,
+ "learning_rate": 1.912056710902732e-06,
+ "loss": 0.2297,
+ "step": 472
+ },
+ {
+ "epoch": 3.556390977443609,
+ "grad_norm": 0.19753558910028685,
+ "learning_rate": 1.911546412492391e-06,
+ "loss": 0.2344,
+ "step": 473
+ },
+ {
+ "epoch": 3.56390977443609,
+ "grad_norm": 0.1998927422644448,
+ "learning_rate": 1.9110347063359382e-06,
+ "loss": 0.221,
+ "step": 474
+ },
+ {
+ "epoch": 3.571428571428571,
+ "grad_norm": 0.20486802965738626,
+ "learning_rate": 1.910521593223627e-06,
+ "loss": 0.2424,
+ "step": 475
+ },
+ {
+ "epoch": 3.5789473684210527,
+ "grad_norm": 0.20429814315423317,
+ "learning_rate": 1.910007073947883e-06,
+ "loss": 0.2277,
+ "step": 476
+ },
+ {
+ "epoch": 3.5864661654135337,
+ "grad_norm": 0.1908701028091995,
+ "learning_rate": 1.9094911493033035e-06,
+ "loss": 0.2186,
+ "step": 477
+ },
+ {
+ "epoch": 3.593984962406015,
+ "grad_norm": 0.19369743784407595,
+ "learning_rate": 1.908973820086657e-06,
+ "loss": 0.2397,
+ "step": 478
+ },
+ {
+ "epoch": 3.601503759398496,
+ "grad_norm": 0.201306298613633,
+ "learning_rate": 1.9084550870968805e-06,
+ "loss": 0.2267,
+ "step": 479
+ },
+ {
+ "epoch": 3.6090225563909772,
+ "grad_norm": 0.19541201658559798,
+ "learning_rate": 1.9079349511350783e-06,
+ "loss": 0.2274,
+ "step": 480
+ },
+ {
+ "epoch": 3.6165413533834587,
+ "grad_norm": 0.20992843504460798,
+ "learning_rate": 1.9074134130045223e-06,
+ "loss": 0.2256,
+ "step": 481
+ },
+ {
+ "epoch": 3.6240601503759398,
+ "grad_norm": 0.19527139585360248,
+ "learning_rate": 1.9068904735106499e-06,
+ "loss": 0.2306,
+ "step": 482
+ },
+ {
+ "epoch": 3.6315789473684212,
+ "grad_norm": 0.20063127762768995,
+ "learning_rate": 1.9063661334610622e-06,
+ "loss": 0.2332,
+ "step": 483
+ },
+ {
+ "epoch": 3.6390977443609023,
+ "grad_norm": 0.19794549514272364,
+ "learning_rate": 1.9058403936655232e-06,
+ "loss": 0.225,
+ "step": 484
+ },
+ {
+ "epoch": 3.6466165413533833,
+ "grad_norm": 0.20663431349104816,
+ "learning_rate": 1.905313254935959e-06,
+ "loss": 0.2344,
+ "step": 485
+ },
+ {
+ "epoch": 3.654135338345865,
+ "grad_norm": 0.19591344752071216,
+ "learning_rate": 1.9047847180864558e-06,
+ "loss": 0.2254,
+ "step": 486
+ },
+ {
+ "epoch": 3.661654135338346,
+ "grad_norm": 0.19128603941706102,
+ "learning_rate": 1.9042547839332595e-06,
+ "loss": 0.2265,
+ "step": 487
+ },
+ {
+ "epoch": 3.6691729323308273,
+ "grad_norm": 0.1950170802512337,
+ "learning_rate": 1.9037234532947735e-06,
+ "loss": 0.2248,
+ "step": 488
+ },
+ {
+ "epoch": 3.6766917293233083,
+ "grad_norm": 0.18947214882184568,
+ "learning_rate": 1.9031907269915574e-06,
+ "loss": 0.222,
+ "step": 489
+ },
+ {
+ "epoch": 3.6842105263157894,
+ "grad_norm": 0.19315326727374638,
+ "learning_rate": 1.9026566058463274e-06,
+ "loss": 0.2382,
+ "step": 490
+ },
+ {
+ "epoch": 3.6917293233082704,
+ "grad_norm": 0.2028569707677176,
+ "learning_rate": 1.9021210906839527e-06,
+ "loss": 0.2374,
+ "step": 491
+ },
+ {
+ "epoch": 3.699248120300752,
+ "grad_norm": 0.20937874719623992,
+ "learning_rate": 1.9015841823314558e-06,
+ "loss": 0.2246,
+ "step": 492
+ },
+ {
+ "epoch": 3.706766917293233,
+ "grad_norm": 0.19198746962279178,
+ "learning_rate": 1.901045881618011e-06,
+ "loss": 0.2168,
+ "step": 493
+ },
+ {
+ "epoch": 3.7142857142857144,
+ "grad_norm": 0.19642138712651477,
+ "learning_rate": 1.9005061893749427e-06,
+ "loss": 0.2294,
+ "step": 494
+ },
+ {
+ "epoch": 3.7218045112781954,
+ "grad_norm": 0.19622742865459594,
+ "learning_rate": 1.899965106435724e-06,
+ "loss": 0.2332,
+ "step": 495
+ },
+ {
+ "epoch": 3.7293233082706765,
+ "grad_norm": 0.20394224567009117,
+ "learning_rate": 1.899422633635976e-06,
+ "loss": 0.2354,
+ "step": 496
+ },
+ {
+ "epoch": 3.736842105263158,
+ "grad_norm": 0.19515305880212153,
+ "learning_rate": 1.8988787718134664e-06,
+ "loss": 0.2394,
+ "step": 497
+ },
+ {
+ "epoch": 3.744360902255639,
+ "grad_norm": 0.19477698022076606,
+ "learning_rate": 1.8983335218081078e-06,
+ "loss": 0.2262,
+ "step": 498
+ },
+ {
+ "epoch": 3.7518796992481205,
+ "grad_norm": 0.2021534910868333,
+ "learning_rate": 1.8977868844619569e-06,
+ "loss": 0.2179,
+ "step": 499
+ },
+ {
+ "epoch": 3.7593984962406015,
+ "grad_norm": 0.20206295997770263,
+ "learning_rate": 1.8972388606192122e-06,
+ "loss": 0.2337,
+ "step": 500
+ },
+ {
+ "epoch": 3.7669172932330826,
+ "grad_norm": 0.19882933829911015,
+ "learning_rate": 1.8966894511262144e-06,
+ "loss": 0.2201,
+ "step": 501
+ },
+ {
+ "epoch": 3.774436090225564,
+ "grad_norm": 0.20137019718832141,
+ "learning_rate": 1.8961386568314435e-06,
+ "loss": 0.2382,
+ "step": 502
+ },
+ {
+ "epoch": 3.781954887218045,
+ "grad_norm": 0.20461608638279044,
+ "learning_rate": 1.8955864785855185e-06,
+ "loss": 0.2266,
+ "step": 503
+ },
+ {
+ "epoch": 3.7894736842105265,
+ "grad_norm": 0.20221819904526592,
+ "learning_rate": 1.8950329172411951e-06,
+ "loss": 0.229,
+ "step": 504
+ },
+ {
+ "epoch": 3.7969924812030076,
+ "grad_norm": 0.20129913881762512,
+ "learning_rate": 1.8944779736533661e-06,
+ "loss": 0.221,
+ "step": 505
+ },
+ {
+ "epoch": 3.8045112781954886,
+ "grad_norm": 0.20403126542135588,
+ "learning_rate": 1.8939216486790574e-06,
+ "loss": 0.2227,
+ "step": 506
+ },
+ {
+ "epoch": 3.8120300751879697,
+ "grad_norm": 0.2126702973105884,
+ "learning_rate": 1.8933639431774298e-06,
+ "loss": 0.2362,
+ "step": 507
+ },
+ {
+ "epoch": 3.819548872180451,
+ "grad_norm": 0.20935219712018965,
+ "learning_rate": 1.8928048580097756e-06,
+ "loss": 0.2358,
+ "step": 508
+ },
+ {
+ "epoch": 3.827067669172932,
+ "grad_norm": 0.2007294772399819,
+ "learning_rate": 1.8922443940395168e-06,
+ "loss": 0.2243,
+ "step": 509
+ },
+ {
+ "epoch": 3.8345864661654137,
+ "grad_norm": 0.20928714669447468,
+ "learning_rate": 1.891682552132206e-06,
+ "loss": 0.2218,
+ "step": 510
+ },
+ {
+ "epoch": 3.8421052631578947,
+ "grad_norm": 0.21335226295910942,
+ "learning_rate": 1.8911193331555232e-06,
+ "loss": 0.2261,
+ "step": 511
+ },
+ {
+ "epoch": 3.8496240601503757,
+ "grad_norm": 0.19351164253959738,
+ "learning_rate": 1.8905547379792757e-06,
+ "loss": 0.2227,
+ "step": 512
+ },
+ {
+ "epoch": 3.857142857142857,
+ "grad_norm": 0.19974640824480266,
+ "learning_rate": 1.8899887674753957e-06,
+ "loss": 0.2306,
+ "step": 513
+ },
+ {
+ "epoch": 3.8646616541353382,
+ "grad_norm": 0.19580038180455786,
+ "learning_rate": 1.8894214225179387e-06,
+ "loss": 0.2239,
+ "step": 514
+ },
+ {
+ "epoch": 3.8721804511278197,
+ "grad_norm": 0.21386327989060014,
+ "learning_rate": 1.8888527039830841e-06,
+ "loss": 0.2482,
+ "step": 515
+ },
+ {
+ "epoch": 3.8796992481203008,
+ "grad_norm": 0.1991978986466627,
+ "learning_rate": 1.8882826127491318e-06,
+ "loss": 0.2412,
+ "step": 516
+ },
+ {
+ "epoch": 3.887218045112782,
+ "grad_norm": 0.20056960105503202,
+ "learning_rate": 1.887711149696502e-06,
+ "loss": 0.2375,
+ "step": 517
+ },
+ {
+ "epoch": 3.8947368421052633,
+ "grad_norm": 0.1975807900366465,
+ "learning_rate": 1.887138315707733e-06,
+ "loss": 0.2273,
+ "step": 518
+ },
+ {
+ "epoch": 3.9022556390977443,
+ "grad_norm": 0.21650648685578147,
+ "learning_rate": 1.8865641116674808e-06,
+ "loss": 0.2206,
+ "step": 519
+ },
+ {
+ "epoch": 3.909774436090226,
+ "grad_norm": 0.21079473796198325,
+ "learning_rate": 1.885988538462517e-06,
+ "loss": 0.2262,
+ "step": 520
+ },
+ {
+ "epoch": 3.917293233082707,
+ "grad_norm": 0.20692792745046262,
+ "learning_rate": 1.8854115969817276e-06,
+ "loss": 0.2216,
+ "step": 521
+ },
+ {
+ "epoch": 3.924812030075188,
+ "grad_norm": 0.2079297656826261,
+ "learning_rate": 1.8848332881161121e-06,
+ "loss": 0.2362,
+ "step": 522
+ },
+ {
+ "epoch": 3.932330827067669,
+ "grad_norm": 0.19831223073108925,
+ "learning_rate": 1.8842536127587812e-06,
+ "loss": 0.2308,
+ "step": 523
+ },
+ {
+ "epoch": 3.9398496240601504,
+ "grad_norm": 0.20863095832765988,
+ "learning_rate": 1.883672571804956e-06,
+ "loss": 0.2348,
+ "step": 524
+ },
+ {
+ "epoch": 3.9473684210526314,
+ "grad_norm": 0.20227595832624987,
+ "learning_rate": 1.8830901661519672e-06,
+ "loss": 0.232,
+ "step": 525
+ },
+ {
+ "epoch": 3.954887218045113,
+ "grad_norm": 0.19142117679825407,
+ "learning_rate": 1.8825063966992523e-06,
+ "loss": 0.2234,
+ "step": 526
+ },
+ {
+ "epoch": 3.962406015037594,
+ "grad_norm": 0.21465010056892803,
+ "learning_rate": 1.8819212643483548e-06,
+ "loss": 0.2337,
+ "step": 527
+ },
+ {
+ "epoch": 3.969924812030075,
+ "grad_norm": 0.20665882579883493,
+ "learning_rate": 1.8813347700029242e-06,
+ "loss": 0.2239,
+ "step": 528
+ },
+ {
+ "epoch": 3.9774436090225564,
+ "grad_norm": 0.19465222161526435,
+ "learning_rate": 1.8807469145687127e-06,
+ "loss": 0.2306,
+ "step": 529
+ },
+ {
+ "epoch": 3.9849624060150375,
+ "grad_norm": 0.22044630316263056,
+ "learning_rate": 1.8801576989535741e-06,
+ "loss": 0.2209,
+ "step": 530
+ },
+ {
+ "epoch": 3.992481203007519,
+ "grad_norm": 0.19344101189522747,
+ "learning_rate": 1.8795671240674631e-06,
+ "loss": 0.2356,
+ "step": 531
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 0.19248216375305013,
+ "learning_rate": 1.8789751908224336e-06,
+ "loss": 0.2206,
+ "step": 532
+ },
+ {
+ "epoch": 4.0,
+ "eval_loss": 0.23594319820404053,
+ "eval_runtime": 36.505,
+ "eval_samples_per_second": 12.245,
+ "eval_steps_per_second": 0.192,
+ "step": 532
+ },
+ {
+ "epoch": 4.007518796992481,
+ "grad_norm": 0.22550307443507323,
+ "learning_rate": 1.8783819001326378e-06,
+ "loss": 0.2167,
+ "step": 533
+ },
+ {
+ "epoch": 4.015037593984962,
+ "grad_norm": 0.2049263903613823,
+ "learning_rate": 1.8777872529143233e-06,
+ "loss": 0.2221,
+ "step": 534
+ },
+ {
+ "epoch": 4.022556390977444,
+ "grad_norm": 0.2158097684680537,
+ "learning_rate": 1.8771912500858333e-06,
+ "loss": 0.2232,
+ "step": 535
+ },
+ {
+ "epoch": 4.030075187969925,
+ "grad_norm": 0.22115355650872343,
+ "learning_rate": 1.8765938925676044e-06,
+ "loss": 0.2079,
+ "step": 536
+ },
+ {
+ "epoch": 4.037593984962406,
+ "grad_norm": 0.22405542611443827,
+ "learning_rate": 1.8759951812821654e-06,
+ "loss": 0.2302,
+ "step": 537
+ },
+ {
+ "epoch": 4.045112781954887,
+ "grad_norm": 0.2135192613829278,
+ "learning_rate": 1.8753951171541357e-06,
+ "loss": 0.2162,
+ "step": 538
+ },
+ {
+ "epoch": 4.052631578947368,
+ "grad_norm": 0.20702012929185165,
+ "learning_rate": 1.8747937011102237e-06,
+ "loss": 0.2239,
+ "step": 539
+ },
+ {
+ "epoch": 4.06015037593985,
+ "grad_norm": 0.2142756420729311,
+ "learning_rate": 1.8741909340792259e-06,
+ "loss": 0.2137,
+ "step": 540
+ },
+ {
+ "epoch": 4.067669172932331,
+ "grad_norm": 0.23454923773738107,
+ "learning_rate": 1.8735868169920255e-06,
+ "loss": 0.2209,
+ "step": 541
+ },
+ {
+ "epoch": 4.075187969924812,
+ "grad_norm": 0.2092969685339865,
+ "learning_rate": 1.8729813507815901e-06,
+ "loss": 0.2201,
+ "step": 542
+ },
+ {
+ "epoch": 4.082706766917293,
+ "grad_norm": 0.18881233452795668,
+ "learning_rate": 1.8723745363829711e-06,
+ "loss": 0.2203,
+ "step": 543
+ },
+ {
+ "epoch": 4.090225563909774,
+ "grad_norm": 0.21626865440038207,
+ "learning_rate": 1.8717663747333016e-06,
+ "loss": 0.2131,
+ "step": 544
+ },
+ {
+ "epoch": 4.097744360902255,
+ "grad_norm": 0.221595054851236,
+ "learning_rate": 1.871156866771796e-06,
+ "loss": 0.2172,
+ "step": 545
+ },
+ {
+ "epoch": 4.105263157894737,
+ "grad_norm": 0.20006220815316436,
+ "learning_rate": 1.870546013439748e-06,
+ "loss": 0.2123,
+ "step": 546
+ },
+ {
+ "epoch": 4.112781954887218,
+ "grad_norm": 0.19937438855805062,
+ "learning_rate": 1.8699338156805275e-06,
+ "loss": 0.2217,
+ "step": 547
+ },
+ {
+ "epoch": 4.120300751879699,
+ "grad_norm": 0.20766114395087557,
+ "learning_rate": 1.8693202744395827e-06,
+ "loss": 0.2169,
+ "step": 548
+ },
+ {
+ "epoch": 4.12781954887218,
+ "grad_norm": 0.19399507547790584,
+ "learning_rate": 1.8687053906644347e-06,
+ "loss": 0.2076,
+ "step": 549
+ },
+ {
+ "epoch": 4.135338345864661,
+ "grad_norm": 0.23758662818743473,
+ "learning_rate": 1.8680891653046796e-06,
+ "loss": 0.2044,
+ "step": 550
+ },
+ {
+ "epoch": 4.142857142857143,
+ "grad_norm": 0.20626980472771786,
+ "learning_rate": 1.8674715993119842e-06,
+ "loss": 0.224,
+ "step": 551
+ },
+ {
+ "epoch": 4.150375939849624,
+ "grad_norm": 0.19490335202762268,
+ "learning_rate": 1.866852693640086e-06,
+ "loss": 0.2214,
+ "step": 552
+ },
+ {
+ "epoch": 4.157894736842105,
+ "grad_norm": 0.20359305414743661,
+ "learning_rate": 1.866232449244792e-06,
+ "loss": 0.2207,
+ "step": 553
+ },
+ {
+ "epoch": 4.165413533834586,
+ "grad_norm": 0.1984071981790606,
+ "learning_rate": 1.8656108670839764e-06,
+ "loss": 0.2179,
+ "step": 554
+ },
+ {
+ "epoch": 4.172932330827067,
+ "grad_norm": 0.19441461116307968,
+ "learning_rate": 1.8649879481175788e-06,
+ "loss": 0.2126,
+ "step": 555
+ },
+ {
+ "epoch": 4.180451127819548,
+ "grad_norm": 0.20428551675457948,
+ "learning_rate": 1.8643636933076036e-06,
+ "loss": 0.2146,
+ "step": 556
+ },
+ {
+ "epoch": 4.18796992481203,
+ "grad_norm": 0.20869400625878837,
+ "learning_rate": 1.8637381036181188e-06,
+ "loss": 0.2197,
+ "step": 557
+ },
+ {
+ "epoch": 4.195488721804511,
+ "grad_norm": 0.20096994626302586,
+ "learning_rate": 1.863111180015253e-06,
+ "loss": 0.2186,
+ "step": 558
+ },
+ {
+ "epoch": 4.203007518796992,
+ "grad_norm": 0.2071609158535847,
+ "learning_rate": 1.8624829234671956e-06,
+ "loss": 0.2095,
+ "step": 559
+ },
+ {
+ "epoch": 4.2105263157894735,
+ "grad_norm": 0.200404982206074,
+ "learning_rate": 1.8618533349441936e-06,
+ "loss": 0.2169,
+ "step": 560
+ },
+ {
+ "epoch": 4.2180451127819545,
+ "grad_norm": 0.2043994932186772,
+ "learning_rate": 1.8612224154185524e-06,
+ "loss": 0.2176,
+ "step": 561
+ },
+ {
+ "epoch": 4.225563909774436,
+ "grad_norm": 0.2018129365010245,
+ "learning_rate": 1.8605901658646316e-06,
+ "loss": 0.2199,
+ "step": 562
+ },
+ {
+ "epoch": 4.2330827067669174,
+ "grad_norm": 0.1865700663057004,
+ "learning_rate": 1.8599565872588454e-06,
+ "loss": 0.2193,
+ "step": 563
+ },
+ {
+ "epoch": 4.2406015037593985,
+ "grad_norm": 0.20411040584803042,
+ "learning_rate": 1.859321680579661e-06,
+ "loss": 0.2152,
+ "step": 564
+ },
+ {
+ "epoch": 4.2481203007518795,
+ "grad_norm": 0.1907811503539639,
+ "learning_rate": 1.8586854468075955e-06,
+ "loss": 0.2137,
+ "step": 565
+ },
+ {
+ "epoch": 4.2556390977443606,
+ "grad_norm": 0.19499880633554847,
+ "learning_rate": 1.8580478869252167e-06,
+ "loss": 0.216,
+ "step": 566
+ },
+ {
+ "epoch": 4.2631578947368425,
+ "grad_norm": 0.2002676691417769,
+ "learning_rate": 1.8574090019171393e-06,
+ "loss": 0.2142,
+ "step": 567
+ },
+ {
+ "epoch": 4.2706766917293235,
+ "grad_norm": 0.20281183249528997,
+ "learning_rate": 1.8567687927700252e-06,
+ "loss": 0.2241,
+ "step": 568
+ },
+ {
+ "epoch": 4.2781954887218046,
+ "grad_norm": 0.2011033023605719,
+ "learning_rate": 1.856127260472581e-06,
+ "loss": 0.2166,
+ "step": 569
+ },
+ {
+ "epoch": 4.285714285714286,
+ "grad_norm": 0.19664766697385466,
+ "learning_rate": 1.8554844060155569e-06,
+ "loss": 0.2088,
+ "step": 570
+ },
+ {
+ "epoch": 4.293233082706767,
+ "grad_norm": 0.1996609785990658,
+ "learning_rate": 1.854840230391744e-06,
+ "loss": 0.2209,
+ "step": 571
+ },
+ {
+ "epoch": 4.3007518796992485,
+ "grad_norm": 0.2041105368937696,
+ "learning_rate": 1.8541947345959753e-06,
+ "loss": 0.2239,
+ "step": 572
+ },
+ {
+ "epoch": 4.30827067669173,
+ "grad_norm": 0.20505518953881233,
+ "learning_rate": 1.8535479196251215e-06,
+ "loss": 0.221,
+ "step": 573
+ },
+ {
+ "epoch": 4.315789473684211,
+ "grad_norm": 0.1933651490548001,
+ "learning_rate": 1.852899786478091e-06,
+ "loss": 0.2169,
+ "step": 574
+ },
+ {
+ "epoch": 4.323308270676692,
+ "grad_norm": 0.20196708147751172,
+ "learning_rate": 1.8522503361558273e-06,
+ "loss": 0.2144,
+ "step": 575
+ },
+ {
+ "epoch": 4.330827067669173,
+ "grad_norm": 0.19376053926617398,
+ "learning_rate": 1.8515995696613093e-06,
+ "loss": 0.2124,
+ "step": 576
+ },
+ {
+ "epoch": 4.338345864661654,
+ "grad_norm": 0.20236050146160553,
+ "learning_rate": 1.8509474879995475e-06,
+ "loss": 0.2134,
+ "step": 577
+ },
+ {
+ "epoch": 4.345864661654136,
+ "grad_norm": 0.18873720508166633,
+ "learning_rate": 1.8502940921775837e-06,
+ "loss": 0.2109,
+ "step": 578
+ },
+ {
+ "epoch": 4.353383458646617,
+ "grad_norm": 0.19410922218005378,
+ "learning_rate": 1.8496393832044893e-06,
+ "loss": 0.2138,
+ "step": 579
+ },
+ {
+ "epoch": 4.360902255639098,
+ "grad_norm": 0.20675096851100894,
+ "learning_rate": 1.848983362091364e-06,
+ "loss": 0.2177,
+ "step": 580
+ },
+ {
+ "epoch": 4.368421052631579,
+ "grad_norm": 0.19250960886056315,
+ "learning_rate": 1.848326029851333e-06,
+ "loss": 0.2123,
+ "step": 581
+ },
+ {
+ "epoch": 4.37593984962406,
+ "grad_norm": 0.2143952117489643,
+ "learning_rate": 1.8476673874995477e-06,
+ "loss": 0.2168,
+ "step": 582
+ },
+ {
+ "epoch": 4.383458646616542,
+ "grad_norm": 0.2044473088719358,
+ "learning_rate": 1.8470074360531813e-06,
+ "loss": 0.2136,
+ "step": 583
+ },
+ {
+ "epoch": 4.390977443609023,
+ "grad_norm": 0.20350967762274905,
+ "learning_rate": 1.84634617653143e-06,
+ "loss": 0.2172,
+ "step": 584
+ },
+ {
+ "epoch": 4.398496240601504,
+ "grad_norm": 0.20374784419477693,
+ "learning_rate": 1.8456836099555085e-06,
+ "loss": 0.2073,
+ "step": 585
+ },
+ {
+ "epoch": 4.406015037593985,
+ "grad_norm": 0.20036826709453012,
+ "learning_rate": 1.8450197373486526e-06,
+ "loss": 0.2033,
+ "step": 586
+ },
+ {
+ "epoch": 4.413533834586466,
+ "grad_norm": 0.20846006421800953,
+ "learning_rate": 1.8443545597361122e-06,
+ "loss": 0.2212,
+ "step": 587
+ },
+ {
+ "epoch": 4.421052631578947,
+ "grad_norm": 0.1998992445731674,
+ "learning_rate": 1.8436880781451543e-06,
+ "loss": 0.2228,
+ "step": 588
+ },
+ {
+ "epoch": 4.428571428571429,
+ "grad_norm": 0.19595971101321555,
+ "learning_rate": 1.8430202936050594e-06,
+ "loss": 0.2162,
+ "step": 589
+ },
+ {
+ "epoch": 4.43609022556391,
+ "grad_norm": 0.19798814257191283,
+ "learning_rate": 1.8423512071471204e-06,
+ "loss": 0.2249,
+ "step": 590
+ },
+ {
+ "epoch": 4.443609022556391,
+ "grad_norm": 0.20374163841125117,
+ "learning_rate": 1.84168081980464e-06,
+ "loss": 0.2161,
+ "step": 591
+ },
+ {
+ "epoch": 4.451127819548872,
+ "grad_norm": 0.20162487055988504,
+ "learning_rate": 1.841009132612931e-06,
+ "loss": 0.2152,
+ "step": 592
+ },
+ {
+ "epoch": 4.458646616541353,
+ "grad_norm": 0.2038472962332242,
+ "learning_rate": 1.8403361466093123e-06,
+ "loss": 0.2208,
+ "step": 593
+ },
+ {
+ "epoch": 4.466165413533835,
+ "grad_norm": 0.21539305271037443,
+ "learning_rate": 1.8396618628331101e-06,
+ "loss": 0.2066,
+ "step": 594
+ },
+ {
+ "epoch": 4.473684210526316,
+ "grad_norm": 0.19277533977348368,
+ "learning_rate": 1.8389862823256542e-06,
+ "loss": 0.206,
+ "step": 595
+ },
+ {
+ "epoch": 4.481203007518797,
+ "grad_norm": 0.21005304992272,
+ "learning_rate": 1.8383094061302765e-06,
+ "loss": 0.2115,
+ "step": 596
+ },
+ {
+ "epoch": 4.488721804511278,
+ "grad_norm": 0.2154549723399797,
+ "learning_rate": 1.8376312352923105e-06,
+ "loss": 0.2148,
+ "step": 597
+ },
+ {
+ "epoch": 4.496240601503759,
+ "grad_norm": 0.20886850837459928,
+ "learning_rate": 1.8369517708590885e-06,
+ "loss": 0.2261,
+ "step": 598
+ },
+ {
+ "epoch": 4.503759398496241,
+ "grad_norm": 0.20660386257406652,
+ "learning_rate": 1.8362710138799415e-06,
+ "loss": 0.2169,
+ "step": 599
+ },
+ {
+ "epoch": 4.511278195488722,
+ "grad_norm": 0.19395556168920355,
+ "learning_rate": 1.8355889654061959e-06,
+ "loss": 0.2196,
+ "step": 600
+ },
+ {
+ "epoch": 4.518796992481203,
+ "grad_norm": 0.20220408437766857,
+ "learning_rate": 1.8349056264911729e-06,
+ "loss": 0.2095,
+ "step": 601
+ },
+ {
+ "epoch": 4.526315789473684,
+ "grad_norm": 0.20748249171350464,
+ "learning_rate": 1.834220998190186e-06,
+ "loss": 0.209,
+ "step": 602
+ },
+ {
+ "epoch": 4.533834586466165,
+ "grad_norm": 0.21161257346980433,
+ "learning_rate": 1.8335350815605414e-06,
+ "loss": 0.2246,
+ "step": 603
+ },
+ {
+ "epoch": 4.541353383458647,
+ "grad_norm": 0.19999315226215372,
+ "learning_rate": 1.8328478776615333e-06,
+ "loss": 0.217,
+ "step": 604
+ },
+ {
+ "epoch": 4.548872180451128,
+ "grad_norm": 0.1957645507444795,
+ "learning_rate": 1.8321593875544449e-06,
+ "loss": 0.2175,
+ "step": 605
+ },
+ {
+ "epoch": 4.556390977443609,
+ "grad_norm": 0.20853959835027236,
+ "learning_rate": 1.8314696123025452e-06,
+ "loss": 0.2209,
+ "step": 606
+ },
+ {
+ "epoch": 4.56390977443609,
+ "grad_norm": 0.210188301215194,
+ "learning_rate": 1.8307785529710884e-06,
+ "loss": 0.2283,
+ "step": 607
+ },
+ {
+ "epoch": 4.571428571428571,
+ "grad_norm": 0.1958153359847759,
+ "learning_rate": 1.8300862106273111e-06,
+ "loss": 0.2149,
+ "step": 608
+ },
+ {
+ "epoch": 4.578947368421053,
+ "grad_norm": 0.19203095716382707,
+ "learning_rate": 1.8293925863404325e-06,
+ "loss": 0.2126,
+ "step": 609
+ },
+ {
+ "epoch": 4.586466165413534,
+ "grad_norm": 0.20522275645018723,
+ "learning_rate": 1.8286976811816504e-06,
+ "loss": 0.2219,
+ "step": 610
+ },
+ {
+ "epoch": 4.593984962406015,
+ "grad_norm": 0.21212215163996218,
+ "learning_rate": 1.8280014962241408e-06,
+ "loss": 0.2165,
+ "step": 611
+ },
+ {
+ "epoch": 4.601503759398496,
+ "grad_norm": 0.21106566921911607,
+ "learning_rate": 1.8273040325430573e-06,
+ "loss": 0.2227,
+ "step": 612
+ },
+ {
+ "epoch": 4.609022556390977,
+ "grad_norm": 0.20246554142750614,
+ "learning_rate": 1.8266052912155265e-06,
+ "loss": 0.2175,
+ "step": 613
+ },
+ {
+ "epoch": 4.616541353383458,
+ "grad_norm": 0.2001354176738558,
+ "learning_rate": 1.8259052733206502e-06,
+ "loss": 0.2091,
+ "step": 614
+ },
+ {
+ "epoch": 4.62406015037594,
+ "grad_norm": 0.19919444627866473,
+ "learning_rate": 1.8252039799394993e-06,
+ "loss": 0.2081,
+ "step": 615
+ },
+ {
+ "epoch": 4.631578947368421,
+ "grad_norm": 0.21431716662706468,
+ "learning_rate": 1.8245014121551172e-06,
+ "loss": 0.2258,
+ "step": 616
+ },
+ {
+ "epoch": 4.639097744360902,
+ "grad_norm": 0.20786041595015872,
+ "learning_rate": 1.8237975710525129e-06,
+ "loss": 0.2082,
+ "step": 617
+ },
+ {
+ "epoch": 4.646616541353383,
+ "grad_norm": 0.20536741430381997,
+ "learning_rate": 1.8230924577186632e-06,
+ "loss": 0.2243,
+ "step": 618
+ },
+ {
+ "epoch": 4.654135338345864,
+ "grad_norm": 0.19725744234479,
+ "learning_rate": 1.82238607324251e-06,
+ "loss": 0.2191,
+ "step": 619
+ },
+ {
+ "epoch": 4.661654135338345,
+ "grad_norm": 0.2108813814260746,
+ "learning_rate": 1.8216784187149567e-06,
+ "loss": 0.2102,
+ "step": 620
+ },
+ {
+ "epoch": 4.669172932330827,
+ "grad_norm": 0.20684932250177454,
+ "learning_rate": 1.8209694952288702e-06,
+ "loss": 0.2149,
+ "step": 621
+ },
+ {
+ "epoch": 4.676691729323308,
+ "grad_norm": 0.2137812530955982,
+ "learning_rate": 1.8202593038790752e-06,
+ "loss": 0.2127,
+ "step": 622
+ },
+ {
+ "epoch": 4.684210526315789,
+ "grad_norm": 0.2139176772128159,
+ "learning_rate": 1.8195478457623556e-06,
+ "loss": 0.2192,
+ "step": 623
+ },
+ {
+ "epoch": 4.69172932330827,
+ "grad_norm": 0.20074742180508623,
+ "learning_rate": 1.8188351219774515e-06,
+ "loss": 0.2217,
+ "step": 624
+ },
+ {
+ "epoch": 4.6992481203007515,
+ "grad_norm": 0.20967738530019392,
+ "learning_rate": 1.8181211336250569e-06,
+ "loss": 0.219,
+ "step": 625
+ },
+ {
+ "epoch": 4.706766917293233,
+ "grad_norm": 0.2010845679361143,
+ "learning_rate": 1.8174058818078198e-06,
+ "loss": 0.2102,
+ "step": 626
+ },
+ {
+ "epoch": 4.714285714285714,
+ "grad_norm": 0.20391406961000663,
+ "learning_rate": 1.8166893676303384e-06,
+ "loss": 0.2184,
+ "step": 627
+ },
+ {
+ "epoch": 4.7218045112781954,
+ "grad_norm": 0.21141862437961376,
+ "learning_rate": 1.8159715921991609e-06,
+ "loss": 0.2237,
+ "step": 628
+ },
+ {
+ "epoch": 4.7293233082706765,
+ "grad_norm": 0.20454352203644965,
+ "learning_rate": 1.8152525566227838e-06,
+ "loss": 0.2249,
+ "step": 629
+ },
+ {
+ "epoch": 4.7368421052631575,
+ "grad_norm": 0.20812675187033358,
+ "learning_rate": 1.8145322620116487e-06,
+ "loss": 0.2154,
+ "step": 630
+ },
+ {
+ "epoch": 4.7443609022556394,
+ "grad_norm": 0.20000113633748218,
+ "learning_rate": 1.8138107094781426e-06,
+ "loss": 0.2151,
+ "step": 631
+ },
+ {
+ "epoch": 4.7518796992481205,
+ "grad_norm": 0.21170860050281975,
+ "learning_rate": 1.8130879001365942e-06,
+ "loss": 0.2164,
+ "step": 632
+ },
+ {
+ "epoch": 4.7593984962406015,
+ "grad_norm": 0.22100248916456822,
+ "learning_rate": 1.8123638351032739e-06,
+ "loss": 0.2267,
+ "step": 633
+ },
+ {
+ "epoch": 4.7669172932330826,
+ "grad_norm": 0.20853155602501797,
+ "learning_rate": 1.8116385154963912e-06,
+ "loss": 0.2143,
+ "step": 634
+ },
+ {
+ "epoch": 4.774436090225564,
+ "grad_norm": 0.20285118442888278,
+ "learning_rate": 1.8109119424360928e-06,
+ "loss": 0.2218,
+ "step": 635
+ },
+ {
+ "epoch": 4.7819548872180455,
+ "grad_norm": 0.21270539240083405,
+ "learning_rate": 1.8101841170444613e-06,
+ "loss": 0.2144,
+ "step": 636
+ },
+ {
+ "epoch": 4.7894736842105265,
+ "grad_norm": 0.211475396559907,
+ "learning_rate": 1.8094550404455132e-06,
+ "loss": 0.2127,
+ "step": 637
+ },
+ {
+ "epoch": 4.796992481203008,
+ "grad_norm": 0.22638519498687715,
+ "learning_rate": 1.8087247137651982e-06,
+ "loss": 0.2206,
+ "step": 638
+ },
+ {
+ "epoch": 4.804511278195489,
+ "grad_norm": 0.21223515175319155,
+ "learning_rate": 1.8079931381313951e-06,
+ "loss": 0.211,
+ "step": 639
+ },
+ {
+ "epoch": 4.81203007518797,
+ "grad_norm": 0.2161166952181586,
+ "learning_rate": 1.8072603146739124e-06,
+ "loss": 0.2069,
+ "step": 640
+ },
+ {
+ "epoch": 4.819548872180452,
+ "grad_norm": 0.20147320171965372,
+ "learning_rate": 1.8065262445244859e-06,
+ "loss": 0.2209,
+ "step": 641
+ },
+ {
+ "epoch": 4.827067669172933,
+ "grad_norm": 0.21130164547269342,
+ "learning_rate": 1.8057909288167757e-06,
+ "loss": 0.2215,
+ "step": 642
+ },
+ {
+ "epoch": 4.834586466165414,
+ "grad_norm": 0.21459980253721617,
+ "learning_rate": 1.8050543686863666e-06,
+ "loss": 0.219,
+ "step": 643
+ },
+ {
+ "epoch": 4.842105263157895,
+ "grad_norm": 0.20618180330746375,
+ "learning_rate": 1.8043165652707648e-06,
+ "loss": 0.2111,
+ "step": 644
+ },
+ {
+ "epoch": 4.849624060150376,
+ "grad_norm": 0.19286496997369423,
+ "learning_rate": 1.8035775197093963e-06,
+ "loss": 0.2176,
+ "step": 645
+ },
+ {
+ "epoch": 4.857142857142857,
+ "grad_norm": 0.22279415058853677,
+ "learning_rate": 1.8028372331436057e-06,
+ "loss": 0.216,
+ "step": 646
+ },
+ {
+ "epoch": 4.864661654135339,
+ "grad_norm": 0.21626450639715258,
+ "learning_rate": 1.8020957067166542e-06,
+ "loss": 0.2212,
+ "step": 647
+ },
+ {
+ "epoch": 4.87218045112782,
+ "grad_norm": 0.19783277457678566,
+ "learning_rate": 1.8013529415737175e-06,
+ "loss": 0.2086,
+ "step": 648
+ },
+ {
+ "epoch": 4.879699248120301,
+ "grad_norm": 0.19407090073874644,
+ "learning_rate": 1.8006089388618846e-06,
+ "loss": 0.218,
+ "step": 649
+ },
+ {
+ "epoch": 4.887218045112782,
+ "grad_norm": 0.21579172782029868,
+ "learning_rate": 1.7998636997301558e-06,
+ "loss": 0.2191,
+ "step": 650
+ },
+ {
+ "epoch": 4.894736842105263,
+ "grad_norm": 0.20122694712805084,
+ "learning_rate": 1.7991172253294397e-06,
+ "loss": 0.2142,
+ "step": 651
+ },
+ {
+ "epoch": 4.902255639097744,
+ "grad_norm": 0.19837436214062726,
+ "learning_rate": 1.798369516812555e-06,
+ "loss": 0.2223,
+ "step": 652
+ },
+ {
+ "epoch": 4.909774436090226,
+ "grad_norm": 0.20214247935496812,
+ "learning_rate": 1.797620575334224e-06,
+ "loss": 0.2181,
+ "step": 653
+ },
+ {
+ "epoch": 4.917293233082707,
+ "grad_norm": 0.20687552819103086,
+ "learning_rate": 1.7968704020510739e-06,
+ "loss": 0.2085,
+ "step": 654
+ },
+ {
+ "epoch": 4.924812030075188,
+ "grad_norm": 0.2098154347878191,
+ "learning_rate": 1.7961189981216345e-06,
+ "loss": 0.2099,
+ "step": 655
+ },
+ {
+ "epoch": 4.932330827067669,
+ "grad_norm": 0.19172194596940284,
+ "learning_rate": 1.7953663647063363e-06,
+ "loss": 0.2135,
+ "step": 656
+ },
+ {
+ "epoch": 4.93984962406015,
+ "grad_norm": 0.19466494349185445,
+ "learning_rate": 1.794612502967508e-06,
+ "loss": 0.2224,
+ "step": 657
+ },
+ {
+ "epoch": 4.947368421052632,
+ "grad_norm": 0.2034280986802737,
+ "learning_rate": 1.793857414069375e-06,
+ "loss": 0.2213,
+ "step": 658
+ },
+ {
+ "epoch": 4.954887218045113,
+ "grad_norm": 0.20619754248294708,
+ "learning_rate": 1.7931010991780591e-06,
+ "loss": 0.2201,
+ "step": 659
+ },
+ {
+ "epoch": 4.962406015037594,
+ "grad_norm": 0.19436635755269327,
+ "learning_rate": 1.7923435594615742e-06,
+ "loss": 0.2038,
+ "step": 660
+ },
+ {
+ "epoch": 4.969924812030075,
+ "grad_norm": 0.20094648174593077,
+ "learning_rate": 1.7915847960898266e-06,
+ "loss": 0.2089,
+ "step": 661
+ },
+ {
+ "epoch": 4.977443609022556,
+ "grad_norm": 0.1952930478540414,
+ "learning_rate": 1.790824810234612e-06,
+ "loss": 0.2,
+ "step": 662
+ },
+ {
+ "epoch": 4.984962406015038,
+ "grad_norm": 0.21573299869166512,
+ "learning_rate": 1.7900636030696136e-06,
+ "loss": 0.2127,
+ "step": 663
+ },
+ {
+ "epoch": 4.992481203007519,
+ "grad_norm": 0.2035409806590866,
+ "learning_rate": 1.789301175770402e-06,
+ "loss": 0.208,
+ "step": 664
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 0.20827256544683237,
+ "learning_rate": 1.7885375295144304e-06,
+ "loss": 0.2151,
+ "step": 665
+ },
+ {
+ "epoch": 5.0,
+ "eval_loss": 0.2360389232635498,
+ "eval_runtime": 35.8813,
+ "eval_samples_per_second": 12.458,
+ "eval_steps_per_second": 0.195,
+ "step": 665
+ },
+ {
+ "epoch": 5.007518796992481,
+ "grad_norm": 0.2637600356286211,
+ "learning_rate": 1.7877726654810363e-06,
+ "loss": 0.1952,
+ "step": 666
+ },
+ {
+ "epoch": 5.015037593984962,
+ "grad_norm": 0.19977525862434287,
+ "learning_rate": 1.7870065848514364e-06,
+ "loss": 0.2053,
+ "step": 667
+ },
+ {
+ "epoch": 5.022556390977444,
+ "grad_norm": 0.23612898073456198,
+ "learning_rate": 1.7862392888087267e-06,
+ "loss": 0.2077,
+ "step": 668
+ },
+ {
+ "epoch": 5.030075187969925,
+ "grad_norm": 0.2479369895625932,
+ "learning_rate": 1.785470778537881e-06,
+ "loss": 0.2013,
+ "step": 669
+ },
+ {
+ "epoch": 5.037593984962406,
+ "grad_norm": 0.1991647870861913,
+ "learning_rate": 1.7847010552257467e-06,
+ "loss": 0.2099,
+ "step": 670
+ },
+ {
+ "epoch": 5.045112781954887,
+ "grad_norm": 0.23398840671116702,
+ "learning_rate": 1.7839301200610463e-06,
+ "loss": 0.2025,
+ "step": 671
+ },
+ {
+ "epoch": 5.052631578947368,
+ "grad_norm": 0.20214653354297227,
+ "learning_rate": 1.7831579742343727e-06,
+ "loss": 0.199,
+ "step": 672
+ },
+ {
+ "epoch": 5.06015037593985,
+ "grad_norm": 0.1929456211995631,
+ "learning_rate": 1.7823846189381891e-06,
+ "loss": 0.1938,
+ "step": 673
+ },
+ {
+ "epoch": 5.067669172932331,
+ "grad_norm": 0.2154815577273467,
+ "learning_rate": 1.7816100553668258e-06,
+ "loss": 0.1993,
+ "step": 674
+ },
+ {
+ "epoch": 5.075187969924812,
+ "grad_norm": 0.22013153197449056,
+ "learning_rate": 1.7808342847164796e-06,
+ "loss": 0.207,
+ "step": 675
+ },
+ {
+ "epoch": 5.082706766917293,
+ "grad_norm": 0.19854215123579758,
+ "learning_rate": 1.780057308185212e-06,
+ "loss": 0.1834,
+ "step": 676
+ },
+ {
+ "epoch": 5.090225563909774,
+ "grad_norm": 0.20301239270359195,
+ "learning_rate": 1.7792791269729456e-06,
+ "loss": 0.2028,
+ "step": 677
+ },
+ {
+ "epoch": 5.097744360902255,
+ "grad_norm": 0.2278778544862765,
+ "learning_rate": 1.7784997422814643e-06,
+ "loss": 0.2079,
+ "step": 678
+ },
+ {
+ "epoch": 5.105263157894737,
+ "grad_norm": 0.20381652875881828,
+ "learning_rate": 1.77771915531441e-06,
+ "loss": 0.2045,
+ "step": 679
+ },
+ {
+ "epoch": 5.112781954887218,
+ "grad_norm": 0.20885591572220313,
+ "learning_rate": 1.776937367277282e-06,
+ "loss": 0.1937,
+ "step": 680
+ },
+ {
+ "epoch": 5.120300751879699,
+ "grad_norm": 0.21399943662803889,
+ "learning_rate": 1.7761543793774343e-06,
+ "loss": 0.2137,
+ "step": 681
+ },
+ {
+ "epoch": 5.12781954887218,
+ "grad_norm": 0.2184029857906422,
+ "learning_rate": 1.7753701928240733e-06,
+ "loss": 0.1997,
+ "step": 682
+ },
+ {
+ "epoch": 5.135338345864661,
+ "grad_norm": 0.2052759603436561,
+ "learning_rate": 1.7745848088282575e-06,
+ "loss": 0.1972,
+ "step": 683
+ },
+ {
+ "epoch": 5.142857142857143,
+ "grad_norm": 0.20323435279352636,
+ "learning_rate": 1.7737982286028937e-06,
+ "loss": 0.194,
+ "step": 684
+ },
+ {
+ "epoch": 5.150375939849624,
+ "grad_norm": 0.20368818317832088,
+ "learning_rate": 1.773010453362737e-06,
+ "loss": 0.2038,
+ "step": 685
+ },
+ {
+ "epoch": 5.157894736842105,
+ "grad_norm": 0.20449702218834612,
+ "learning_rate": 1.7722214843243873e-06,
+ "loss": 0.1988,
+ "step": 686
+ },
+ {
+ "epoch": 5.165413533834586,
+ "grad_norm": 0.208673553034556,
+ "learning_rate": 1.771431322706288e-06,
+ "loss": 0.2008,
+ "step": 687
+ },
+ {
+ "epoch": 5.172932330827067,
+ "grad_norm": 0.21069615949159456,
+ "learning_rate": 1.7706399697287258e-06,
+ "loss": 0.2066,
+ "step": 688
+ },
+ {
+ "epoch": 5.180451127819548,
+ "grad_norm": 0.19885336295260733,
+ "learning_rate": 1.769847426613825e-06,
+ "loss": 0.195,
+ "step": 689
+ },
+ {
+ "epoch": 5.18796992481203,
+ "grad_norm": 0.20594670164557244,
+ "learning_rate": 1.76905369458555e-06,
+ "loss": 0.1973,
+ "step": 690
+ },
+ {
+ "epoch": 5.195488721804511,
+ "grad_norm": 0.20597440979591553,
+ "learning_rate": 1.7682587748696996e-06,
+ "loss": 0.2047,
+ "step": 691
+ },
+ {
+ "epoch": 5.203007518796992,
+ "grad_norm": 0.208218294582099,
+ "learning_rate": 1.7674626686939077e-06,
+ "loss": 0.2111,
+ "step": 692
+ },
+ {
+ "epoch": 5.2105263157894735,
+ "grad_norm": 0.19971870426692764,
+ "learning_rate": 1.766665377287641e-06,
+ "loss": 0.2011,
+ "step": 693
+ },
+ {
+ "epoch": 5.2180451127819545,
+ "grad_norm": 0.20249881002630643,
+ "learning_rate": 1.7658669018821952e-06,
+ "loss": 0.2039,
+ "step": 694
+ },
+ {
+ "epoch": 5.225563909774436,
+ "grad_norm": 0.20115166746017465,
+ "learning_rate": 1.7650672437106957e-06,
+ "loss": 0.1912,
+ "step": 695
+ },
+ {
+ "epoch": 5.2330827067669174,
+ "grad_norm": 0.20109653921469475,
+ "learning_rate": 1.7642664040080937e-06,
+ "loss": 0.1981,
+ "step": 696
+ },
+ {
+ "epoch": 5.2406015037593985,
+ "grad_norm": 0.1965192731655488,
+ "learning_rate": 1.763464384011166e-06,
+ "loss": 0.2022,
+ "step": 697
+ },
+ {
+ "epoch": 5.2481203007518795,
+ "grad_norm": 0.2086627932714566,
+ "learning_rate": 1.762661184958511e-06,
+ "loss": 0.1975,
+ "step": 698
+ },
+ {
+ "epoch": 5.2556390977443606,
+ "grad_norm": 0.21107284006762767,
+ "learning_rate": 1.7618568080905491e-06,
+ "loss": 0.2015,
+ "step": 699
+ },
+ {
+ "epoch": 5.2631578947368425,
+ "grad_norm": 0.2051987902910989,
+ "learning_rate": 1.7610512546495192e-06,
+ "loss": 0.2037,
+ "step": 700
+ },
+ {
+ "epoch": 5.2706766917293235,
+ "grad_norm": 0.20697461962853733,
+ "learning_rate": 1.7602445258794772e-06,
+ "loss": 0.2028,
+ "step": 701
+ },
+ {
+ "epoch": 5.2781954887218046,
+ "grad_norm": 0.19926343000273303,
+ "learning_rate": 1.759436623026294e-06,
+ "loss": 0.204,
+ "step": 702
+ },
+ {
+ "epoch": 5.285714285714286,
+ "grad_norm": 0.1992713263226058,
+ "learning_rate": 1.7586275473376539e-06,
+ "loss": 0.199,
+ "step": 703
+ },
+ {
+ "epoch": 5.293233082706767,
+ "grad_norm": 0.2006085064344309,
+ "learning_rate": 1.7578173000630525e-06,
+ "loss": 0.2024,
+ "step": 704
+ },
+ {
+ "epoch": 5.3007518796992485,
+ "grad_norm": 0.20526990548407387,
+ "learning_rate": 1.7570058824537948e-06,
+ "loss": 0.2122,
+ "step": 705
+ },
+ {
+ "epoch": 5.30827067669173,
+ "grad_norm": 0.21139900919386984,
+ "learning_rate": 1.7561932957629926e-06,
+ "loss": 0.2059,
+ "step": 706
+ },
+ {
+ "epoch": 5.315789473684211,
+ "grad_norm": 0.20209448477123654,
+ "learning_rate": 1.755379541245564e-06,
+ "loss": 0.203,
+ "step": 707
+ },
+ {
+ "epoch": 5.323308270676692,
+ "grad_norm": 0.21816398858798802,
+ "learning_rate": 1.75456462015823e-06,
+ "loss": 0.1961,
+ "step": 708
+ },
+ {
+ "epoch": 5.330827067669173,
+ "grad_norm": 0.20771540002549133,
+ "learning_rate": 1.7537485337595137e-06,
+ "loss": 0.2045,
+ "step": 709
+ },
+ {
+ "epoch": 5.338345864661654,
+ "grad_norm": 0.21247132642320332,
+ "learning_rate": 1.7529312833097376e-06,
+ "loss": 0.2014,
+ "step": 710
+ },
+ {
+ "epoch": 5.345864661654136,
+ "grad_norm": 0.212471012524146,
+ "learning_rate": 1.7521128700710216e-06,
+ "loss": 0.2022,
+ "step": 711
+ },
+ {
+ "epoch": 5.353383458646617,
+ "grad_norm": 0.1985833181268932,
+ "learning_rate": 1.7512932953072824e-06,
+ "loss": 0.2007,
+ "step": 712
+ },
+ {
+ "epoch": 5.360902255639098,
+ "grad_norm": 0.20579263273235682,
+ "learning_rate": 1.7504725602842287e-06,
+ "loss": 0.1989,
+ "step": 713
+ },
+ {
+ "epoch": 5.368421052631579,
+ "grad_norm": 0.21844306423379095,
+ "learning_rate": 1.7496506662693628e-06,
+ "loss": 0.2046,
+ "step": 714
+ },
+ {
+ "epoch": 5.37593984962406,
+ "grad_norm": 0.2006478551302087,
+ "learning_rate": 1.748827614531976e-06,
+ "loss": 0.1969,
+ "step": 715
+ },
+ {
+ "epoch": 5.383458646616542,
+ "grad_norm": 0.21771140932523525,
+ "learning_rate": 1.7480034063431478e-06,
+ "loss": 0.2022,
+ "step": 716
+ },
+ {
+ "epoch": 5.390977443609023,
+ "grad_norm": 0.212094158626375,
+ "learning_rate": 1.7471780429757434e-06,
+ "loss": 0.2188,
+ "step": 717
+ },
+ {
+ "epoch": 5.398496240601504,
+ "grad_norm": 0.19664517276582924,
+ "learning_rate": 1.7463515257044127e-06,
+ "loss": 0.2059,
+ "step": 718
+ },
+ {
+ "epoch": 5.406015037593985,
+ "grad_norm": 0.21024960772330042,
+ "learning_rate": 1.7455238558055862e-06,
+ "loss": 0.2001,
+ "step": 719
+ },
+ {
+ "epoch": 5.413533834586466,
+ "grad_norm": 0.20683190717270053,
+ "learning_rate": 1.744695034557476e-06,
+ "loss": 0.2072,
+ "step": 720
+ },
+ {
+ "epoch": 5.421052631578947,
+ "grad_norm": 0.21367723795947371,
+ "learning_rate": 1.7438650632400717e-06,
+ "loss": 0.2008,
+ "step": 721
+ },
+ {
+ "epoch": 5.428571428571429,
+ "grad_norm": 0.20840618634486974,
+ "learning_rate": 1.7430339431351387e-06,
+ "loss": 0.1997,
+ "step": 722
+ },
+ {
+ "epoch": 5.43609022556391,
+ "grad_norm": 0.20693860881274226,
+ "learning_rate": 1.7422016755262167e-06,
+ "loss": 0.193,
+ "step": 723
+ },
+ {
+ "epoch": 5.443609022556391,
+ "grad_norm": 0.2070046667869305,
+ "learning_rate": 1.7413682616986183e-06,
+ "loss": 0.2023,
+ "step": 724
+ },
+ {
+ "epoch": 5.451127819548872,
+ "grad_norm": 0.2043717631493107,
+ "learning_rate": 1.7405337029394247e-06,
+ "loss": 0.1981,
+ "step": 725
+ },
+ {
+ "epoch": 5.458646616541353,
+ "grad_norm": 0.20948398323630127,
+ "learning_rate": 1.7396980005374869e-06,
+ "loss": 0.2017,
+ "step": 726
+ },
+ {
+ "epoch": 5.466165413533835,
+ "grad_norm": 0.2057035338704813,
+ "learning_rate": 1.738861155783421e-06,
+ "loss": 0.2,
+ "step": 727
+ },
+ {
+ "epoch": 5.473684210526316,
+ "grad_norm": 0.21760471263301906,
+ "learning_rate": 1.7380231699696077e-06,
+ "loss": 0.2055,
+ "step": 728
+ },
+ {
+ "epoch": 5.481203007518797,
+ "grad_norm": 0.19621559182339568,
+ "learning_rate": 1.73718404439019e-06,
+ "loss": 0.2107,
+ "step": 729
+ },
+ {
+ "epoch": 5.488721804511278,
+ "grad_norm": 0.2055527559622529,
+ "learning_rate": 1.7363437803410707e-06,
+ "loss": 0.1951,
+ "step": 730
+ },
+ {
+ "epoch": 5.496240601503759,
+ "grad_norm": 0.20306230323106386,
+ "learning_rate": 1.7355023791199113e-06,
+ "loss": 0.2088,
+ "step": 731
+ },
+ {
+ "epoch": 5.503759398496241,
+ "grad_norm": 0.2159643314718235,
+ "learning_rate": 1.7346598420261294e-06,
+ "loss": 0.2027,
+ "step": 732
+ },
+ {
+ "epoch": 5.511278195488722,
+ "grad_norm": 0.20101613992102027,
+ "learning_rate": 1.7338161703608958e-06,
+ "loss": 0.2002,
+ "step": 733
+ },
+ {
+ "epoch": 5.518796992481203,
+ "grad_norm": 0.20993538103811976,
+ "learning_rate": 1.7329713654271352e-06,
+ "loss": 0.1874,
+ "step": 734
+ },
+ {
+ "epoch": 5.526315789473684,
+ "grad_norm": 0.204230798951143,
+ "learning_rate": 1.732125428529521e-06,
+ "loss": 0.211,
+ "step": 735
+ },
+ {
+ "epoch": 5.533834586466165,
+ "grad_norm": 0.20597836244424206,
+ "learning_rate": 1.7312783609744753e-06,
+ "loss": 0.2113,
+ "step": 736
+ },
+ {
+ "epoch": 5.541353383458647,
+ "grad_norm": 0.20327379234146886,
+ "learning_rate": 1.7304301640701669e-06,
+ "loss": 0.2042,
+ "step": 737
+ },
+ {
+ "epoch": 5.548872180451128,
+ "grad_norm": 0.21248790336585996,
+ "learning_rate": 1.729580839126507e-06,
+ "loss": 0.2091,
+ "step": 738
+ },
+ {
+ "epoch": 5.556390977443609,
+ "grad_norm": 0.20498784035462614,
+ "learning_rate": 1.7287303874551515e-06,
+ "loss": 0.1946,
+ "step": 739
+ },
+ {
+ "epoch": 5.56390977443609,
+ "grad_norm": 0.21077571558346597,
+ "learning_rate": 1.7278788103694942e-06,
+ "loss": 0.1909,
+ "step": 740
+ },
+ {
+ "epoch": 5.571428571428571,
+ "grad_norm": 0.20180974188676012,
+ "learning_rate": 1.7270261091846673e-06,
+ "loss": 0.2038,
+ "step": 741
+ },
+ {
+ "epoch": 5.578947368421053,
+ "grad_norm": 0.20069917349004376,
+ "learning_rate": 1.7261722852175393e-06,
+ "loss": 0.1904,
+ "step": 742
+ },
+ {
+ "epoch": 5.586466165413534,
+ "grad_norm": 0.21140451166785268,
+ "learning_rate": 1.7253173397867133e-06,
+ "loss": 0.2042,
+ "step": 743
+ },
+ {
+ "epoch": 5.593984962406015,
+ "grad_norm": 0.20394861499297884,
+ "learning_rate": 1.7244612742125236e-06,
+ "loss": 0.196,
+ "step": 744
+ },
+ {
+ "epoch": 5.601503759398496,
+ "grad_norm": 0.19463371122456738,
+ "learning_rate": 1.723604089817034e-06,
+ "loss": 0.2002,
+ "step": 745
+ },
+ {
+ "epoch": 5.609022556390977,
+ "grad_norm": 0.21271158762518355,
+ "learning_rate": 1.7227457879240371e-06,
+ "loss": 0.2113,
+ "step": 746
+ },
+ {
+ "epoch": 5.616541353383458,
+ "grad_norm": 0.20498698735489804,
+ "learning_rate": 1.7218863698590508e-06,
+ "loss": 0.2056,
+ "step": 747
+ },
+ {
+ "epoch": 5.62406015037594,
+ "grad_norm": 0.21077241097613317,
+ "learning_rate": 1.7210258369493169e-06,
+ "loss": 0.2179,
+ "step": 748
+ },
+ {
+ "epoch": 5.631578947368421,
+ "grad_norm": 0.20280065970650488,
+ "learning_rate": 1.7201641905237984e-06,
+ "loss": 0.2071,
+ "step": 749
+ },
+ {
+ "epoch": 5.639097744360902,
+ "grad_norm": 0.21725317239846573,
+ "learning_rate": 1.719301431913179e-06,
+ "loss": 0.1963,
+ "step": 750
+ },
+ {
+ "epoch": 5.646616541353383,
+ "grad_norm": 0.20411981596036063,
+ "learning_rate": 1.718437562449859e-06,
+ "loss": 0.2009,
+ "step": 751
+ },
+ {
+ "epoch": 5.654135338345864,
+ "grad_norm": 0.20256196836411214,
+ "learning_rate": 1.7175725834679548e-06,
+ "loss": 0.2046,
+ "step": 752
+ },
+ {
+ "epoch": 5.661654135338345,
+ "grad_norm": 0.21149712330767395,
+ "learning_rate": 1.7167064963032963e-06,
+ "loss": 0.2093,
+ "step": 753
+ },
+ {
+ "epoch": 5.669172932330827,
+ "grad_norm": 0.22187172553520523,
+ "learning_rate": 1.7158393022934243e-06,
+ "loss": 0.2031,
+ "step": 754
+ },
+ {
+ "epoch": 5.676691729323308,
+ "grad_norm": 0.19263557824389366,
+ "learning_rate": 1.7149710027775895e-06,
+ "loss": 0.2066,
+ "step": 755
+ },
+ {
+ "epoch": 5.684210526315789,
+ "grad_norm": 0.22141017346218214,
+ "learning_rate": 1.7141015990967498e-06,
+ "loss": 0.208,
+ "step": 756
+ },
+ {
+ "epoch": 5.69172932330827,
+ "grad_norm": 0.19513215428958489,
+ "learning_rate": 1.7132310925935677e-06,
+ "loss": 0.1919,
+ "step": 757
+ },
+ {
+ "epoch": 5.6992481203007515,
+ "grad_norm": 0.19487102536178455,
+ "learning_rate": 1.71235948461241e-06,
+ "loss": 0.1966,
+ "step": 758
+ },
+ {
+ "epoch": 5.706766917293233,
+ "grad_norm": 0.21308220007469647,
+ "learning_rate": 1.7114867764993436e-06,
+ "loss": 0.212,
+ "step": 759
+ },
+ {
+ "epoch": 5.714285714285714,
+ "grad_norm": 0.21298568878808133,
+ "learning_rate": 1.7106129696021349e-06,
+ "loss": 0.2107,
+ "step": 760
+ },
+ {
+ "epoch": 5.7218045112781954,
+ "grad_norm": 0.20233908180005397,
+ "learning_rate": 1.7097380652702467e-06,
+ "loss": 0.2008,
+ "step": 761
+ },
+ {
+ "epoch": 5.7293233082706765,
+ "grad_norm": 0.21397022126550835,
+ "learning_rate": 1.7088620648548374e-06,
+ "loss": 0.1974,
+ "step": 762
+ },
+ {
+ "epoch": 5.7368421052631575,
+ "grad_norm": 0.21154949769684928,
+ "learning_rate": 1.707984969708757e-06,
+ "loss": 0.2098,
+ "step": 763
+ },
+ {
+ "epoch": 5.7443609022556394,
+ "grad_norm": 0.20399875918431762,
+ "learning_rate": 1.7071067811865474e-06,
+ "loss": 0.2072,
+ "step": 764
+ },
+ {
+ "epoch": 5.7518796992481205,
+ "grad_norm": 0.23154538183053006,
+ "learning_rate": 1.7062275006444384e-06,
+ "loss": 0.2095,
+ "step": 765
+ },
+ {
+ "epoch": 5.7593984962406015,
+ "grad_norm": 0.20814238922743727,
+ "learning_rate": 1.7053471294403461e-06,
+ "loss": 0.1989,
+ "step": 766
+ },
+ {
+ "epoch": 5.7669172932330826,
+ "grad_norm": 0.19839085653700525,
+ "learning_rate": 1.7044656689338713e-06,
+ "loss": 0.2082,
+ "step": 767
+ },
+ {
+ "epoch": 5.774436090225564,
+ "grad_norm": 0.2233522339124349,
+ "learning_rate": 1.703583120486297e-06,
+ "loss": 0.1994,
+ "step": 768
+ },
+ {
+ "epoch": 5.7819548872180455,
+ "grad_norm": 0.22548327030080467,
+ "learning_rate": 1.7026994854605862e-06,
+ "loss": 0.213,
+ "step": 769
+ },
+ {
+ "epoch": 5.7894736842105265,
+ "grad_norm": 0.20645849704056862,
+ "learning_rate": 1.7018147652213804e-06,
+ "loss": 0.2126,
+ "step": 770
+ },
+ {
+ "epoch": 5.796992481203008,
+ "grad_norm": 0.23613908515635304,
+ "learning_rate": 1.7009289611349963e-06,
+ "loss": 0.2063,
+ "step": 771
+ },
+ {
+ "epoch": 5.804511278195489,
+ "grad_norm": 0.21805353385084902,
+ "learning_rate": 1.7000420745694253e-06,
+ "loss": 0.2061,
+ "step": 772
+ },
+ {
+ "epoch": 5.81203007518797,
+ "grad_norm": 0.20776540134754903,
+ "learning_rate": 1.6991541068943297e-06,
+ "loss": 0.2057,
+ "step": 773
+ },
+ {
+ "epoch": 5.819548872180452,
+ "grad_norm": 0.2135784542275375,
+ "learning_rate": 1.6982650594810422e-06,
+ "loss": 0.2005,
+ "step": 774
+ },
+ {
+ "epoch": 5.827067669172933,
+ "grad_norm": 0.19752635833862398,
+ "learning_rate": 1.6973749337025622e-06,
+ "loss": 0.2019,
+ "step": 775
+ },
+ {
+ "epoch": 5.834586466165414,
+ "grad_norm": 0.20288617371925508,
+ "learning_rate": 1.696483730933555e-06,
+ "loss": 0.199,
+ "step": 776
+ },
+ {
+ "epoch": 5.842105263157895,
+ "grad_norm": 0.21484465050554244,
+ "learning_rate": 1.695591452550349e-06,
+ "loss": 0.1975,
+ "step": 777
+ },
+ {
+ "epoch": 5.849624060150376,
+ "grad_norm": 0.20696575894495703,
+ "learning_rate": 1.6946980999309341e-06,
+ "loss": 0.2005,
+ "step": 778
+ },
+ {
+ "epoch": 5.857142857142857,
+ "grad_norm": 0.20043376394417892,
+ "learning_rate": 1.6938036744549585e-06,
+ "loss": 0.1956,
+ "step": 779
+ },
+ {
+ "epoch": 5.864661654135339,
+ "grad_norm": 0.2071393564072276,
+ "learning_rate": 1.6929081775037276e-06,
+ "loss": 0.1984,
+ "step": 780
+ },
+ {
+ "epoch": 5.87218045112782,
+ "grad_norm": 0.20978834824828743,
+ "learning_rate": 1.6920116104602013e-06,
+ "loss": 0.2022,
+ "step": 781
+ },
+ {
+ "epoch": 5.879699248120301,
+ "grad_norm": 0.2098625958117764,
+ "learning_rate": 1.6911139747089931e-06,
+ "loss": 0.1928,
+ "step": 782
+ },
+ {
+ "epoch": 5.887218045112782,
+ "grad_norm": 0.20530184405190322,
+ "learning_rate": 1.6902152716363654e-06,
+ "loss": 0.2093,
+ "step": 783
+ },
+ {
+ "epoch": 5.894736842105263,
+ "grad_norm": 0.20975197820281183,
+ "learning_rate": 1.68931550263023e-06,
+ "loss": 0.1985,
+ "step": 784
+ },
+ {
+ "epoch": 5.902255639097744,
+ "grad_norm": 0.19468093306583503,
+ "learning_rate": 1.688414669080145e-06,
+ "loss": 0.2093,
+ "step": 785
+ },
+ {
+ "epoch": 5.909774436090226,
+ "grad_norm": 0.20973060657198417,
+ "learning_rate": 1.6875127723773114e-06,
+ "loss": 0.2085,
+ "step": 786
+ },
+ {
+ "epoch": 5.917293233082707,
+ "grad_norm": 0.21323351484995592,
+ "learning_rate": 1.6866098139145728e-06,
+ "loss": 0.1949,
+ "step": 787
+ },
+ {
+ "epoch": 5.924812030075188,
+ "grad_norm": 0.23473277486072783,
+ "learning_rate": 1.685705795086413e-06,
+ "loss": 0.2133,
+ "step": 788
+ },
+ {
+ "epoch": 5.932330827067669,
+ "grad_norm": 0.2135402066250093,
+ "learning_rate": 1.684800717288953e-06,
+ "loss": 0.2051,
+ "step": 789
+ },
+ {
+ "epoch": 5.93984962406015,
+ "grad_norm": 0.21514953898999292,
+ "learning_rate": 1.6838945819199485e-06,
+ "loss": 0.2122,
+ "step": 790
+ },
+ {
+ "epoch": 5.947368421052632,
+ "grad_norm": 0.19998383136452946,
+ "learning_rate": 1.6829873903787898e-06,
+ "loss": 0.1962,
+ "step": 791
+ },
+ {
+ "epoch": 5.954887218045113,
+ "grad_norm": 0.2091941666014995,
+ "learning_rate": 1.6820791440664969e-06,
+ "loss": 0.2128,
+ "step": 792
+ },
+ {
+ "epoch": 5.962406015037594,
+ "grad_norm": 0.205060805286806,
+ "learning_rate": 1.6811698443857197e-06,
+ "loss": 0.2074,
+ "step": 793
+ },
+ {
+ "epoch": 5.969924812030075,
+ "grad_norm": 0.20531952186366464,
+ "learning_rate": 1.6802594927407344e-06,
+ "loss": 0.1972,
+ "step": 794
+ },
+ {
+ "epoch": 5.977443609022556,
+ "grad_norm": 0.21011744855268757,
+ "learning_rate": 1.679348090537442e-06,
+ "loss": 0.2021,
+ "step": 795
+ },
+ {
+ "epoch": 5.984962406015038,
+ "grad_norm": 0.20280038976510592,
+ "learning_rate": 1.6784356391833662e-06,
+ "loss": 0.2031,
+ "step": 796
+ },
+ {
+ "epoch": 5.992481203007519,
+ "grad_norm": 0.2050493172985504,
+ "learning_rate": 1.6775221400876504e-06,
+ "loss": 0.215,
+ "step": 797
+ },
+ {
+ "epoch": 6.0,
+ "grad_norm": 0.2052237832342712,
+ "learning_rate": 1.6766075946610565e-06,
+ "loss": 0.2075,
+ "step": 798
+ },
+ {
+ "epoch": 6.0,
+ "eval_loss": 0.23800283670425415,
+ "eval_runtime": 35.9754,
+ "eval_samples_per_second": 12.425,
+ "eval_steps_per_second": 0.195,
+ "step": 798
+ },
+ {
+ "epoch": 6.007518796992481,
+ "grad_norm": 0.27768611377891333,
+ "learning_rate": 1.675692004315962e-06,
+ "loss": 0.1869,
+ "step": 799
+ },
+ {
+ "epoch": 6.015037593984962,
+ "grad_norm": 0.2220658862314162,
+ "learning_rate": 1.6747753704663584e-06,
+ "loss": 0.1847,
+ "step": 800
+ },
+ {
+ "epoch": 6.022556390977444,
+ "grad_norm": 0.25148729004417725,
+ "learning_rate": 1.6738576945278485e-06,
+ "loss": 0.2006,
+ "step": 801
+ },
+ {
+ "epoch": 6.030075187969925,
+ "grad_norm": 0.26146813300734545,
+ "learning_rate": 1.6729389779176443e-06,
+ "loss": 0.1837,
+ "step": 802
+ },
+ {
+ "epoch": 6.037593984962406,
+ "grad_norm": 0.21270917239882164,
+ "learning_rate": 1.6720192220545658e-06,
+ "loss": 0.1929,
+ "step": 803
+ },
+ {
+ "epoch": 6.045112781954887,
+ "grad_norm": 0.21529860241944337,
+ "learning_rate": 1.6710984283590367e-06,
+ "loss": 0.1872,
+ "step": 804
+ },
+ {
+ "epoch": 6.052631578947368,
+ "grad_norm": 0.26403458216824194,
+ "learning_rate": 1.6701765982530845e-06,
+ "loss": 0.1889,
+ "step": 805
+ },
+ {
+ "epoch": 6.06015037593985,
+ "grad_norm": 0.23110771862220658,
+ "learning_rate": 1.6692537331603372e-06,
+ "loss": 0.1856,
+ "step": 806
+ },
+ {
+ "epoch": 6.067669172932331,
+ "grad_norm": 0.22251366290173905,
+ "learning_rate": 1.6683298345060202e-06,
+ "loss": 0.1912,
+ "step": 807
+ },
+ {
+ "epoch": 6.075187969924812,
+ "grad_norm": 0.25977506385460963,
+ "learning_rate": 1.6674049037169562e-06,
+ "loss": 0.1993,
+ "step": 808
+ },
+ {
+ "epoch": 6.082706766917293,
+ "grad_norm": 0.2175462902001085,
+ "learning_rate": 1.6664789422215615e-06,
+ "loss": 0.196,
+ "step": 809
+ },
+ {
+ "epoch": 6.090225563909774,
+ "grad_norm": 0.21497045637754097,
+ "learning_rate": 1.665551951449844e-06,
+ "loss": 0.1875,
+ "step": 810
+ },
+ {
+ "epoch": 6.097744360902255,
+ "grad_norm": 0.22014965687384452,
+ "learning_rate": 1.6646239328334018e-06,
+ "loss": 0.191,
+ "step": 811
+ },
+ {
+ "epoch": 6.105263157894737,
+ "grad_norm": 0.22077061048868674,
+ "learning_rate": 1.6636948878054189e-06,
+ "loss": 0.1858,
+ "step": 812
+ },
+ {
+ "epoch": 6.112781954887218,
+ "grad_norm": 0.21646992499401557,
+ "learning_rate": 1.662764817800666e-06,
+ "loss": 0.1881,
+ "step": 813
+ },
+ {
+ "epoch": 6.120300751879699,
+ "grad_norm": 0.21211102298554763,
+ "learning_rate": 1.6618337242554961e-06,
+ "loss": 0.1914,
+ "step": 814
+ },
+ {
+ "epoch": 6.12781954887218,
+ "grad_norm": 0.2172003045566366,
+ "learning_rate": 1.660901608607843e-06,
+ "loss": 0.1896,
+ "step": 815
+ },
+ {
+ "epoch": 6.135338345864661,
+ "grad_norm": 0.21551278343788702,
+ "learning_rate": 1.6599684722972187e-06,
+ "loss": 0.1883,
+ "step": 816
+ },
+ {
+ "epoch": 6.142857142857143,
+ "grad_norm": 0.21884035260334198,
+ "learning_rate": 1.6590343167647114e-06,
+ "loss": 0.19,
+ "step": 817
+ },
+ {
+ "epoch": 6.150375939849624,
+ "grad_norm": 0.21468385168387916,
+ "learning_rate": 1.6580991434529841e-06,
+ "loss": 0.193,
+ "step": 818
+ },
+ {
+ "epoch": 6.157894736842105,
+ "grad_norm": 0.20446407386826587,
+ "learning_rate": 1.6571629538062707e-06,
+ "loss": 0.1846,
+ "step": 819
+ },
+ {
+ "epoch": 6.165413533834586,
+ "grad_norm": 0.2226532722406865,
+ "learning_rate": 1.6562257492703755e-06,
+ "loss": 0.1965,
+ "step": 820
+ },
+ {
+ "epoch": 6.172932330827067,
+ "grad_norm": 0.2096537990223172,
+ "learning_rate": 1.6552875312926692e-06,
+ "loss": 0.1918,
+ "step": 821
+ },
+ {
+ "epoch": 6.180451127819548,
+ "grad_norm": 0.20845290864950267,
+ "learning_rate": 1.6543483013220887e-06,
+ "loss": 0.1897,
+ "step": 822
+ },
+ {
+ "epoch": 6.18796992481203,
+ "grad_norm": 0.2053072581642141,
+ "learning_rate": 1.653408060809133e-06,
+ "loss": 0.1837,
+ "step": 823
+ },
+ {
+ "epoch": 6.195488721804511,
+ "grad_norm": 0.19966207573813569,
+ "learning_rate": 1.6524668112058615e-06,
+ "loss": 0.1799,
+ "step": 824
+ },
+ {
+ "epoch": 6.203007518796992,
+ "grad_norm": 0.20330848206705188,
+ "learning_rate": 1.6515245539658929e-06,
+ "loss": 0.1921,
+ "step": 825
+ },
+ {
+ "epoch": 6.2105263157894735,
+ "grad_norm": 0.2043453972454978,
+ "learning_rate": 1.6505812905444012e-06,
+ "loss": 0.1937,
+ "step": 826
+ },
+ {
+ "epoch": 6.2180451127819545,
+ "grad_norm": 0.3651325955961774,
+ "learning_rate": 1.649637022398115e-06,
+ "loss": 0.1871,
+ "step": 827
+ },
+ {
+ "epoch": 6.225563909774436,
+ "grad_norm": 0.19868316737287683,
+ "learning_rate": 1.6486917509853137e-06,
+ "loss": 0.191,
+ "step": 828
+ },
+ {
+ "epoch": 6.2330827067669174,
+ "grad_norm": 0.20597272181032636,
+ "learning_rate": 1.6477454777658273e-06,
+ "loss": 0.1841,
+ "step": 829
+ },
+ {
+ "epoch": 6.2406015037593985,
+ "grad_norm": 0.2265599966484634,
+ "learning_rate": 1.646798204201032e-06,
+ "loss": 0.1898,
+ "step": 830
+ },
+ {
+ "epoch": 6.2481203007518795,
+ "grad_norm": 0.21280753704005995,
+ "learning_rate": 1.6458499317538487e-06,
+ "loss": 0.1919,
+ "step": 831
+ },
+ {
+ "epoch": 6.2556390977443606,
+ "grad_norm": 0.21050590454344095,
+ "learning_rate": 1.6449006618887418e-06,
+ "loss": 0.1885,
+ "step": 832
+ },
+ {
+ "epoch": 6.2631578947368425,
+ "grad_norm": 0.2097731442684287,
+ "learning_rate": 1.6439503960717154e-06,
+ "loss": 0.1884,
+ "step": 833
+ },
+ {
+ "epoch": 6.2706766917293235,
+ "grad_norm": 0.2107420724311614,
+ "learning_rate": 1.642999135770312e-06,
+ "loss": 0.1929,
+ "step": 834
+ },
+ {
+ "epoch": 6.2781954887218046,
+ "grad_norm": 0.21136345911278426,
+ "learning_rate": 1.6420468824536094e-06,
+ "loss": 0.1937,
+ "step": 835
+ },
+ {
+ "epoch": 6.285714285714286,
+ "grad_norm": 0.19991905344725375,
+ "learning_rate": 1.64109363759222e-06,
+ "loss": 0.1912,
+ "step": 836
+ },
+ {
+ "epoch": 6.293233082706767,
+ "grad_norm": 0.20327427190560068,
+ "learning_rate": 1.6401394026582867e-06,
+ "loss": 0.19,
+ "step": 837
+ },
+ {
+ "epoch": 6.3007518796992485,
+ "grad_norm": 0.20551497643274225,
+ "learning_rate": 1.6391841791254816e-06,
+ "loss": 0.1865,
+ "step": 838
+ },
+ {
+ "epoch": 6.30827067669173,
+ "grad_norm": 0.2059766772801898,
+ "learning_rate": 1.6382279684690033e-06,
+ "loss": 0.1949,
+ "step": 839
+ },
+ {
+ "epoch": 6.315789473684211,
+ "grad_norm": 0.21676035552246462,
+ "learning_rate": 1.6372707721655755e-06,
+ "loss": 0.188,
+ "step": 840
+ },
+ {
+ "epoch": 6.323308270676692,
+ "grad_norm": 0.20740070600240681,
+ "learning_rate": 1.6363125916934434e-06,
+ "loss": 0.1948,
+ "step": 841
+ },
+ {
+ "epoch": 6.330827067669173,
+ "grad_norm": 0.200495146366542,
+ "learning_rate": 1.6353534285323722e-06,
+ "loss": 0.1886,
+ "step": 842
+ },
+ {
+ "epoch": 6.338345864661654,
+ "grad_norm": 0.21562619645937706,
+ "learning_rate": 1.6343932841636455e-06,
+ "loss": 0.1875,
+ "step": 843
+ },
+ {
+ "epoch": 6.345864661654136,
+ "grad_norm": 0.20492706697055546,
+ "learning_rate": 1.6334321600700611e-06,
+ "loss": 0.189,
+ "step": 844
+ },
+ {
+ "epoch": 6.353383458646617,
+ "grad_norm": 0.23399988897360227,
+ "learning_rate": 1.6324700577359308e-06,
+ "loss": 0.1942,
+ "step": 845
+ },
+ {
+ "epoch": 6.360902255639098,
+ "grad_norm": 0.21801281765604358,
+ "learning_rate": 1.6315069786470765e-06,
+ "loss": 0.1884,
+ "step": 846
+ },
+ {
+ "epoch": 6.368421052631579,
+ "grad_norm": 0.20169629399167494,
+ "learning_rate": 1.6305429242908287e-06,
+ "loss": 0.1819,
+ "step": 847
+ },
+ {
+ "epoch": 6.37593984962406,
+ "grad_norm": 0.22212167997538615,
+ "learning_rate": 1.629577896156024e-06,
+ "loss": 0.2029,
+ "step": 848
+ },
+ {
+ "epoch": 6.383458646616542,
+ "grad_norm": 0.22977799005959498,
+ "learning_rate": 1.6286118957330035e-06,
+ "loss": 0.1871,
+ "step": 849
+ },
+ {
+ "epoch": 6.390977443609023,
+ "grad_norm": 0.20764168803274344,
+ "learning_rate": 1.6276449245136088e-06,
+ "loss": 0.189,
+ "step": 850
+ },
+ {
+ "epoch": 6.398496240601504,
+ "grad_norm": 0.2031343257960209,
+ "learning_rate": 1.6266769839911815e-06,
+ "loss": 0.1972,
+ "step": 851
+ },
+ {
+ "epoch": 6.406015037593985,
+ "grad_norm": 0.20804096057915558,
+ "learning_rate": 1.6257080756605598e-06,
+ "loss": 0.1804,
+ "step": 852
+ },
+ {
+ "epoch": 6.413533834586466,
+ "grad_norm": 0.21347563182787138,
+ "learning_rate": 1.624738201018077e-06,
+ "loss": 0.189,
+ "step": 853
+ },
+ {
+ "epoch": 6.421052631578947,
+ "grad_norm": 0.20852954187642395,
+ "learning_rate": 1.623767361561558e-06,
+ "loss": 0.1901,
+ "step": 854
+ },
+ {
+ "epoch": 6.428571428571429,
+ "grad_norm": 0.20730501065466447,
+ "learning_rate": 1.6227955587903179e-06,
+ "loss": 0.1799,
+ "step": 855
+ },
+ {
+ "epoch": 6.43609022556391,
+ "grad_norm": 0.21510023981867332,
+ "learning_rate": 1.6218227942051602e-06,
+ "loss": 0.1892,
+ "step": 856
+ },
+ {
+ "epoch": 6.443609022556391,
+ "grad_norm": 0.21286177660571987,
+ "learning_rate": 1.6208490693083734e-06,
+ "loss": 0.1839,
+ "step": 857
+ },
+ {
+ "epoch": 6.451127819548872,
+ "grad_norm": 0.21089423908431468,
+ "learning_rate": 1.6198743856037283e-06,
+ "loss": 0.1862,
+ "step": 858
+ },
+ {
+ "epoch": 6.458646616541353,
+ "grad_norm": 0.20888597973009043,
+ "learning_rate": 1.618898744596477e-06,
+ "loss": 0.1922,
+ "step": 859
+ },
+ {
+ "epoch": 6.466165413533835,
+ "grad_norm": 0.21100400944680012,
+ "learning_rate": 1.6179221477933507e-06,
+ "loss": 0.1899,
+ "step": 860
+ },
+ {
+ "epoch": 6.473684210526316,
+ "grad_norm": 0.21680404972021133,
+ "learning_rate": 1.6169445967025555e-06,
+ "loss": 0.1824,
+ "step": 861
+ },
+ {
+ "epoch": 6.481203007518797,
+ "grad_norm": 0.20677216362229459,
+ "learning_rate": 1.6159660928337721e-06,
+ "loss": 0.1936,
+ "step": 862
+ },
+ {
+ "epoch": 6.488721804511278,
+ "grad_norm": 0.209006132170478,
+ "learning_rate": 1.6149866376981524e-06,
+ "loss": 0.1857,
+ "step": 863
+ },
+ {
+ "epoch": 6.496240601503759,
+ "grad_norm": 0.2162874009972458,
+ "learning_rate": 1.6140062328083168e-06,
+ "loss": 0.2042,
+ "step": 864
+ },
+ {
+ "epoch": 6.503759398496241,
+ "grad_norm": 0.2077845566024994,
+ "learning_rate": 1.6130248796783535e-06,
+ "loss": 0.1884,
+ "step": 865
+ },
+ {
+ "epoch": 6.511278195488722,
+ "grad_norm": 0.2048883159062837,
+ "learning_rate": 1.6120425798238143e-06,
+ "loss": 0.1788,
+ "step": 866
+ },
+ {
+ "epoch": 6.518796992481203,
+ "grad_norm": 0.22203511915976737,
+ "learning_rate": 1.6110593347617132e-06,
+ "loss": 0.1902,
+ "step": 867
+ },
+ {
+ "epoch": 6.526315789473684,
+ "grad_norm": 0.21196618575486373,
+ "learning_rate": 1.6100751460105243e-06,
+ "loss": 0.1958,
+ "step": 868
+ },
+ {
+ "epoch": 6.533834586466165,
+ "grad_norm": 0.211339341351388,
+ "learning_rate": 1.609090015090179e-06,
+ "loss": 0.1791,
+ "step": 869
+ },
+ {
+ "epoch": 6.541353383458647,
+ "grad_norm": 0.20231489041077508,
+ "learning_rate": 1.6081039435220634e-06,
+ "loss": 0.1891,
+ "step": 870
+ },
+ {
+ "epoch": 6.548872180451128,
+ "grad_norm": 0.21822583381507699,
+ "learning_rate": 1.6071169328290162e-06,
+ "loss": 0.1961,
+ "step": 871
+ },
+ {
+ "epoch": 6.556390977443609,
+ "grad_norm": 0.2132028229819378,
+ "learning_rate": 1.6061289845353274e-06,
+ "loss": 0.1936,
+ "step": 872
+ },
+ {
+ "epoch": 6.56390977443609,
+ "grad_norm": 0.20773942161036077,
+ "learning_rate": 1.6051401001667336e-06,
+ "loss": 0.1919,
+ "step": 873
+ },
+ {
+ "epoch": 6.571428571428571,
+ "grad_norm": 0.21453873065549098,
+ "learning_rate": 1.6041502812504185e-06,
+ "loss": 0.1997,
+ "step": 874
+ },
+ {
+ "epoch": 6.578947368421053,
+ "grad_norm": 0.21617777391314802,
+ "learning_rate": 1.6031595293150075e-06,
+ "loss": 0.1865,
+ "step": 875
+ },
+ {
+ "epoch": 6.586466165413534,
+ "grad_norm": 0.21226668760470313,
+ "learning_rate": 1.6021678458905683e-06,
+ "loss": 0.1911,
+ "step": 876
+ },
+ {
+ "epoch": 6.593984962406015,
+ "grad_norm": 0.21581391533271396,
+ "learning_rate": 1.6011752325086064e-06,
+ "loss": 0.1883,
+ "step": 877
+ },
+ {
+ "epoch": 6.601503759398496,
+ "grad_norm": 0.2104038379630153,
+ "learning_rate": 1.6001816907020633e-06,
+ "loss": 0.1897,
+ "step": 878
+ },
+ {
+ "epoch": 6.609022556390977,
+ "grad_norm": 0.21155551956491123,
+ "learning_rate": 1.599187222005315e-06,
+ "loss": 0.1923,
+ "step": 879
+ },
+ {
+ "epoch": 6.616541353383458,
+ "grad_norm": 0.2047793326408386,
+ "learning_rate": 1.5981918279541685e-06,
+ "loss": 0.1805,
+ "step": 880
+ },
+ {
+ "epoch": 6.62406015037594,
+ "grad_norm": 0.20681391853425019,
+ "learning_rate": 1.5971955100858603e-06,
+ "loss": 0.1792,
+ "step": 881
+ },
+ {
+ "epoch": 6.631578947368421,
+ "grad_norm": 0.20524667393674906,
+ "learning_rate": 1.5961982699390525e-06,
+ "loss": 0.1881,
+ "step": 882
+ },
+ {
+ "epoch": 6.639097744360902,
+ "grad_norm": 0.21088136758822862,
+ "learning_rate": 1.5952001090538332e-06,
+ "loss": 0.1846,
+ "step": 883
+ },
+ {
+ "epoch": 6.646616541353383,
+ "grad_norm": 0.2149779977437084,
+ "learning_rate": 1.5942010289717105e-06,
+ "loss": 0.1795,
+ "step": 884
+ },
+ {
+ "epoch": 6.654135338345864,
+ "grad_norm": 0.22477781972910563,
+ "learning_rate": 1.5932010312356137e-06,
+ "loss": 0.1879,
+ "step": 885
+ },
+ {
+ "epoch": 6.661654135338345,
+ "grad_norm": 0.2066447576249302,
+ "learning_rate": 1.5922001173898887e-06,
+ "loss": 0.1899,
+ "step": 886
+ },
+ {
+ "epoch": 6.669172932330827,
+ "grad_norm": 0.21668971565378484,
+ "learning_rate": 1.591198288980296e-06,
+ "loss": 0.1801,
+ "step": 887
+ },
+ {
+ "epoch": 6.676691729323308,
+ "grad_norm": 0.20638743286796699,
+ "learning_rate": 1.5901955475540083e-06,
+ "loss": 0.1882,
+ "step": 888
+ },
+ {
+ "epoch": 6.684210526315789,
+ "grad_norm": 0.23912036997276723,
+ "learning_rate": 1.5891918946596095e-06,
+ "loss": 0.1828,
+ "step": 889
+ },
+ {
+ "epoch": 6.69172932330827,
+ "grad_norm": 0.21462102389722368,
+ "learning_rate": 1.5881873318470893e-06,
+ "loss": 0.1851,
+ "step": 890
+ },
+ {
+ "epoch": 6.6992481203007515,
+ "grad_norm": 0.21043159484141516,
+ "learning_rate": 1.5871818606678447e-06,
+ "loss": 0.1854,
+ "step": 891
+ },
+ {
+ "epoch": 6.706766917293233,
+ "grad_norm": 0.215971542420956,
+ "learning_rate": 1.5861754826746733e-06,
+ "loss": 0.1993,
+ "step": 892
+ },
+ {
+ "epoch": 6.714285714285714,
+ "grad_norm": 0.218562805974016,
+ "learning_rate": 1.5851681994217754e-06,
+ "loss": 0.1963,
+ "step": 893
+ },
+ {
+ "epoch": 6.7218045112781954,
+ "grad_norm": 0.22052438837451402,
+ "learning_rate": 1.5841600124647477e-06,
+ "loss": 0.1977,
+ "step": 894
+ },
+ {
+ "epoch": 6.7293233082706765,
+ "grad_norm": 0.2211168977204507,
+ "learning_rate": 1.5831509233605829e-06,
+ "loss": 0.1809,
+ "step": 895
+ },
+ {
+ "epoch": 6.7368421052631575,
+ "grad_norm": 0.2073501931095508,
+ "learning_rate": 1.5821409336676674e-06,
+ "loss": 0.1871,
+ "step": 896
+ },
+ {
+ "epoch": 6.7443609022556394,
+ "grad_norm": 0.2126104012148589,
+ "learning_rate": 1.581130044945778e-06,
+ "loss": 0.1892,
+ "step": 897
+ },
+ {
+ "epoch": 6.7518796992481205,
+ "grad_norm": 0.21929977561607983,
+ "learning_rate": 1.5801182587560803e-06,
+ "loss": 0.1949,
+ "step": 898
+ },
+ {
+ "epoch": 6.7593984962406015,
+ "grad_norm": 0.20465290161948074,
+ "learning_rate": 1.5791055766611255e-06,
+ "loss": 0.1833,
+ "step": 899
+ },
+ {
+ "epoch": 6.7669172932330826,
+ "grad_norm": 0.2162498662986876,
+ "learning_rate": 1.5780920002248483e-06,
+ "loss": 0.1877,
+ "step": 900
+ },
+ {
+ "epoch": 6.774436090225564,
+ "grad_norm": 0.21379369412164856,
+ "learning_rate": 1.5770775310125651e-06,
+ "loss": 0.1841,
+ "step": 901
+ },
+ {
+ "epoch": 6.7819548872180455,
+ "grad_norm": 0.21999848623593538,
+ "learning_rate": 1.5760621705909705e-06,
+ "loss": 0.1911,
+ "step": 902
+ },
+ {
+ "epoch": 6.7894736842105265,
+ "grad_norm": 0.20784700042609563,
+ "learning_rate": 1.5750459205281361e-06,
+ "loss": 0.1756,
+ "step": 903
+ },
+ {
+ "epoch": 6.796992481203008,
+ "grad_norm": 0.21130146707354394,
+ "learning_rate": 1.5740287823935066e-06,
+ "loss": 0.1827,
+ "step": 904
+ },
+ {
+ "epoch": 6.804511278195489,
+ "grad_norm": 0.21441010526199247,
+ "learning_rate": 1.573010757757899e-06,
+ "loss": 0.1862,
+ "step": 905
+ },
+ {
+ "epoch": 6.81203007518797,
+ "grad_norm": 0.21685696251989317,
+ "learning_rate": 1.5719918481934986e-06,
+ "loss": 0.182,
+ "step": 906
+ },
+ {
+ "epoch": 6.819548872180452,
+ "grad_norm": 0.2191914119766328,
+ "learning_rate": 1.570972055273858e-06,
+ "loss": 0.1827,
+ "step": 907
+ },
+ {
+ "epoch": 6.827067669172933,
+ "grad_norm": 0.20952917241472369,
+ "learning_rate": 1.569951380573894e-06,
+ "loss": 0.188,
+ "step": 908
+ },
+ {
+ "epoch": 6.834586466165414,
+ "grad_norm": 0.20254054862504803,
+ "learning_rate": 1.5689298256698845e-06,
+ "loss": 0.1892,
+ "step": 909
+ },
+ {
+ "epoch": 6.842105263157895,
+ "grad_norm": 0.21679075018348326,
+ "learning_rate": 1.567907392139467e-06,
+ "loss": 0.1946,
+ "step": 910
+ },
+ {
+ "epoch": 6.849624060150376,
+ "grad_norm": 0.21383579522405452,
+ "learning_rate": 1.5668840815616364e-06,
+ "loss": 0.1925,
+ "step": 911
+ },
+ {
+ "epoch": 6.857142857142857,
+ "grad_norm": 0.2109973515879204,
+ "learning_rate": 1.5658598955167418e-06,
+ "loss": 0.1937,
+ "step": 912
+ },
+ {
+ "epoch": 6.864661654135339,
+ "grad_norm": 0.21057868137532487,
+ "learning_rate": 1.5648348355864838e-06,
+ "loss": 0.1842,
+ "step": 913
+ },
+ {
+ "epoch": 6.87218045112782,
+ "grad_norm": 0.21215859340624627,
+ "learning_rate": 1.5638089033539132e-06,
+ "loss": 0.1827,
+ "step": 914
+ },
+ {
+ "epoch": 6.879699248120301,
+ "grad_norm": 0.20560313741930225,
+ "learning_rate": 1.562782100403428e-06,
+ "loss": 0.1841,
+ "step": 915
+ },
+ {
+ "epoch": 6.887218045112782,
+ "grad_norm": 0.20776658069258191,
+ "learning_rate": 1.5617544283207708e-06,
+ "loss": 0.1903,
+ "step": 916
+ },
+ {
+ "epoch": 6.894736842105263,
+ "grad_norm": 0.22157652819640153,
+ "learning_rate": 1.5607258886930259e-06,
+ "loss": 0.1874,
+ "step": 917
+ },
+ {
+ "epoch": 6.902255639097744,
+ "grad_norm": 0.21429949218852315,
+ "learning_rate": 1.5596964831086181e-06,
+ "loss": 0.1904,
+ "step": 918
+ },
+ {
+ "epoch": 6.909774436090226,
+ "grad_norm": 0.21280754909289565,
+ "learning_rate": 1.5586662131573092e-06,
+ "loss": 0.1969,
+ "step": 919
+ },
+ {
+ "epoch": 6.917293233082707,
+ "grad_norm": 0.20276867713076357,
+ "learning_rate": 1.5576350804301957e-06,
+ "loss": 0.1912,
+ "step": 920
+ },
+ {
+ "epoch": 6.924812030075188,
+ "grad_norm": 0.21739776236958644,
+ "learning_rate": 1.556603086519707e-06,
+ "loss": 0.1973,
+ "step": 921
+ },
+ {
+ "epoch": 6.932330827067669,
+ "grad_norm": 0.2137221670324067,
+ "learning_rate": 1.5555702330196021e-06,
+ "loss": 0.183,
+ "step": 922
+ },
+ {
+ "epoch": 6.93984962406015,
+ "grad_norm": 0.23396287003982305,
+ "learning_rate": 1.5545365215249676e-06,
+ "loss": 0.1869,
+ "step": 923
+ },
+ {
+ "epoch": 6.947368421052632,
+ "grad_norm": 0.20676347300020592,
+ "learning_rate": 1.5535019536322157e-06,
+ "loss": 0.1779,
+ "step": 924
+ },
+ {
+ "epoch": 6.954887218045113,
+ "grad_norm": 0.22002421586462886,
+ "learning_rate": 1.5524665309390801e-06,
+ "loss": 0.188,
+ "step": 925
+ },
+ {
+ "epoch": 6.962406015037594,
+ "grad_norm": 0.21057030892594492,
+ "learning_rate": 1.551430255044615e-06,
+ "loss": 0.1897,
+ "step": 926
+ },
+ {
+ "epoch": 6.969924812030075,
+ "grad_norm": 0.21374211899895193,
+ "learning_rate": 1.5503931275491928e-06,
+ "loss": 0.1865,
+ "step": 927
+ },
+ {
+ "epoch": 6.977443609022556,
+ "grad_norm": 0.21390588775725228,
+ "learning_rate": 1.5493551500545005e-06,
+ "loss": 0.1897,
+ "step": 928
+ },
+ {
+ "epoch": 6.984962406015038,
+ "grad_norm": 0.2574966947063672,
+ "learning_rate": 1.5483163241635383e-06,
+ "loss": 0.1938,
+ "step": 929
+ },
+ {
+ "epoch": 6.992481203007519,
+ "grad_norm": 0.21857474509840535,
+ "learning_rate": 1.547276651480616e-06,
+ "loss": 0.1947,
+ "step": 930
+ },
+ {
+ "epoch": 7.0,
+ "grad_norm": 0.20852158513606997,
+ "learning_rate": 1.5462361336113511e-06,
+ "loss": 0.1893,
+ "step": 931
+ },
+ {
+ "epoch": 7.0,
+ "eval_loss": 0.2427692860364914,
+ "eval_runtime": 36.8649,
+ "eval_samples_per_second": 12.125,
+ "eval_steps_per_second": 0.19,
+ "step": 931
+ },
+ {
+ "epoch": 7.007518796992481,
+ "grad_norm": 0.26505257679055283,
+ "learning_rate": 1.5451947721626675e-06,
+ "loss": 0.1778,
+ "step": 932
+ },
+ {
+ "epoch": 7.015037593984962,
+ "grad_norm": 0.22520849685608596,
+ "learning_rate": 1.5441525687427906e-06,
+ "loss": 0.1762,
+ "step": 933
+ },
+ {
+ "epoch": 7.022556390977444,
+ "grad_norm": 0.24421243778883733,
+ "learning_rate": 1.5431095249612464e-06,
+ "loss": 0.1827,
+ "step": 934
+ },
+ {
+ "epoch": 7.030075187969925,
+ "grad_norm": 0.22946480397547603,
+ "learning_rate": 1.5420656424288595e-06,
+ "loss": 0.1753,
+ "step": 935
+ },
+ {
+ "epoch": 7.037593984962406,
+ "grad_norm": 0.23456790278698067,
+ "learning_rate": 1.5410209227577485e-06,
+ "loss": 0.1733,
+ "step": 936
+ },
+ {
+ "epoch": 7.045112781954887,
+ "grad_norm": 0.22473296186899017,
+ "learning_rate": 1.5399753675613257e-06,
+ "loss": 0.165,
+ "step": 937
+ },
+ {
+ "epoch": 7.052631578947368,
+ "grad_norm": 0.24174432379749838,
+ "learning_rate": 1.5389289784542943e-06,
+ "loss": 0.1735,
+ "step": 938
+ },
+ {
+ "epoch": 7.06015037593985,
+ "grad_norm": 0.2144698863937405,
+ "learning_rate": 1.5378817570526437e-06,
+ "loss": 0.1725,
+ "step": 939
+ },
+ {
+ "epoch": 7.067669172932331,
+ "grad_norm": 0.25553812375096957,
+ "learning_rate": 1.53683370497365e-06,
+ "loss": 0.165,
+ "step": 940
+ },
+ {
+ "epoch": 7.075187969924812,
+ "grad_norm": 0.21916472405518053,
+ "learning_rate": 1.5357848238358719e-06,
+ "loss": 0.1785,
+ "step": 941
+ },
+ {
+ "epoch": 7.082706766917293,
+ "grad_norm": 0.21896507636743057,
+ "learning_rate": 1.5347351152591484e-06,
+ "loss": 0.183,
+ "step": 942
+ },
+ {
+ "epoch": 7.090225563909774,
+ "grad_norm": 0.21952327780765873,
+ "learning_rate": 1.5336845808645955e-06,
+ "loss": 0.1744,
+ "step": 943
+ },
+ {
+ "epoch": 7.097744360902255,
+ "grad_norm": 0.24223521572645362,
+ "learning_rate": 1.532633222274606e-06,
+ "loss": 0.1773,
+ "step": 944
+ },
+ {
+ "epoch": 7.105263157894737,
+ "grad_norm": 0.20969965749238753,
+ "learning_rate": 1.5315810411128447e-06,
+ "loss": 0.1709,
+ "step": 945
+ },
+ {
+ "epoch": 7.112781954887218,
+ "grad_norm": 0.21257297067266598,
+ "learning_rate": 1.5305280390042468e-06,
+ "loss": 0.1742,
+ "step": 946
+ },
+ {
+ "epoch": 7.120300751879699,
+ "grad_norm": 0.23408694649483552,
+ "learning_rate": 1.5294742175750156e-06,
+ "loss": 0.1756,
+ "step": 947
+ },
+ {
+ "epoch": 7.12781954887218,
+ "grad_norm": 0.21255131039613523,
+ "learning_rate": 1.5284195784526194e-06,
+ "loss": 0.1746,
+ "step": 948
+ },
+ {
+ "epoch": 7.135338345864661,
+ "grad_norm": 0.2242139782749053,
+ "learning_rate": 1.5273641232657894e-06,
+ "loss": 0.1786,
+ "step": 949
+ },
+ {
+ "epoch": 7.142857142857143,
+ "grad_norm": 0.22482414712610285,
+ "learning_rate": 1.5263078536445172e-06,
+ "loss": 0.1705,
+ "step": 950
+ },
+ {
+ "epoch": 7.150375939849624,
+ "grad_norm": 0.22196033495037595,
+ "learning_rate": 1.5252507712200525e-06,
+ "loss": 0.178,
+ "step": 951
+ },
+ {
+ "epoch": 7.157894736842105,
+ "grad_norm": 0.21383699790765098,
+ "learning_rate": 1.524192877624899e-06,
+ "loss": 0.173,
+ "step": 952
+ },
+ {
+ "epoch": 7.165413533834586,
+ "grad_norm": 0.21112181151067969,
+ "learning_rate": 1.523134174492815e-06,
+ "loss": 0.1668,
+ "step": 953
+ },
+ {
+ "epoch": 7.172932330827067,
+ "grad_norm": 0.21302406038002006,
+ "learning_rate": 1.5220746634588074e-06,
+ "loss": 0.1805,
+ "step": 954
+ },
+ {
+ "epoch": 7.180451127819548,
+ "grad_norm": 0.21884369871207637,
+ "learning_rate": 1.521014346159131e-06,
+ "loss": 0.1794,
+ "step": 955
+ },
+ {
+ "epoch": 7.18796992481203,
+ "grad_norm": 0.21409051017209468,
+ "learning_rate": 1.519953224231287e-06,
+ "loss": 0.1789,
+ "step": 956
+ },
+ {
+ "epoch": 7.195488721804511,
+ "grad_norm": 0.21799071379100246,
+ "learning_rate": 1.5188912993140174e-06,
+ "loss": 0.1734,
+ "step": 957
+ },
+ {
+ "epoch": 7.203007518796992,
+ "grad_norm": 0.21323932050490393,
+ "learning_rate": 1.5178285730473067e-06,
+ "loss": 0.1723,
+ "step": 958
+ },
+ {
+ "epoch": 7.2105263157894735,
+ "grad_norm": 0.22885271003256574,
+ "learning_rate": 1.5167650470723739e-06,
+ "loss": 0.176,
+ "step": 959
+ },
+ {
+ "epoch": 7.2180451127819545,
+ "grad_norm": 0.212042610894159,
+ "learning_rate": 1.5157007230316756e-06,
+ "loss": 0.1755,
+ "step": 960
+ },
+ {
+ "epoch": 7.225563909774436,
+ "grad_norm": 0.2201938840095021,
+ "learning_rate": 1.5146356025688998e-06,
+ "loss": 0.1746,
+ "step": 961
+ },
+ {
+ "epoch": 7.2330827067669174,
+ "grad_norm": 0.21785482034349357,
+ "learning_rate": 1.5135696873289646e-06,
+ "loss": 0.1671,
+ "step": 962
+ },
+ {
+ "epoch": 7.2406015037593985,
+ "grad_norm": 0.21818953130698057,
+ "learning_rate": 1.512502978958015e-06,
+ "loss": 0.1703,
+ "step": 963
+ },
+ {
+ "epoch": 7.2481203007518795,
+ "grad_norm": 0.20543761768376806,
+ "learning_rate": 1.5114354791034222e-06,
+ "loss": 0.1825,
+ "step": 964
+ },
+ {
+ "epoch": 7.2556390977443606,
+ "grad_norm": 0.20220789023066957,
+ "learning_rate": 1.5103671894137784e-06,
+ "loss": 0.1779,
+ "step": 965
+ },
+ {
+ "epoch": 7.2631578947368425,
+ "grad_norm": 0.20980830899427663,
+ "learning_rate": 1.509298111538896e-06,
+ "loss": 0.1704,
+ "step": 966
+ },
+ {
+ "epoch": 7.2706766917293235,
+ "grad_norm": 0.21644725351015526,
+ "learning_rate": 1.5082282471298054e-06,
+ "loss": 0.1773,
+ "step": 967
+ },
+ {
+ "epoch": 7.2781954887218046,
+ "grad_norm": 0.2261382532577826,
+ "learning_rate": 1.5071575978387502e-06,
+ "loss": 0.1678,
+ "step": 968
+ },
+ {
+ "epoch": 7.285714285714286,
+ "grad_norm": 0.23151503571183762,
+ "learning_rate": 1.5060861653191874e-06,
+ "loss": 0.171,
+ "step": 969
+ },
+ {
+ "epoch": 7.293233082706767,
+ "grad_norm": 0.2084199293588356,
+ "learning_rate": 1.5050139512257829e-06,
+ "loss": 0.1841,
+ "step": 970
+ },
+ {
+ "epoch": 7.3007518796992485,
+ "grad_norm": 0.22862153244219813,
+ "learning_rate": 1.50394095721441e-06,
+ "loss": 0.1716,
+ "step": 971
+ },
+ {
+ "epoch": 7.30827067669173,
+ "grad_norm": 0.2167019907344874,
+ "learning_rate": 1.502867184942146e-06,
+ "loss": 0.1669,
+ "step": 972
+ },
+ {
+ "epoch": 7.315789473684211,
+ "grad_norm": 0.20972612679725483,
+ "learning_rate": 1.5017926360672709e-06,
+ "loss": 0.1717,
+ "step": 973
+ },
+ {
+ "epoch": 7.323308270676692,
+ "grad_norm": 0.21168695431898943,
+ "learning_rate": 1.5007173122492634e-06,
+ "loss": 0.1802,
+ "step": 974
+ },
+ {
+ "epoch": 7.330827067669173,
+ "grad_norm": 0.23398130214404367,
+ "learning_rate": 1.4996412151487986e-06,
+ "loss": 0.1751,
+ "step": 975
+ },
+ {
+ "epoch": 7.338345864661654,
+ "grad_norm": 0.21678531533551917,
+ "learning_rate": 1.4985643464277474e-06,
+ "loss": 0.1768,
+ "step": 976
+ },
+ {
+ "epoch": 7.345864661654136,
+ "grad_norm": 0.22119801736688877,
+ "learning_rate": 1.4974867077491704e-06,
+ "loss": 0.1762,
+ "step": 977
+ },
+ {
+ "epoch": 7.353383458646617,
+ "grad_norm": 0.2124896007109167,
+ "learning_rate": 1.4964083007773188e-06,
+ "loss": 0.1764,
+ "step": 978
+ },
+ {
+ "epoch": 7.360902255639098,
+ "grad_norm": 0.21147095344958536,
+ "learning_rate": 1.4953291271776292e-06,
+ "loss": 0.1792,
+ "step": 979
+ },
+ {
+ "epoch": 7.368421052631579,
+ "grad_norm": 0.21732474277992977,
+ "learning_rate": 1.4942491886167227e-06,
+ "loss": 0.1667,
+ "step": 980
+ },
+ {
+ "epoch": 7.37593984962406,
+ "grad_norm": 0.2131871421348897,
+ "learning_rate": 1.493168486762402e-06,
+ "loss": 0.1793,
+ "step": 981
+ },
+ {
+ "epoch": 7.383458646616542,
+ "grad_norm": 0.22844300593706618,
+ "learning_rate": 1.4920870232836484e-06,
+ "loss": 0.1743,
+ "step": 982
+ },
+ {
+ "epoch": 7.390977443609023,
+ "grad_norm": 0.21817360239908914,
+ "learning_rate": 1.4910047998506192e-06,
+ "loss": 0.1681,
+ "step": 983
+ },
+ {
+ "epoch": 7.398496240601504,
+ "grad_norm": 0.218040996145761,
+ "learning_rate": 1.489921818134645e-06,
+ "loss": 0.1706,
+ "step": 984
+ },
+ {
+ "epoch": 7.406015037593985,
+ "grad_norm": 0.22085334330041662,
+ "learning_rate": 1.4888380798082287e-06,
+ "loss": 0.1706,
+ "step": 985
+ },
+ {
+ "epoch": 7.413533834586466,
+ "grad_norm": 0.21603473868601347,
+ "learning_rate": 1.4877535865450405e-06,
+ "loss": 0.1723,
+ "step": 986
+ },
+ {
+ "epoch": 7.421052631578947,
+ "grad_norm": 0.22168200980251596,
+ "learning_rate": 1.4866683400199165e-06,
+ "loss": 0.1682,
+ "step": 987
+ },
+ {
+ "epoch": 7.428571428571429,
+ "grad_norm": 0.2266247654128682,
+ "learning_rate": 1.4855823419088573e-06,
+ "loss": 0.1786,
+ "step": 988
+ },
+ {
+ "epoch": 7.43609022556391,
+ "grad_norm": 0.23026535145847707,
+ "learning_rate": 1.4844955938890226e-06,
+ "loss": 0.1706,
+ "step": 989
+ },
+ {
+ "epoch": 7.443609022556391,
+ "grad_norm": 0.22072773444572644,
+ "learning_rate": 1.4834080976387313e-06,
+ "loss": 0.1753,
+ "step": 990
+ },
+ {
+ "epoch": 7.451127819548872,
+ "grad_norm": 0.21442048530896207,
+ "learning_rate": 1.4823198548374577e-06,
+ "loss": 0.1702,
+ "step": 991
+ },
+ {
+ "epoch": 7.458646616541353,
+ "grad_norm": 0.20603469492060156,
+ "learning_rate": 1.4812308671658283e-06,
+ "loss": 0.1703,
+ "step": 992
+ },
+ {
+ "epoch": 7.466165413533835,
+ "grad_norm": 0.21870062508940677,
+ "learning_rate": 1.480141136305621e-06,
+ "loss": 0.1735,
+ "step": 993
+ },
+ {
+ "epoch": 7.473684210526316,
+ "grad_norm": 0.22037187549386375,
+ "learning_rate": 1.479050663939761e-06,
+ "loss": 0.1825,
+ "step": 994
+ },
+ {
+ "epoch": 7.481203007518797,
+ "grad_norm": 0.21526694205085614,
+ "learning_rate": 1.4779594517523184e-06,
+ "loss": 0.1817,
+ "step": 995
+ },
+ {
+ "epoch": 7.488721804511278,
+ "grad_norm": 0.22067226016335834,
+ "learning_rate": 1.476867501428506e-06,
+ "loss": 0.1796,
+ "step": 996
+ },
+ {
+ "epoch": 7.496240601503759,
+ "grad_norm": 0.23506186975910262,
+ "learning_rate": 1.4757748146546769e-06,
+ "loss": 0.1739,
+ "step": 997
+ },
+ {
+ "epoch": 7.503759398496241,
+ "grad_norm": 0.23762807220475712,
+ "learning_rate": 1.4746813931183205e-06,
+ "loss": 0.1798,
+ "step": 998
+ },
+ {
+ "epoch": 7.511278195488722,
+ "grad_norm": 0.2128030999660742,
+ "learning_rate": 1.4735872385080625e-06,
+ "loss": 0.1803,
+ "step": 999
+ },
+ {
+ "epoch": 7.518796992481203,
+ "grad_norm": 0.22445432104692994,
+ "learning_rate": 1.4724923525136595e-06,
+ "loss": 0.1708,
+ "step": 1000
+ },
+ {
+ "epoch": 7.526315789473684,
+ "grad_norm": 0.22592943094949827,
+ "learning_rate": 1.4713967368259978e-06,
+ "loss": 0.1783,
+ "step": 1001
+ },
+ {
+ "epoch": 7.533834586466165,
+ "grad_norm": 0.23442943135690836,
+ "learning_rate": 1.4703003931370908e-06,
+ "loss": 0.171,
+ "step": 1002
+ },
+ {
+ "epoch": 7.541353383458647,
+ "grad_norm": 0.24121263647375604,
+ "learning_rate": 1.4692033231400763e-06,
+ "loss": 0.1651,
+ "step": 1003
+ },
+ {
+ "epoch": 7.548872180451128,
+ "grad_norm": 0.21886742462052433,
+ "learning_rate": 1.4681055285292136e-06,
+ "loss": 0.1783,
+ "step": 1004
+ },
+ {
+ "epoch": 7.556390977443609,
+ "grad_norm": 0.23303581856928796,
+ "learning_rate": 1.4670070109998814e-06,
+ "loss": 0.1738,
+ "step": 1005
+ },
+ {
+ "epoch": 7.56390977443609,
+ "grad_norm": 0.21471791167088822,
+ "learning_rate": 1.465907772248574e-06,
+ "loss": 0.1716,
+ "step": 1006
+ },
+ {
+ "epoch": 7.571428571428571,
+ "grad_norm": 0.21577637203440786,
+ "learning_rate": 1.4648078139729004e-06,
+ "loss": 0.1672,
+ "step": 1007
+ },
+ {
+ "epoch": 7.578947368421053,
+ "grad_norm": 0.20755091089467734,
+ "learning_rate": 1.4637071378715804e-06,
+ "loss": 0.1837,
+ "step": 1008
+ },
+ {
+ "epoch": 7.586466165413534,
+ "grad_norm": 0.22343814714468155,
+ "learning_rate": 1.4626057456444423e-06,
+ "loss": 0.1831,
+ "step": 1009
+ },
+ {
+ "epoch": 7.593984962406015,
+ "grad_norm": 0.22162852119083062,
+ "learning_rate": 1.4615036389924206e-06,
+ "loss": 0.1753,
+ "step": 1010
+ },
+ {
+ "epoch": 7.601503759398496,
+ "grad_norm": 0.20822804235747766,
+ "learning_rate": 1.460400819617553e-06,
+ "loss": 0.1808,
+ "step": 1011
+ },
+ {
+ "epoch": 7.609022556390977,
+ "grad_norm": 0.22698591451782998,
+ "learning_rate": 1.4592972892229778e-06,
+ "loss": 0.1673,
+ "step": 1012
+ },
+ {
+ "epoch": 7.616541353383458,
+ "grad_norm": 0.22481628967329828,
+ "learning_rate": 1.4581930495129316e-06,
+ "loss": 0.1764,
+ "step": 1013
+ },
+ {
+ "epoch": 7.62406015037594,
+ "grad_norm": 0.23229632866474165,
+ "learning_rate": 1.457088102192746e-06,
+ "loss": 0.1737,
+ "step": 1014
+ },
+ {
+ "epoch": 7.631578947368421,
+ "grad_norm": 0.22589626938956903,
+ "learning_rate": 1.455982448968846e-06,
+ "loss": 0.1715,
+ "step": 1015
+ },
+ {
+ "epoch": 7.639097744360902,
+ "grad_norm": 0.2519005197357146,
+ "learning_rate": 1.454876091548746e-06,
+ "loss": 0.1738,
+ "step": 1016
+ },
+ {
+ "epoch": 7.646616541353383,
+ "grad_norm": 0.22838657732028406,
+ "learning_rate": 1.4537690316410489e-06,
+ "loss": 0.1714,
+ "step": 1017
+ },
+ {
+ "epoch": 7.654135338345864,
+ "grad_norm": 0.21890369884352193,
+ "learning_rate": 1.4526612709554417e-06,
+ "loss": 0.1874,
+ "step": 1018
+ },
+ {
+ "epoch": 7.661654135338345,
+ "grad_norm": 0.22813825422042444,
+ "learning_rate": 1.4515528112026937e-06,
+ "loss": 0.1746,
+ "step": 1019
+ },
+ {
+ "epoch": 7.669172932330827,
+ "grad_norm": 0.2183304937255535,
+ "learning_rate": 1.4504436540946545e-06,
+ "loss": 0.1757,
+ "step": 1020
+ },
+ {
+ "epoch": 7.676691729323308,
+ "grad_norm": 0.25263971086857856,
+ "learning_rate": 1.4493338013442498e-06,
+ "loss": 0.1751,
+ "step": 1021
+ },
+ {
+ "epoch": 7.684210526315789,
+ "grad_norm": 0.21928420212811559,
+ "learning_rate": 1.4482232546654797e-06,
+ "loss": 0.1632,
+ "step": 1022
+ },
+ {
+ "epoch": 7.69172932330827,
+ "grad_norm": 0.23458499318946896,
+ "learning_rate": 1.4471120157734168e-06,
+ "loss": 0.1764,
+ "step": 1023
+ },
+ {
+ "epoch": 7.6992481203007515,
+ "grad_norm": 0.21840355236920278,
+ "learning_rate": 1.4460000863842022e-06,
+ "loss": 0.1719,
+ "step": 1024
+ },
+ {
+ "epoch": 7.706766917293233,
+ "grad_norm": 0.23192498152072427,
+ "learning_rate": 1.4448874682150428e-06,
+ "loss": 0.1864,
+ "step": 1025
+ },
+ {
+ "epoch": 7.714285714285714,
+ "grad_norm": 0.22141128373885496,
+ "learning_rate": 1.4437741629842103e-06,
+ "loss": 0.1751,
+ "step": 1026
+ },
+ {
+ "epoch": 7.7218045112781954,
+ "grad_norm": 0.2272888731572437,
+ "learning_rate": 1.4426601724110362e-06,
+ "loss": 0.1835,
+ "step": 1027
+ },
+ {
+ "epoch": 7.7293233082706765,
+ "grad_norm": 0.21421449262038775,
+ "learning_rate": 1.4415454982159118e-06,
+ "loss": 0.1786,
+ "step": 1028
+ },
+ {
+ "epoch": 7.7368421052631575,
+ "grad_norm": 0.2156675501993543,
+ "learning_rate": 1.4404301421202832e-06,
+ "loss": 0.1716,
+ "step": 1029
+ },
+ {
+ "epoch": 7.7443609022556394,
+ "grad_norm": 0.21499595931240123,
+ "learning_rate": 1.43931410584665e-06,
+ "loss": 0.1677,
+ "step": 1030
+ },
+ {
+ "epoch": 7.7518796992481205,
+ "grad_norm": 0.21874096824376824,
+ "learning_rate": 1.438197391118562e-06,
+ "loss": 0.1746,
+ "step": 1031
+ },
+ {
+ "epoch": 7.7593984962406015,
+ "grad_norm": 0.2095954832187379,
+ "learning_rate": 1.4370799996606166e-06,
+ "loss": 0.1761,
+ "step": 1032
+ },
+ {
+ "epoch": 7.7669172932330826,
+ "grad_norm": 0.21417922302998377,
+ "learning_rate": 1.4359619331984568e-06,
+ "loss": 0.1647,
+ "step": 1033
+ },
+ {
+ "epoch": 7.774436090225564,
+ "grad_norm": 0.21893991731131948,
+ "learning_rate": 1.4348431934587684e-06,
+ "loss": 0.1706,
+ "step": 1034
+ },
+ {
+ "epoch": 7.7819548872180455,
+ "grad_norm": 0.21557044866935451,
+ "learning_rate": 1.4337237821692753e-06,
+ "loss": 0.1679,
+ "step": 1035
+ },
+ {
+ "epoch": 7.7894736842105265,
+ "grad_norm": 0.2149530935000189,
+ "learning_rate": 1.4326037010587405e-06,
+ "loss": 0.1713,
+ "step": 1036
+ },
+ {
+ "epoch": 7.796992481203008,
+ "grad_norm": 0.21943161703473496,
+ "learning_rate": 1.4314829518569598e-06,
+ "loss": 0.1821,
+ "step": 1037
+ },
+ {
+ "epoch": 7.804511278195489,
+ "grad_norm": 0.21140004639585885,
+ "learning_rate": 1.430361536294762e-06,
+ "loss": 0.1781,
+ "step": 1038
+ },
+ {
+ "epoch": 7.81203007518797,
+ "grad_norm": 0.20747790099531782,
+ "learning_rate": 1.4292394561040046e-06,
+ "loss": 0.1739,
+ "step": 1039
+ },
+ {
+ "epoch": 7.819548872180452,
+ "grad_norm": 0.22686448214740126,
+ "learning_rate": 1.4281167130175712e-06,
+ "loss": 0.1781,
+ "step": 1040
+ },
+ {
+ "epoch": 7.827067669172933,
+ "grad_norm": 0.225973010934224,
+ "learning_rate": 1.4269933087693693e-06,
+ "loss": 0.1787,
+ "step": 1041
+ },
+ {
+ "epoch": 7.834586466165414,
+ "grad_norm": 0.21657017664719191,
+ "learning_rate": 1.4258692450943274e-06,
+ "loss": 0.1756,
+ "step": 1042
+ },
+ {
+ "epoch": 7.842105263157895,
+ "grad_norm": 0.2173618317183377,
+ "learning_rate": 1.4247445237283928e-06,
+ "loss": 0.1788,
+ "step": 1043
+ },
+ {
+ "epoch": 7.849624060150376,
+ "grad_norm": 0.21292508414835568,
+ "learning_rate": 1.4236191464085282e-06,
+ "loss": 0.1705,
+ "step": 1044
+ },
+ {
+ "epoch": 7.857142857142857,
+ "grad_norm": 0.21104495977971774,
+ "learning_rate": 1.422493114872709e-06,
+ "loss": 0.1694,
+ "step": 1045
+ },
+ {
+ "epoch": 7.864661654135339,
+ "grad_norm": 0.2120117720771325,
+ "learning_rate": 1.4213664308599219e-06,
+ "loss": 0.1758,
+ "step": 1046
+ },
+ {
+ "epoch": 7.87218045112782,
+ "grad_norm": 0.21956286476880568,
+ "learning_rate": 1.4202390961101597e-06,
+ "loss": 0.1753,
+ "step": 1047
+ },
+ {
+ "epoch": 7.879699248120301,
+ "grad_norm": 0.22596773388618804,
+ "learning_rate": 1.419111112364422e-06,
+ "loss": 0.184,
+ "step": 1048
+ },
+ {
+ "epoch": 7.887218045112782,
+ "grad_norm": 0.22114502552537438,
+ "learning_rate": 1.4179824813647092e-06,
+ "loss": 0.1731,
+ "step": 1049
+ },
+ {
+ "epoch": 7.894736842105263,
+ "grad_norm": 0.22137214482558715,
+ "learning_rate": 1.4168532048540223e-06,
+ "loss": 0.1811,
+ "step": 1050
+ },
+ {
+ "epoch": 7.902255639097744,
+ "grad_norm": 0.22313352737130954,
+ "learning_rate": 1.4157232845763583e-06,
+ "loss": 0.17,
+ "step": 1051
+ },
+ {
+ "epoch": 7.909774436090226,
+ "grad_norm": 0.2305161019299778,
+ "learning_rate": 1.414592722276709e-06,
+ "loss": 0.1753,
+ "step": 1052
+ },
+ {
+ "epoch": 7.917293233082707,
+ "grad_norm": 0.21620635993454546,
+ "learning_rate": 1.4134615197010576e-06,
+ "loss": 0.1775,
+ "step": 1053
+ },
+ {
+ "epoch": 7.924812030075188,
+ "grad_norm": 0.22268285076152716,
+ "learning_rate": 1.4123296785963759e-06,
+ "loss": 0.1727,
+ "step": 1054
+ },
+ {
+ "epoch": 7.932330827067669,
+ "grad_norm": 0.22648157283344986,
+ "learning_rate": 1.4111972007106223e-06,
+ "loss": 0.1802,
+ "step": 1055
+ },
+ {
+ "epoch": 7.93984962406015,
+ "grad_norm": 0.22470068701043575,
+ "learning_rate": 1.410064087792738e-06,
+ "loss": 0.177,
+ "step": 1056
+ },
+ {
+ "epoch": 7.947368421052632,
+ "grad_norm": 0.21447455845104502,
+ "learning_rate": 1.4089303415926457e-06,
+ "loss": 0.1784,
+ "step": 1057
+ },
+ {
+ "epoch": 7.954887218045113,
+ "grad_norm": 0.22177262466910555,
+ "learning_rate": 1.4077959638612448e-06,
+ "loss": 0.1691,
+ "step": 1058
+ },
+ {
+ "epoch": 7.962406015037594,
+ "grad_norm": 0.21349990026186866,
+ "learning_rate": 1.4066609563504117e-06,
+ "loss": 0.1783,
+ "step": 1059
+ },
+ {
+ "epoch": 7.969924812030075,
+ "grad_norm": 0.2087031081915122,
+ "learning_rate": 1.4055253208129937e-06,
+ "loss": 0.1723,
+ "step": 1060
+ },
+ {
+ "epoch": 7.977443609022556,
+ "grad_norm": 0.2241547610715225,
+ "learning_rate": 1.4043890590028093e-06,
+ "loss": 0.1802,
+ "step": 1061
+ },
+ {
+ "epoch": 7.984962406015038,
+ "grad_norm": 0.23002770471782605,
+ "learning_rate": 1.4032521726746437e-06,
+ "loss": 0.1743,
+ "step": 1062
+ },
+ {
+ "epoch": 7.992481203007519,
+ "grad_norm": 0.22782040628231218,
+ "learning_rate": 1.4021146635842463e-06,
+ "loss": 0.1731,
+ "step": 1063
+ },
+ {
+ "epoch": 8.0,
+ "grad_norm": 0.2563191390214376,
+ "learning_rate": 1.4009765334883286e-06,
+ "loss": 0.184,
+ "step": 1064
+ },
+ {
+ "epoch": 8.0,
+ "eval_loss": 0.24922634661197662,
+ "eval_runtime": 35.863,
+ "eval_samples_per_second": 12.464,
+ "eval_steps_per_second": 0.195,
+ "step": 1064
+ },
+ {
+ "epoch": 8.007518796992482,
+ "grad_norm": 0.32833405375685804,
+ "learning_rate": 1.3998377841445612e-06,
+ "loss": 0.1645,
+ "step": 1065
+ },
+ {
+ "epoch": 8.015037593984962,
+ "grad_norm": 0.23008845320133364,
+ "learning_rate": 1.3986984173115708e-06,
+ "loss": 0.1579,
+ "step": 1066
+ },
+ {
+ "epoch": 8.022556390977444,
+ "grad_norm": 0.2965905066195183,
+ "learning_rate": 1.3975584347489382e-06,
+ "loss": 0.16,
+ "step": 1067
+ },
+ {
+ "epoch": 8.030075187969924,
+ "grad_norm": 0.278874237888332,
+ "learning_rate": 1.396417838217194e-06,
+ "loss": 0.1601,
+ "step": 1068
+ },
+ {
+ "epoch": 8.037593984962406,
+ "grad_norm": 0.2345667830990778,
+ "learning_rate": 1.3952766294778183e-06,
+ "loss": 0.167,
+ "step": 1069
+ },
+ {
+ "epoch": 8.045112781954888,
+ "grad_norm": 0.2920254185202652,
+ "learning_rate": 1.3941348102932358e-06,
+ "loss": 0.1549,
+ "step": 1070
+ },
+ {
+ "epoch": 8.052631578947368,
+ "grad_norm": 0.255618331732919,
+ "learning_rate": 1.3929923824268143e-06,
+ "loss": 0.1667,
+ "step": 1071
+ },
+ {
+ "epoch": 8.06015037593985,
+ "grad_norm": 0.24110288624576365,
+ "learning_rate": 1.3918493476428617e-06,
+ "loss": 0.1591,
+ "step": 1072
+ },
+ {
+ "epoch": 8.06766917293233,
+ "grad_norm": 0.277111778319247,
+ "learning_rate": 1.3907057077066226e-06,
+ "loss": 0.1634,
+ "step": 1073
+ },
+ {
+ "epoch": 8.075187969924812,
+ "grad_norm": 0.2563918393734953,
+ "learning_rate": 1.3895614643842772e-06,
+ "loss": 0.1532,
+ "step": 1074
+ },
+ {
+ "epoch": 8.082706766917294,
+ "grad_norm": 0.2125752869009054,
+ "learning_rate": 1.3884166194429364e-06,
+ "loss": 0.1599,
+ "step": 1075
+ },
+ {
+ "epoch": 8.090225563909774,
+ "grad_norm": 0.27117673550416066,
+ "learning_rate": 1.3872711746506412e-06,
+ "loss": 0.1635,
+ "step": 1076
+ },
+ {
+ "epoch": 8.097744360902256,
+ "grad_norm": 0.23573963530742778,
+ "learning_rate": 1.386125131776358e-06,
+ "loss": 0.1672,
+ "step": 1077
+ },
+ {
+ "epoch": 8.105263157894736,
+ "grad_norm": 0.21687041077006194,
+ "learning_rate": 1.3849784925899777e-06,
+ "loss": 0.1604,
+ "step": 1078
+ },
+ {
+ "epoch": 8.112781954887218,
+ "grad_norm": 0.2495830716384833,
+ "learning_rate": 1.3838312588623118e-06,
+ "loss": 0.1615,
+ "step": 1079
+ },
+ {
+ "epoch": 8.1203007518797,
+ "grad_norm": 0.2481275866160215,
+ "learning_rate": 1.3826834323650898e-06,
+ "loss": 0.1616,
+ "step": 1080
+ },
+ {
+ "epoch": 8.12781954887218,
+ "grad_norm": 0.21362532739236803,
+ "learning_rate": 1.3815350148709567e-06,
+ "loss": 0.1505,
+ "step": 1081
+ },
+ {
+ "epoch": 8.135338345864662,
+ "grad_norm": 0.24882752978940662,
+ "learning_rate": 1.3803860081534707e-06,
+ "loss": 0.16,
+ "step": 1082
+ },
+ {
+ "epoch": 8.142857142857142,
+ "grad_norm": 0.23500162940643107,
+ "learning_rate": 1.3792364139870997e-06,
+ "loss": 0.1582,
+ "step": 1083
+ },
+ {
+ "epoch": 8.150375939849624,
+ "grad_norm": 0.23870562607850973,
+ "learning_rate": 1.3780862341472182e-06,
+ "loss": 0.1626,
+ "step": 1084
+ },
+ {
+ "epoch": 8.157894736842104,
+ "grad_norm": 0.2420563091622864,
+ "learning_rate": 1.3769354704101058e-06,
+ "loss": 0.1624,
+ "step": 1085
+ },
+ {
+ "epoch": 8.165413533834586,
+ "grad_norm": 0.2204000209405036,
+ "learning_rate": 1.375784124552944e-06,
+ "loss": 0.1548,
+ "step": 1086
+ },
+ {
+ "epoch": 8.172932330827068,
+ "grad_norm": 0.2219919571698883,
+ "learning_rate": 1.3746321983538127e-06,
+ "loss": 0.1573,
+ "step": 1087
+ },
+ {
+ "epoch": 8.180451127819548,
+ "grad_norm": 0.2306947142599625,
+ "learning_rate": 1.3734796935916885e-06,
+ "loss": 0.1572,
+ "step": 1088
+ },
+ {
+ "epoch": 8.18796992481203,
+ "grad_norm": 0.2183483511433359,
+ "learning_rate": 1.3723266120464417e-06,
+ "loss": 0.1534,
+ "step": 1089
+ },
+ {
+ "epoch": 8.19548872180451,
+ "grad_norm": 0.2126137970053714,
+ "learning_rate": 1.3711729554988322e-06,
+ "loss": 0.1616,
+ "step": 1090
+ },
+ {
+ "epoch": 8.203007518796992,
+ "grad_norm": 0.2414794172996464,
+ "learning_rate": 1.3700187257305099e-06,
+ "loss": 0.1603,
+ "step": 1091
+ },
+ {
+ "epoch": 8.210526315789474,
+ "grad_norm": 0.21093540339902309,
+ "learning_rate": 1.3688639245240078e-06,
+ "loss": 0.1683,
+ "step": 1092
+ },
+ {
+ "epoch": 8.218045112781954,
+ "grad_norm": 0.23936275991700987,
+ "learning_rate": 1.3677085536627428e-06,
+ "loss": 0.1706,
+ "step": 1093
+ },
+ {
+ "epoch": 8.225563909774436,
+ "grad_norm": 0.22959032049235925,
+ "learning_rate": 1.3665526149310114e-06,
+ "loss": 0.1534,
+ "step": 1094
+ },
+ {
+ "epoch": 8.233082706766917,
+ "grad_norm": 0.2513766315782546,
+ "learning_rate": 1.3653961101139864e-06,
+ "loss": 0.1586,
+ "step": 1095
+ },
+ {
+ "epoch": 8.240601503759398,
+ "grad_norm": 0.2251503900350931,
+ "learning_rate": 1.3642390409977154e-06,
+ "loss": 0.1617,
+ "step": 1096
+ },
+ {
+ "epoch": 8.24812030075188,
+ "grad_norm": 0.21675037175797982,
+ "learning_rate": 1.3630814093691174e-06,
+ "loss": 0.1557,
+ "step": 1097
+ },
+ {
+ "epoch": 8.25563909774436,
+ "grad_norm": 0.2200323811062368,
+ "learning_rate": 1.36192321701598e-06,
+ "loss": 0.1731,
+ "step": 1098
+ },
+ {
+ "epoch": 8.263157894736842,
+ "grad_norm": 0.22604331205561712,
+ "learning_rate": 1.3607644657269568e-06,
+ "loss": 0.1692,
+ "step": 1099
+ },
+ {
+ "epoch": 8.270676691729323,
+ "grad_norm": 0.2237159196877982,
+ "learning_rate": 1.3596051572915649e-06,
+ "loss": 0.1602,
+ "step": 1100
+ },
+ {
+ "epoch": 8.278195488721805,
+ "grad_norm": 0.22579904109429005,
+ "learning_rate": 1.3584452935001809e-06,
+ "loss": 0.1616,
+ "step": 1101
+ },
+ {
+ "epoch": 8.285714285714286,
+ "grad_norm": 0.2069591840137254,
+ "learning_rate": 1.3572848761440402e-06,
+ "loss": 0.1598,
+ "step": 1102
+ },
+ {
+ "epoch": 8.293233082706767,
+ "grad_norm": 0.21886174820236862,
+ "learning_rate": 1.3561239070152324e-06,
+ "loss": 0.1626,
+ "step": 1103
+ },
+ {
+ "epoch": 8.300751879699249,
+ "grad_norm": 0.2380091782413886,
+ "learning_rate": 1.3549623879066994e-06,
+ "loss": 0.1697,
+ "step": 1104
+ },
+ {
+ "epoch": 8.308270676691729,
+ "grad_norm": 0.2270698348875947,
+ "learning_rate": 1.3538003206122326e-06,
+ "loss": 0.1586,
+ "step": 1105
+ },
+ {
+ "epoch": 8.31578947368421,
+ "grad_norm": 0.21523193135181518,
+ "learning_rate": 1.3526377069264698e-06,
+ "loss": 0.1586,
+ "step": 1106
+ },
+ {
+ "epoch": 8.323308270676693,
+ "grad_norm": 0.223406174787816,
+ "learning_rate": 1.3514745486448927e-06,
+ "loss": 0.1569,
+ "step": 1107
+ },
+ {
+ "epoch": 8.330827067669173,
+ "grad_norm": 0.22313829259113235,
+ "learning_rate": 1.3503108475638244e-06,
+ "loss": 0.1537,
+ "step": 1108
+ },
+ {
+ "epoch": 8.338345864661655,
+ "grad_norm": 0.2195320333205595,
+ "learning_rate": 1.3491466054804251e-06,
+ "loss": 0.1511,
+ "step": 1109
+ },
+ {
+ "epoch": 8.345864661654135,
+ "grad_norm": 0.22385989190833613,
+ "learning_rate": 1.347981824192692e-06,
+ "loss": 0.1639,
+ "step": 1110
+ },
+ {
+ "epoch": 8.353383458646617,
+ "grad_norm": 0.22358486857824897,
+ "learning_rate": 1.346816505499454e-06,
+ "loss": 0.1579,
+ "step": 1111
+ },
+ {
+ "epoch": 8.360902255639097,
+ "grad_norm": 0.21552424683626503,
+ "learning_rate": 1.3456506512003704e-06,
+ "loss": 0.1667,
+ "step": 1112
+ },
+ {
+ "epoch": 8.368421052631579,
+ "grad_norm": 0.23159881720188913,
+ "learning_rate": 1.3444842630959277e-06,
+ "loss": 0.1674,
+ "step": 1113
+ },
+ {
+ "epoch": 8.37593984962406,
+ "grad_norm": 0.21330830523733366,
+ "learning_rate": 1.3433173429874364e-06,
+ "loss": 0.1579,
+ "step": 1114
+ },
+ {
+ "epoch": 8.38345864661654,
+ "grad_norm": 0.21665446597270804,
+ "learning_rate": 1.3421498926770287e-06,
+ "loss": 0.1595,
+ "step": 1115
+ },
+ {
+ "epoch": 8.390977443609023,
+ "grad_norm": 0.23114911430862867,
+ "learning_rate": 1.3409819139676558e-06,
+ "loss": 0.1608,
+ "step": 1116
+ },
+ {
+ "epoch": 8.398496240601503,
+ "grad_norm": 0.22818004787755783,
+ "learning_rate": 1.3398134086630851e-06,
+ "loss": 0.155,
+ "step": 1117
+ },
+ {
+ "epoch": 8.406015037593985,
+ "grad_norm": 0.224381315921893,
+ "learning_rate": 1.3386443785678969e-06,
+ "loss": 0.1652,
+ "step": 1118
+ },
+ {
+ "epoch": 8.413533834586467,
+ "grad_norm": 0.22199855071757318,
+ "learning_rate": 1.3374748254874816e-06,
+ "loss": 0.1671,
+ "step": 1119
+ },
+ {
+ "epoch": 8.421052631578947,
+ "grad_norm": 0.22709568161510535,
+ "learning_rate": 1.336304751228039e-06,
+ "loss": 0.163,
+ "step": 1120
+ },
+ {
+ "epoch": 8.428571428571429,
+ "grad_norm": 0.21488355169734405,
+ "learning_rate": 1.3351341575965709e-06,
+ "loss": 0.1549,
+ "step": 1121
+ },
+ {
+ "epoch": 8.436090225563909,
+ "grad_norm": 0.28405215159671665,
+ "learning_rate": 1.3339630464008838e-06,
+ "loss": 0.159,
+ "step": 1122
+ },
+ {
+ "epoch": 8.443609022556391,
+ "grad_norm": 0.21327335667252564,
+ "learning_rate": 1.3327914194495823e-06,
+ "loss": 0.1537,
+ "step": 1123
+ },
+ {
+ "epoch": 8.451127819548873,
+ "grad_norm": 0.21303295365202973,
+ "learning_rate": 1.3316192785520678e-06,
+ "loss": 0.1529,
+ "step": 1124
+ },
+ {
+ "epoch": 8.458646616541353,
+ "grad_norm": 0.2157050730883264,
+ "learning_rate": 1.3304466255185352e-06,
+ "loss": 0.1611,
+ "step": 1125
+ },
+ {
+ "epoch": 8.466165413533835,
+ "grad_norm": 0.22218593799561256,
+ "learning_rate": 1.3292734621599706e-06,
+ "loss": 0.1628,
+ "step": 1126
+ },
+ {
+ "epoch": 8.473684210526315,
+ "grad_norm": 0.2251545269256422,
+ "learning_rate": 1.3280997902881478e-06,
+ "loss": 0.1552,
+ "step": 1127
+ },
+ {
+ "epoch": 8.481203007518797,
+ "grad_norm": 0.21398117723819546,
+ "learning_rate": 1.3269256117156266e-06,
+ "loss": 0.1593,
+ "step": 1128
+ },
+ {
+ "epoch": 8.488721804511279,
+ "grad_norm": 0.20809819261784757,
+ "learning_rate": 1.3257509282557486e-06,
+ "loss": 0.1685,
+ "step": 1129
+ },
+ {
+ "epoch": 8.496240601503759,
+ "grad_norm": 0.3614889056814091,
+ "learning_rate": 1.3245757417226355e-06,
+ "loss": 0.1647,
+ "step": 1130
+ },
+ {
+ "epoch": 8.503759398496241,
+ "grad_norm": 0.22017471496281152,
+ "learning_rate": 1.323400053931186e-06,
+ "loss": 0.1703,
+ "step": 1131
+ },
+ {
+ "epoch": 8.511278195488721,
+ "grad_norm": 0.22569970072408446,
+ "learning_rate": 1.3222238666970727e-06,
+ "loss": 0.1562,
+ "step": 1132
+ },
+ {
+ "epoch": 8.518796992481203,
+ "grad_norm": 0.2324457420804441,
+ "learning_rate": 1.3210471818367395e-06,
+ "loss": 0.1659,
+ "step": 1133
+ },
+ {
+ "epoch": 8.526315789473685,
+ "grad_norm": 0.22419188067444182,
+ "learning_rate": 1.3198700011673989e-06,
+ "loss": 0.159,
+ "step": 1134
+ },
+ {
+ "epoch": 8.533834586466165,
+ "grad_norm": 0.23450753486879514,
+ "learning_rate": 1.3186923265070293e-06,
+ "loss": 0.1601,
+ "step": 1135
+ },
+ {
+ "epoch": 8.541353383458647,
+ "grad_norm": 0.22407371497526066,
+ "learning_rate": 1.3175141596743717e-06,
+ "loss": 0.17,
+ "step": 1136
+ },
+ {
+ "epoch": 8.548872180451127,
+ "grad_norm": 0.2279177939137672,
+ "learning_rate": 1.3163355024889274e-06,
+ "loss": 0.14,
+ "step": 1137
+ },
+ {
+ "epoch": 8.556390977443609,
+ "grad_norm": 0.232522069574415,
+ "learning_rate": 1.3151563567709546e-06,
+ "loss": 0.1632,
+ "step": 1138
+ },
+ {
+ "epoch": 8.563909774436091,
+ "grad_norm": 0.22317236551498776,
+ "learning_rate": 1.3139767243414662e-06,
+ "loss": 0.1571,
+ "step": 1139
+ },
+ {
+ "epoch": 8.571428571428571,
+ "grad_norm": 0.235563175652614,
+ "learning_rate": 1.3127966070222272e-06,
+ "loss": 0.1699,
+ "step": 1140
+ },
+ {
+ "epoch": 8.578947368421053,
+ "grad_norm": 0.224972162383058,
+ "learning_rate": 1.3116160066357504e-06,
+ "loss": 0.1641,
+ "step": 1141
+ },
+ {
+ "epoch": 8.586466165413533,
+ "grad_norm": 0.2553232345229712,
+ "learning_rate": 1.310434925005296e-06,
+ "loss": 0.1654,
+ "step": 1142
+ },
+ {
+ "epoch": 8.593984962406015,
+ "grad_norm": 0.23855788382172372,
+ "learning_rate": 1.309253363954866e-06,
+ "loss": 0.1512,
+ "step": 1143
+ },
+ {
+ "epoch": 8.601503759398497,
+ "grad_norm": 0.2386514442276229,
+ "learning_rate": 1.3080713253092037e-06,
+ "loss": 0.156,
+ "step": 1144
+ },
+ {
+ "epoch": 8.609022556390977,
+ "grad_norm": 0.2342295122395984,
+ "learning_rate": 1.3068888108937898e-06,
+ "loss": 0.1684,
+ "step": 1145
+ },
+ {
+ "epoch": 8.61654135338346,
+ "grad_norm": 0.24438412000344126,
+ "learning_rate": 1.3057058225348399e-06,
+ "loss": 0.1594,
+ "step": 1146
+ },
+ {
+ "epoch": 8.62406015037594,
+ "grad_norm": 0.24104508316923817,
+ "learning_rate": 1.3045223620593005e-06,
+ "loss": 0.156,
+ "step": 1147
+ },
+ {
+ "epoch": 8.631578947368421,
+ "grad_norm": 0.23229662189997605,
+ "learning_rate": 1.3033384312948486e-06,
+ "loss": 0.1641,
+ "step": 1148
+ },
+ {
+ "epoch": 8.639097744360903,
+ "grad_norm": 0.24921676088574168,
+ "learning_rate": 1.302154032069887e-06,
+ "loss": 0.1626,
+ "step": 1149
+ },
+ {
+ "epoch": 8.646616541353383,
+ "grad_norm": 0.23114855456826627,
+ "learning_rate": 1.3009691662135413e-06,
+ "loss": 0.16,
+ "step": 1150
+ },
+ {
+ "epoch": 8.654135338345865,
+ "grad_norm": 0.21923265272012907,
+ "learning_rate": 1.299783835555659e-06,
+ "loss": 0.1569,
+ "step": 1151
+ },
+ {
+ "epoch": 8.661654135338345,
+ "grad_norm": 0.2231217951778346,
+ "learning_rate": 1.2985980419268043e-06,
+ "loss": 0.1511,
+ "step": 1152
+ },
+ {
+ "epoch": 8.669172932330827,
+ "grad_norm": 0.22976442572912045,
+ "learning_rate": 1.297411787158257e-06,
+ "loss": 0.1674,
+ "step": 1153
+ },
+ {
+ "epoch": 8.676691729323307,
+ "grad_norm": 0.23095030919996457,
+ "learning_rate": 1.296225073082009e-06,
+ "loss": 0.1674,
+ "step": 1154
+ },
+ {
+ "epoch": 8.68421052631579,
+ "grad_norm": 0.22112128021483768,
+ "learning_rate": 1.295037901530761e-06,
+ "loss": 0.1607,
+ "step": 1155
+ },
+ {
+ "epoch": 8.691729323308271,
+ "grad_norm": 0.2312691859046094,
+ "learning_rate": 1.2938502743379209e-06,
+ "loss": 0.1643,
+ "step": 1156
+ },
+ {
+ "epoch": 8.699248120300751,
+ "grad_norm": 0.2301370151467343,
+ "learning_rate": 1.2926621933376001e-06,
+ "loss": 0.1692,
+ "step": 1157
+ },
+ {
+ "epoch": 8.706766917293233,
+ "grad_norm": 0.2214436867382709,
+ "learning_rate": 1.2914736603646106e-06,
+ "loss": 0.1486,
+ "step": 1158
+ },
+ {
+ "epoch": 8.714285714285714,
+ "grad_norm": 0.2508194453162641,
+ "learning_rate": 1.2902846772544622e-06,
+ "loss": 0.1655,
+ "step": 1159
+ },
+ {
+ "epoch": 8.721804511278195,
+ "grad_norm": 0.23806449828233012,
+ "learning_rate": 1.2890952458433607e-06,
+ "loss": 0.1716,
+ "step": 1160
+ },
+ {
+ "epoch": 8.729323308270677,
+ "grad_norm": 0.2212302127733244,
+ "learning_rate": 1.2879053679682036e-06,
+ "loss": 0.1591,
+ "step": 1161
+ },
+ {
+ "epoch": 8.736842105263158,
+ "grad_norm": 0.23478927818158776,
+ "learning_rate": 1.286715045466578e-06,
+ "loss": 0.1669,
+ "step": 1162
+ },
+ {
+ "epoch": 8.74436090225564,
+ "grad_norm": 0.24623062756198238,
+ "learning_rate": 1.2855242801767576e-06,
+ "loss": 0.1544,
+ "step": 1163
+ },
+ {
+ "epoch": 8.75187969924812,
+ "grad_norm": 0.23323561558865874,
+ "learning_rate": 1.2843330739377001e-06,
+ "loss": 0.1601,
+ "step": 1164
+ },
+ {
+ "epoch": 8.759398496240602,
+ "grad_norm": 0.2327196648647578,
+ "learning_rate": 1.283141428589044e-06,
+ "loss": 0.1655,
+ "step": 1165
+ },
+ {
+ "epoch": 8.766917293233083,
+ "grad_norm": 0.2564195043036698,
+ "learning_rate": 1.281949345971106e-06,
+ "loss": 0.1592,
+ "step": 1166
+ },
+ {
+ "epoch": 8.774436090225564,
+ "grad_norm": 0.23113159721240986,
+ "learning_rate": 1.280756827924878e-06,
+ "loss": 0.1606,
+ "step": 1167
+ },
+ {
+ "epoch": 8.781954887218046,
+ "grad_norm": 0.2343949524229742,
+ "learning_rate": 1.279563876292025e-06,
+ "loss": 0.1615,
+ "step": 1168
+ },
+ {
+ "epoch": 8.789473684210526,
+ "grad_norm": 0.24881265265978644,
+ "learning_rate": 1.2783704929148807e-06,
+ "loss": 0.1567,
+ "step": 1169
+ },
+ {
+ "epoch": 8.796992481203008,
+ "grad_norm": 0.23178870490263076,
+ "learning_rate": 1.277176679636446e-06,
+ "loss": 0.1667,
+ "step": 1170
+ },
+ {
+ "epoch": 8.80451127819549,
+ "grad_norm": 0.22819966053482044,
+ "learning_rate": 1.2759824383003854e-06,
+ "loss": 0.1514,
+ "step": 1171
+ },
+ {
+ "epoch": 8.81203007518797,
+ "grad_norm": 0.23618074504688527,
+ "learning_rate": 1.274787770751025e-06,
+ "loss": 0.1574,
+ "step": 1172
+ },
+ {
+ "epoch": 8.819548872180452,
+ "grad_norm": 0.23645306428619914,
+ "learning_rate": 1.2735926788333492e-06,
+ "loss": 0.1613,
+ "step": 1173
+ },
+ {
+ "epoch": 8.827067669172932,
+ "grad_norm": 0.22414443455174612,
+ "learning_rate": 1.272397164392997e-06,
+ "loss": 0.1629,
+ "step": 1174
+ },
+ {
+ "epoch": 8.834586466165414,
+ "grad_norm": 0.23000716521603554,
+ "learning_rate": 1.2712012292762601e-06,
+ "loss": 0.1629,
+ "step": 1175
+ },
+ {
+ "epoch": 8.842105263157894,
+ "grad_norm": 0.23398793137993412,
+ "learning_rate": 1.2700048753300804e-06,
+ "loss": 0.1609,
+ "step": 1176
+ },
+ {
+ "epoch": 8.849624060150376,
+ "grad_norm": 0.2235422902146283,
+ "learning_rate": 1.2688081044020465e-06,
+ "loss": 0.1625,
+ "step": 1177
+ },
+ {
+ "epoch": 8.857142857142858,
+ "grad_norm": 0.2713436488481424,
+ "learning_rate": 1.2676109183403907e-06,
+ "loss": 0.1591,
+ "step": 1178
+ },
+ {
+ "epoch": 8.864661654135338,
+ "grad_norm": 0.2520471120649652,
+ "learning_rate": 1.2664133189939865e-06,
+ "loss": 0.1791,
+ "step": 1179
+ },
+ {
+ "epoch": 8.87218045112782,
+ "grad_norm": 0.2508359186868446,
+ "learning_rate": 1.2652153082123455e-06,
+ "loss": 0.157,
+ "step": 1180
+ },
+ {
+ "epoch": 8.8796992481203,
+ "grad_norm": 0.23410793122332269,
+ "learning_rate": 1.2640168878456155e-06,
+ "loss": 0.1548,
+ "step": 1181
+ },
+ {
+ "epoch": 8.887218045112782,
+ "grad_norm": 0.24815493277558764,
+ "learning_rate": 1.2628180597445752e-06,
+ "loss": 0.1557,
+ "step": 1182
+ },
+ {
+ "epoch": 8.894736842105264,
+ "grad_norm": 0.2539901925305197,
+ "learning_rate": 1.2616188257606349e-06,
+ "loss": 0.1624,
+ "step": 1183
+ },
+ {
+ "epoch": 8.902255639097744,
+ "grad_norm": 0.24344735959592492,
+ "learning_rate": 1.2604191877458306e-06,
+ "loss": 0.1615,
+ "step": 1184
+ },
+ {
+ "epoch": 8.909774436090226,
+ "grad_norm": 0.22492870407321489,
+ "learning_rate": 1.259219147552822e-06,
+ "loss": 0.1671,
+ "step": 1185
+ },
+ {
+ "epoch": 8.917293233082706,
+ "grad_norm": 0.23731937378296106,
+ "learning_rate": 1.258018707034891e-06,
+ "loss": 0.1663,
+ "step": 1186
+ },
+ {
+ "epoch": 8.924812030075188,
+ "grad_norm": 0.24262485605263515,
+ "learning_rate": 1.256817868045937e-06,
+ "loss": 0.1567,
+ "step": 1187
+ },
+ {
+ "epoch": 8.93233082706767,
+ "grad_norm": 0.2281344123855914,
+ "learning_rate": 1.2556166324404746e-06,
+ "loss": 0.164,
+ "step": 1188
+ },
+ {
+ "epoch": 8.93984962406015,
+ "grad_norm": 0.24185290328023398,
+ "learning_rate": 1.2544150020736317e-06,
+ "loss": 0.1578,
+ "step": 1189
+ },
+ {
+ "epoch": 8.947368421052632,
+ "grad_norm": 0.23277267961097572,
+ "learning_rate": 1.253212978801145e-06,
+ "loss": 0.1688,
+ "step": 1190
+ },
+ {
+ "epoch": 8.954887218045112,
+ "grad_norm": 0.2307801357228883,
+ "learning_rate": 1.2520105644793586e-06,
+ "loss": 0.1687,
+ "step": 1191
+ },
+ {
+ "epoch": 8.962406015037594,
+ "grad_norm": 0.23190265379350236,
+ "learning_rate": 1.25080776096522e-06,
+ "loss": 0.1629,
+ "step": 1192
+ },
+ {
+ "epoch": 8.969924812030076,
+ "grad_norm": 0.23091384523593453,
+ "learning_rate": 1.2496045701162783e-06,
+ "loss": 0.1625,
+ "step": 1193
+ },
+ {
+ "epoch": 8.977443609022556,
+ "grad_norm": 0.23939269028853463,
+ "learning_rate": 1.2484009937906806e-06,
+ "loss": 0.154,
+ "step": 1194
+ },
+ {
+ "epoch": 8.984962406015038,
+ "grad_norm": 0.24204254296168923,
+ "learning_rate": 1.247197033847169e-06,
+ "loss": 0.1661,
+ "step": 1195
+ },
+ {
+ "epoch": 8.992481203007518,
+ "grad_norm": 0.3030436694677564,
+ "learning_rate": 1.2459926921450779e-06,
+ "loss": 0.1652,
+ "step": 1196
+ },
+ {
+ "epoch": 9.0,
+ "grad_norm": 0.2169611304389509,
+ "learning_rate": 1.2447879705443325e-06,
+ "loss": 0.1591,
+ "step": 1197
+ },
+ {
+ "epoch": 9.0,
+ "eval_loss": 0.25892359018325806,
+ "eval_runtime": 35.7762,
+ "eval_samples_per_second": 12.494,
+ "eval_steps_per_second": 0.196,
+ "step": 1197
+ },
+ {
+ "epoch": 9.007518796992482,
+ "grad_norm": 0.342058964248194,
+ "learning_rate": 1.243582870905443e-06,
+ "loss": 0.1547,
+ "step": 1198
+ },
+ {
+ "epoch": 9.015037593984962,
+ "grad_norm": 0.24926039290146348,
+ "learning_rate": 1.242377395089505e-06,
+ "loss": 0.1515,
+ "step": 1199
+ },
+ {
+ "epoch": 9.022556390977444,
+ "grad_norm": 0.3188552562829292,
+ "learning_rate": 1.2411715449581937e-06,
+ "loss": 0.1561,
+ "step": 1200
+ },
+ {
+ "epoch": 9.030075187969924,
+ "grad_norm": 0.3094671471164551,
+ "learning_rate": 1.239965322373763e-06,
+ "loss": 0.1474,
+ "step": 1201
+ },
+ {
+ "epoch": 9.037593984962406,
+ "grad_norm": 0.23167852282079845,
+ "learning_rate": 1.2387587291990422e-06,
+ "loss": 0.1504,
+ "step": 1202
+ },
+ {
+ "epoch": 9.045112781954888,
+ "grad_norm": 0.28274058399445784,
+ "learning_rate": 1.2375517672974325e-06,
+ "loss": 0.1449,
+ "step": 1203
+ },
+ {
+ "epoch": 9.052631578947368,
+ "grad_norm": 0.2842397403014581,
+ "learning_rate": 1.236344438532905e-06,
+ "loss": 0.1502,
+ "step": 1204
+ },
+ {
+ "epoch": 9.06015037593985,
+ "grad_norm": 0.22935038167518906,
+ "learning_rate": 1.235136744769997e-06,
+ "loss": 0.1438,
+ "step": 1205
+ },
+ {
+ "epoch": 9.06766917293233,
+ "grad_norm": 0.28430685088632884,
+ "learning_rate": 1.2339286878738093e-06,
+ "loss": 0.1451,
+ "step": 1206
+ },
+ {
+ "epoch": 9.075187969924812,
+ "grad_norm": 0.2867125480229265,
+ "learning_rate": 1.232720269710004e-06,
+ "loss": 0.1464,
+ "step": 1207
+ },
+ {
+ "epoch": 9.082706766917294,
+ "grad_norm": 0.23445732220037188,
+ "learning_rate": 1.231511492144801e-06,
+ "loss": 0.1404,
+ "step": 1208
+ },
+ {
+ "epoch": 9.090225563909774,
+ "grad_norm": 0.28148696672108403,
+ "learning_rate": 1.2303023570449754e-06,
+ "loss": 0.1485,
+ "step": 1209
+ },
+ {
+ "epoch": 9.097744360902256,
+ "grad_norm": 0.27373439000881433,
+ "learning_rate": 1.2290928662778535e-06,
+ "loss": 0.1468,
+ "step": 1210
+ },
+ {
+ "epoch": 9.105263157894736,
+ "grad_norm": 0.2261141341717308,
+ "learning_rate": 1.227883021711312e-06,
+ "loss": 0.1394,
+ "step": 1211
+ },
+ {
+ "epoch": 9.112781954887218,
+ "grad_norm": 0.290025400638614,
+ "learning_rate": 1.2266728252137732e-06,
+ "loss": 0.1556,
+ "step": 1212
+ },
+ {
+ "epoch": 9.1203007518797,
+ "grad_norm": 0.6486535220307098,
+ "learning_rate": 1.225462278654204e-06,
+ "loss": 0.1482,
+ "step": 1213
+ },
+ {
+ "epoch": 9.12781954887218,
+ "grad_norm": 0.2416357295494709,
+ "learning_rate": 1.2242513839021106e-06,
+ "loss": 0.1355,
+ "step": 1214
+ },
+ {
+ "epoch": 9.135338345864662,
+ "grad_norm": 0.25609906974858787,
+ "learning_rate": 1.2230401428275382e-06,
+ "loss": 0.1521,
+ "step": 1215
+ },
+ {
+ "epoch": 9.142857142857142,
+ "grad_norm": 0.2634318475215811,
+ "learning_rate": 1.2218285573010652e-06,
+ "loss": 0.1522,
+ "step": 1216
+ },
+ {
+ "epoch": 9.150375939849624,
+ "grad_norm": 0.23343092495232454,
+ "learning_rate": 1.2206166291938036e-06,
+ "loss": 0.1328,
+ "step": 1217
+ },
+ {
+ "epoch": 9.157894736842104,
+ "grad_norm": 0.2613721358079805,
+ "learning_rate": 1.2194043603773935e-06,
+ "loss": 0.1522,
+ "step": 1218
+ },
+ {
+ "epoch": 9.165413533834586,
+ "grad_norm": 0.24341291240836194,
+ "learning_rate": 1.2181917527240018e-06,
+ "loss": 0.1523,
+ "step": 1219
+ },
+ {
+ "epoch": 9.172932330827068,
+ "grad_norm": 0.24353606347445328,
+ "learning_rate": 1.2169788081063178e-06,
+ "loss": 0.1441,
+ "step": 1220
+ },
+ {
+ "epoch": 9.180451127819548,
+ "grad_norm": 0.2857906563883379,
+ "learning_rate": 1.2157655283975523e-06,
+ "loss": 0.1481,
+ "step": 1221
+ },
+ {
+ "epoch": 9.18796992481203,
+ "grad_norm": 0.23450816262866236,
+ "learning_rate": 1.2145519154714329e-06,
+ "loss": 0.1505,
+ "step": 1222
+ },
+ {
+ "epoch": 9.19548872180451,
+ "grad_norm": 0.23312065536245513,
+ "learning_rate": 1.2133379712022015e-06,
+ "loss": 0.1431,
+ "step": 1223
+ },
+ {
+ "epoch": 9.203007518796992,
+ "grad_norm": 0.2353672770935492,
+ "learning_rate": 1.2121236974646125e-06,
+ "loss": 0.1529,
+ "step": 1224
+ },
+ {
+ "epoch": 9.210526315789474,
+ "grad_norm": 0.25278986071259907,
+ "learning_rate": 1.210909096133929e-06,
+ "loss": 0.1524,
+ "step": 1225
+ },
+ {
+ "epoch": 9.218045112781954,
+ "grad_norm": 0.2330003979107882,
+ "learning_rate": 1.2096941690859192e-06,
+ "loss": 0.1551,
+ "step": 1226
+ },
+ {
+ "epoch": 9.225563909774436,
+ "grad_norm": 0.24399252130817423,
+ "learning_rate": 1.2084789181968552e-06,
+ "loss": 0.1538,
+ "step": 1227
+ },
+ {
+ "epoch": 9.233082706766917,
+ "grad_norm": 0.23060370358843268,
+ "learning_rate": 1.2072633453435091e-06,
+ "loss": 0.1491,
+ "step": 1228
+ },
+ {
+ "epoch": 9.240601503759398,
+ "grad_norm": 0.2335986330294631,
+ "learning_rate": 1.2060474524031497e-06,
+ "loss": 0.1391,
+ "step": 1229
+ },
+ {
+ "epoch": 9.24812030075188,
+ "grad_norm": 0.2446150163666212,
+ "learning_rate": 1.2048312412535407e-06,
+ "loss": 0.1485,
+ "step": 1230
+ },
+ {
+ "epoch": 9.25563909774436,
+ "grad_norm": 0.2353742267257407,
+ "learning_rate": 1.203614713772937e-06,
+ "loss": 0.1475,
+ "step": 1231
+ },
+ {
+ "epoch": 9.263157894736842,
+ "grad_norm": 0.22715294305397443,
+ "learning_rate": 1.2023978718400817e-06,
+ "loss": 0.1476,
+ "step": 1232
+ },
+ {
+ "epoch": 9.270676691729323,
+ "grad_norm": 0.2340639881029823,
+ "learning_rate": 1.2011807173342045e-06,
+ "loss": 0.1357,
+ "step": 1233
+ },
+ {
+ "epoch": 9.278195488721805,
+ "grad_norm": 0.21780277779134913,
+ "learning_rate": 1.1999632521350167e-06,
+ "loss": 0.1432,
+ "step": 1234
+ },
+ {
+ "epoch": 9.285714285714286,
+ "grad_norm": 0.28896587632218973,
+ "learning_rate": 1.19874547812271e-06,
+ "loss": 0.1339,
+ "step": 1235
+ },
+ {
+ "epoch": 9.293233082706767,
+ "grad_norm": 0.23965247939875445,
+ "learning_rate": 1.1975273971779527e-06,
+ "loss": 0.1604,
+ "step": 1236
+ },
+ {
+ "epoch": 9.300751879699249,
+ "grad_norm": 0.241875575546432,
+ "learning_rate": 1.1963090111818877e-06,
+ "loss": 0.1442,
+ "step": 1237
+ },
+ {
+ "epoch": 9.308270676691729,
+ "grad_norm": 0.241447988776196,
+ "learning_rate": 1.1950903220161284e-06,
+ "loss": 0.15,
+ "step": 1238
+ },
+ {
+ "epoch": 9.31578947368421,
+ "grad_norm": 0.23488341162529836,
+ "learning_rate": 1.1938713315627564e-06,
+ "loss": 0.1454,
+ "step": 1239
+ },
+ {
+ "epoch": 9.323308270676693,
+ "grad_norm": 0.2421677960846136,
+ "learning_rate": 1.1926520417043194e-06,
+ "loss": 0.1471,
+ "step": 1240
+ },
+ {
+ "epoch": 9.330827067669173,
+ "grad_norm": 0.23425279031152227,
+ "learning_rate": 1.1914324543238265e-06,
+ "loss": 0.1441,
+ "step": 1241
+ },
+ {
+ "epoch": 9.338345864661655,
+ "grad_norm": 0.24661660875917438,
+ "learning_rate": 1.1902125713047466e-06,
+ "loss": 0.1509,
+ "step": 1242
+ },
+ {
+ "epoch": 9.345864661654135,
+ "grad_norm": 0.24151052383841984,
+ "learning_rate": 1.1889923945310057e-06,
+ "loss": 0.1434,
+ "step": 1243
+ },
+ {
+ "epoch": 9.353383458646617,
+ "grad_norm": 0.23731818249733377,
+ "learning_rate": 1.1877719258869824e-06,
+ "loss": 0.1391,
+ "step": 1244
+ },
+ {
+ "epoch": 9.360902255639097,
+ "grad_norm": 0.23001188644063253,
+ "learning_rate": 1.1865511672575073e-06,
+ "loss": 0.1529,
+ "step": 1245
+ },
+ {
+ "epoch": 9.368421052631579,
+ "grad_norm": 0.23000717822516153,
+ "learning_rate": 1.1853301205278577e-06,
+ "loss": 0.1403,
+ "step": 1246
+ },
+ {
+ "epoch": 9.37593984962406,
+ "grad_norm": 0.2520805119455644,
+ "learning_rate": 1.1841087875837565e-06,
+ "loss": 0.1471,
+ "step": 1247
+ },
+ {
+ "epoch": 9.38345864661654,
+ "grad_norm": 0.23486020107482522,
+ "learning_rate": 1.1828871703113684e-06,
+ "loss": 0.1376,
+ "step": 1248
+ },
+ {
+ "epoch": 9.390977443609023,
+ "grad_norm": 0.2544272937765405,
+ "learning_rate": 1.1816652705972976e-06,
+ "loss": 0.1484,
+ "step": 1249
+ },
+ {
+ "epoch": 9.398496240601503,
+ "grad_norm": 0.24280624673756115,
+ "learning_rate": 1.1804430903285835e-06,
+ "loss": 0.1401,
+ "step": 1250
+ },
+ {
+ "epoch": 9.406015037593985,
+ "grad_norm": 0.2586241505435117,
+ "learning_rate": 1.1792206313926998e-06,
+ "loss": 0.1506,
+ "step": 1251
+ },
+ {
+ "epoch": 9.413533834586467,
+ "grad_norm": 0.23278437580834402,
+ "learning_rate": 1.1779978956775504e-06,
+ "loss": 0.1564,
+ "step": 1252
+ },
+ {
+ "epoch": 9.421052631578947,
+ "grad_norm": 0.2316341817271337,
+ "learning_rate": 1.1767748850714658e-06,
+ "loss": 0.1485,
+ "step": 1253
+ },
+ {
+ "epoch": 9.428571428571429,
+ "grad_norm": 0.22802398589218095,
+ "learning_rate": 1.1755516014632022e-06,
+ "loss": 0.1428,
+ "step": 1254
+ },
+ {
+ "epoch": 9.436090225563909,
+ "grad_norm": 0.22890650028644166,
+ "learning_rate": 1.174328046741936e-06,
+ "loss": 0.1421,
+ "step": 1255
+ },
+ {
+ "epoch": 9.443609022556391,
+ "grad_norm": 0.256642153153405,
+ "learning_rate": 1.1731042227972644e-06,
+ "loss": 0.1461,
+ "step": 1256
+ },
+ {
+ "epoch": 9.451127819548873,
+ "grad_norm": 0.2282477588481103,
+ "learning_rate": 1.171880131519198e-06,
+ "loss": 0.1405,
+ "step": 1257
+ },
+ {
+ "epoch": 9.458646616541353,
+ "grad_norm": 0.22201379284725423,
+ "learning_rate": 1.170655774798162e-06,
+ "loss": 0.1404,
+ "step": 1258
+ },
+ {
+ "epoch": 9.466165413533835,
+ "grad_norm": 0.24598396766235528,
+ "learning_rate": 1.1694311545249907e-06,
+ "loss": 0.1481,
+ "step": 1259
+ },
+ {
+ "epoch": 9.473684210526315,
+ "grad_norm": 0.22894529955795637,
+ "learning_rate": 1.1682062725909257e-06,
+ "loss": 0.146,
+ "step": 1260
+ },
+ {
+ "epoch": 9.481203007518797,
+ "grad_norm": 0.2824739105325021,
+ "learning_rate": 1.1669811308876126e-06,
+ "loss": 0.1495,
+ "step": 1261
+ },
+ {
+ "epoch": 9.488721804511279,
+ "grad_norm": 0.23664606335790564,
+ "learning_rate": 1.1657557313070979e-06,
+ "loss": 0.1472,
+ "step": 1262
+ },
+ {
+ "epoch": 9.496240601503759,
+ "grad_norm": 0.30206264230988505,
+ "learning_rate": 1.164530075741827e-06,
+ "loss": 0.1521,
+ "step": 1263
+ },
+ {
+ "epoch": 9.503759398496241,
+ "grad_norm": 0.23672191920492924,
+ "learning_rate": 1.1633041660846404e-06,
+ "loss": 0.1463,
+ "step": 1264
+ },
+ {
+ "epoch": 9.511278195488721,
+ "grad_norm": 0.27015744412293224,
+ "learning_rate": 1.1620780042287704e-06,
+ "loss": 0.1545,
+ "step": 1265
+ },
+ {
+ "epoch": 9.518796992481203,
+ "grad_norm": 0.22855360143983014,
+ "learning_rate": 1.1608515920678396e-06,
+ "loss": 0.1442,
+ "step": 1266
+ },
+ {
+ "epoch": 9.526315789473685,
+ "grad_norm": 0.2592756591858696,
+ "learning_rate": 1.1596249314958571e-06,
+ "loss": 0.1409,
+ "step": 1267
+ },
+ {
+ "epoch": 9.533834586466165,
+ "grad_norm": 0.2689107427931512,
+ "learning_rate": 1.158398024407215e-06,
+ "loss": 0.144,
+ "step": 1268
+ },
+ {
+ "epoch": 9.541353383458647,
+ "grad_norm": 0.24514977061544363,
+ "learning_rate": 1.1571708726966862e-06,
+ "loss": 0.1468,
+ "step": 1269
+ },
+ {
+ "epoch": 9.548872180451127,
+ "grad_norm": 0.2515061682292228,
+ "learning_rate": 1.1559434782594222e-06,
+ "loss": 0.157,
+ "step": 1270
+ },
+ {
+ "epoch": 9.556390977443609,
+ "grad_norm": 0.26801025185675664,
+ "learning_rate": 1.1547158429909485e-06,
+ "loss": 0.1494,
+ "step": 1271
+ },
+ {
+ "epoch": 9.563909774436091,
+ "grad_norm": 0.24366795174816938,
+ "learning_rate": 1.1534879687871628e-06,
+ "loss": 0.1487,
+ "step": 1272
+ },
+ {
+ "epoch": 9.571428571428571,
+ "grad_norm": 0.24993414554781238,
+ "learning_rate": 1.152259857544332e-06,
+ "loss": 0.1483,
+ "step": 1273
+ },
+ {
+ "epoch": 9.578947368421053,
+ "grad_norm": 0.23590862814876953,
+ "learning_rate": 1.151031511159089e-06,
+ "loss": 0.1518,
+ "step": 1274
+ },
+ {
+ "epoch": 9.586466165413533,
+ "grad_norm": 0.230698603862258,
+ "learning_rate": 1.1498029315284293e-06,
+ "loss": 0.1521,
+ "step": 1275
+ },
+ {
+ "epoch": 9.593984962406015,
+ "grad_norm": 0.24642940617193151,
+ "learning_rate": 1.1485741205497092e-06,
+ "loss": 0.1532,
+ "step": 1276
+ },
+ {
+ "epoch": 9.601503759398497,
+ "grad_norm": 0.2574271852723903,
+ "learning_rate": 1.1473450801206425e-06,
+ "loss": 0.1508,
+ "step": 1277
+ },
+ {
+ "epoch": 9.609022556390977,
+ "grad_norm": 0.24167706639572398,
+ "learning_rate": 1.146115812139297e-06,
+ "loss": 0.1478,
+ "step": 1278
+ },
+ {
+ "epoch": 9.61654135338346,
+ "grad_norm": 0.2505176015960957,
+ "learning_rate": 1.1448863185040915e-06,
+ "loss": 0.1446,
+ "step": 1279
+ },
+ {
+ "epoch": 9.62406015037594,
+ "grad_norm": 0.24036480818458733,
+ "learning_rate": 1.1436566011137938e-06,
+ "loss": 0.151,
+ "step": 1280
+ },
+ {
+ "epoch": 9.631578947368421,
+ "grad_norm": 0.23098519931742323,
+ "learning_rate": 1.142426661867517e-06,
+ "loss": 0.1483,
+ "step": 1281
+ },
+ {
+ "epoch": 9.639097744360903,
+ "grad_norm": 0.2321336200532537,
+ "learning_rate": 1.1411965026647174e-06,
+ "loss": 0.1435,
+ "step": 1282
+ },
+ {
+ "epoch": 9.646616541353383,
+ "grad_norm": 0.229447341603156,
+ "learning_rate": 1.1399661254051904e-06,
+ "loss": 0.1503,
+ "step": 1283
+ },
+ {
+ "epoch": 9.654135338345865,
+ "grad_norm": 0.31731367499878893,
+ "learning_rate": 1.1387355319890683e-06,
+ "loss": 0.1409,
+ "step": 1284
+ },
+ {
+ "epoch": 9.661654135338345,
+ "grad_norm": 0.243364842388996,
+ "learning_rate": 1.1375047243168171e-06,
+ "loss": 0.1388,
+ "step": 1285
+ },
+ {
+ "epoch": 9.669172932330827,
+ "grad_norm": 0.22116348381963738,
+ "learning_rate": 1.1362737042892342e-06,
+ "loss": 0.1544,
+ "step": 1286
+ },
+ {
+ "epoch": 9.676691729323307,
+ "grad_norm": 0.23189399449068746,
+ "learning_rate": 1.135042473807444e-06,
+ "loss": 0.1526,
+ "step": 1287
+ },
+ {
+ "epoch": 9.68421052631579,
+ "grad_norm": 0.2543883178815507,
+ "learning_rate": 1.133811034772897e-06,
+ "loss": 0.1529,
+ "step": 1288
+ },
+ {
+ "epoch": 9.691729323308271,
+ "grad_norm": 0.25397495926147967,
+ "learning_rate": 1.1325793890873652e-06,
+ "loss": 0.1627,
+ "step": 1289
+ },
+ {
+ "epoch": 9.699248120300751,
+ "grad_norm": 0.2485539782758367,
+ "learning_rate": 1.13134753865294e-06,
+ "loss": 0.1505,
+ "step": 1290
+ },
+ {
+ "epoch": 9.706766917293233,
+ "grad_norm": 0.2501656807090538,
+ "learning_rate": 1.130115485372028e-06,
+ "loss": 0.1566,
+ "step": 1291
+ },
+ {
+ "epoch": 9.714285714285714,
+ "grad_norm": 0.23994732658448573,
+ "learning_rate": 1.1288832311473506e-06,
+ "loss": 0.1446,
+ "step": 1292
+ },
+ {
+ "epoch": 9.721804511278195,
+ "grad_norm": 0.2400336377776511,
+ "learning_rate": 1.1276507778819388e-06,
+ "loss": 0.1496,
+ "step": 1293
+ },
+ {
+ "epoch": 9.729323308270677,
+ "grad_norm": 0.2703846637779096,
+ "learning_rate": 1.1264181274791309e-06,
+ "loss": 0.1465,
+ "step": 1294
+ },
+ {
+ "epoch": 9.736842105263158,
+ "grad_norm": 0.22316066272696433,
+ "learning_rate": 1.1251852818425696e-06,
+ "loss": 0.1406,
+ "step": 1295
+ },
+ {
+ "epoch": 9.74436090225564,
+ "grad_norm": 0.23683648308981667,
+ "learning_rate": 1.1239522428761994e-06,
+ "loss": 0.1414,
+ "step": 1296
+ },
+ {
+ "epoch": 9.75187969924812,
+ "grad_norm": 0.23148296973443566,
+ "learning_rate": 1.1227190124842631e-06,
+ "loss": 0.1431,
+ "step": 1297
+ },
+ {
+ "epoch": 9.759398496240602,
+ "grad_norm": 0.23604163208177836,
+ "learning_rate": 1.1214855925712996e-06,
+ "loss": 0.1528,
+ "step": 1298
+ },
+ {
+ "epoch": 9.766917293233083,
+ "grad_norm": 0.2739346200839475,
+ "learning_rate": 1.1202519850421398e-06,
+ "loss": 0.1465,
+ "step": 1299
+ },
+ {
+ "epoch": 9.774436090225564,
+ "grad_norm": 0.24044497122173036,
+ "learning_rate": 1.1190181918019048e-06,
+ "loss": 0.148,
+ "step": 1300
+ },
+ {
+ "epoch": 9.781954887218046,
+ "grad_norm": 0.2439114309510566,
+ "learning_rate": 1.1177842147560024e-06,
+ "loss": 0.1469,
+ "step": 1301
+ },
+ {
+ "epoch": 9.789473684210526,
+ "grad_norm": 0.24493481228364308,
+ "learning_rate": 1.116550055810124e-06,
+ "loss": 0.1468,
+ "step": 1302
+ },
+ {
+ "epoch": 9.796992481203008,
+ "grad_norm": 0.2463635222072968,
+ "learning_rate": 1.1153157168702427e-06,
+ "loss": 0.1501,
+ "step": 1303
+ },
+ {
+ "epoch": 9.80451127819549,
+ "grad_norm": 0.22306699032154903,
+ "learning_rate": 1.1140811998426088e-06,
+ "loss": 0.1424,
+ "step": 1304
+ },
+ {
+ "epoch": 9.81203007518797,
+ "grad_norm": 0.22879308874467846,
+ "learning_rate": 1.1128465066337476e-06,
+ "loss": 0.1448,
+ "step": 1305
+ },
+ {
+ "epoch": 9.819548872180452,
+ "grad_norm": 0.22932487940786572,
+ "learning_rate": 1.111611639150457e-06,
+ "loss": 0.1546,
+ "step": 1306
+ },
+ {
+ "epoch": 9.827067669172932,
+ "grad_norm": 0.23999234809810033,
+ "learning_rate": 1.1103765992998038e-06,
+ "loss": 0.1507,
+ "step": 1307
+ },
+ {
+ "epoch": 9.834586466165414,
+ "grad_norm": 0.23090254706989752,
+ "learning_rate": 1.109141388989121e-06,
+ "loss": 0.1438,
+ "step": 1308
+ },
+ {
+ "epoch": 9.842105263157894,
+ "grad_norm": 0.2436049570757638,
+ "learning_rate": 1.1079060101260046e-06,
+ "loss": 0.1444,
+ "step": 1309
+ },
+ {
+ "epoch": 9.849624060150376,
+ "grad_norm": 0.24176800890376998,
+ "learning_rate": 1.1066704646183115e-06,
+ "loss": 0.1463,
+ "step": 1310
+ },
+ {
+ "epoch": 9.857142857142858,
+ "grad_norm": 0.2513824700933755,
+ "learning_rate": 1.1054347543741555e-06,
+ "loss": 0.143,
+ "step": 1311
+ },
+ {
+ "epoch": 9.864661654135338,
+ "grad_norm": 0.2362543090521668,
+ "learning_rate": 1.104198881301905e-06,
+ "loss": 0.1458,
+ "step": 1312
+ },
+ {
+ "epoch": 9.87218045112782,
+ "grad_norm": 0.2783239212763884,
+ "learning_rate": 1.1029628473101795e-06,
+ "loss": 0.1441,
+ "step": 1313
+ },
+ {
+ "epoch": 9.8796992481203,
+ "grad_norm": 0.2503457659021978,
+ "learning_rate": 1.1017266543078474e-06,
+ "loss": 0.1456,
+ "step": 1314
+ },
+ {
+ "epoch": 9.887218045112782,
+ "grad_norm": 0.24220666337503569,
+ "learning_rate": 1.1004903042040226e-06,
+ "loss": 0.1497,
+ "step": 1315
+ },
+ {
+ "epoch": 9.894736842105264,
+ "grad_norm": 0.2544398314473032,
+ "learning_rate": 1.0992537989080618e-06,
+ "loss": 0.1415,
+ "step": 1316
+ },
+ {
+ "epoch": 9.902255639097744,
+ "grad_norm": 0.24004095618694027,
+ "learning_rate": 1.0980171403295609e-06,
+ "loss": 0.1525,
+ "step": 1317
+ },
+ {
+ "epoch": 9.909774436090226,
+ "grad_norm": 0.24738857402628117,
+ "learning_rate": 1.0967803303783523e-06,
+ "loss": 0.1536,
+ "step": 1318
+ },
+ {
+ "epoch": 9.917293233082706,
+ "grad_norm": 0.23958685559920967,
+ "learning_rate": 1.0955433709645036e-06,
+ "loss": 0.1581,
+ "step": 1319
+ },
+ {
+ "epoch": 9.924812030075188,
+ "grad_norm": 0.2686913568959684,
+ "learning_rate": 1.0943062639983118e-06,
+ "loss": 0.1462,
+ "step": 1320
+ },
+ {
+ "epoch": 9.93233082706767,
+ "grad_norm": 0.2320441190241491,
+ "learning_rate": 1.0930690113903023e-06,
+ "loss": 0.1501,
+ "step": 1321
+ },
+ {
+ "epoch": 9.93984962406015,
+ "grad_norm": 0.22425877712759568,
+ "learning_rate": 1.0918316150512255e-06,
+ "loss": 0.1528,
+ "step": 1322
+ },
+ {
+ "epoch": 9.947368421052632,
+ "grad_norm": 0.22919468620289227,
+ "learning_rate": 1.0905940768920533e-06,
+ "loss": 0.1517,
+ "step": 1323
+ },
+ {
+ "epoch": 9.954887218045112,
+ "grad_norm": 0.23703522974727081,
+ "learning_rate": 1.089356398823977e-06,
+ "loss": 0.1478,
+ "step": 1324
+ },
+ {
+ "epoch": 9.962406015037594,
+ "grad_norm": 0.2338590943695267,
+ "learning_rate": 1.0881185827584044e-06,
+ "loss": 0.155,
+ "step": 1325
+ },
+ {
+ "epoch": 9.969924812030076,
+ "grad_norm": 0.23133116089746705,
+ "learning_rate": 1.0868806306069554e-06,
+ "loss": 0.1507,
+ "step": 1326
+ },
+ {
+ "epoch": 9.977443609022556,
+ "grad_norm": 0.23031213013217697,
+ "learning_rate": 1.0856425442814608e-06,
+ "loss": 0.1374,
+ "step": 1327
+ },
+ {
+ "epoch": 9.984962406015038,
+ "grad_norm": 0.24640489014355202,
+ "learning_rate": 1.0844043256939583e-06,
+ "loss": 0.1499,
+ "step": 1328
+ },
+ {
+ "epoch": 9.992481203007518,
+ "grad_norm": 0.23503082601390635,
+ "learning_rate": 1.0831659767566902e-06,
+ "loss": 0.147,
+ "step": 1329
+ },
+ {
+ "epoch": 10.0,
+ "grad_norm": 0.23212419069161896,
+ "learning_rate": 1.0819274993820996e-06,
+ "loss": 0.1432,
+ "step": 1330
+ },
+ {
+ "epoch": 10.0,
+ "eval_loss": 0.2708674371242523,
+ "eval_runtime": 35.928,
+ "eval_samples_per_second": 12.442,
+ "eval_steps_per_second": 0.195,
+ "step": 1330
+ },
+ {
+ "epoch": 10.007518796992482,
+ "grad_norm": 0.32464471779793636,
+ "learning_rate": 1.080688895482829e-06,
+ "loss": 0.1319,
+ "step": 1331
+ },
+ {
+ "epoch": 10.015037593984962,
+ "grad_norm": 0.2456505159845681,
+ "learning_rate": 1.0794501669717144e-06,
+ "loss": 0.1371,
+ "step": 1332
+ },
+ {
+ "epoch": 10.022556390977444,
+ "grad_norm": 0.2938468983002278,
+ "learning_rate": 1.078211315761786e-06,
+ "loss": 0.1304,
+ "step": 1333
+ },
+ {
+ "epoch": 10.030075187969924,
+ "grad_norm": 0.30195443732429395,
+ "learning_rate": 1.0769723437662628e-06,
+ "loss": 0.1294,
+ "step": 1334
+ },
+ {
+ "epoch": 10.037593984962406,
+ "grad_norm": 0.25370252676071625,
+ "learning_rate": 1.0757332528985504e-06,
+ "loss": 0.1245,
+ "step": 1335
+ },
+ {
+ "epoch": 10.045112781954888,
+ "grad_norm": 0.28726361304805587,
+ "learning_rate": 1.0744940450722377e-06,
+ "loss": 0.1399,
+ "step": 1336
+ },
+ {
+ "epoch": 10.052631578947368,
+ "grad_norm": 0.27347907704044977,
+ "learning_rate": 1.0732547222010948e-06,
+ "loss": 0.1357,
+ "step": 1337
+ },
+ {
+ "epoch": 10.06015037593985,
+ "grad_norm": 0.25214922069303264,
+ "learning_rate": 1.0720152861990693e-06,
+ "loss": 0.1371,
+ "step": 1338
+ },
+ {
+ "epoch": 10.06766917293233,
+ "grad_norm": 0.2732308709337131,
+ "learning_rate": 1.0707757389802831e-06,
+ "loss": 0.1399,
+ "step": 1339
+ },
+ {
+ "epoch": 10.075187969924812,
+ "grad_norm": 0.26540054992711704,
+ "learning_rate": 1.0695360824590304e-06,
+ "loss": 0.1319,
+ "step": 1340
+ },
+ {
+ "epoch": 10.082706766917294,
+ "grad_norm": 0.24740075881900597,
+ "learning_rate": 1.0682963185497735e-06,
+ "loss": 0.1334,
+ "step": 1341
+ },
+ {
+ "epoch": 10.090225563909774,
+ "grad_norm": 0.27561888512460675,
+ "learning_rate": 1.0670564491671414e-06,
+ "loss": 0.1382,
+ "step": 1342
+ },
+ {
+ "epoch": 10.097744360902256,
+ "grad_norm": 0.279566689570178,
+ "learning_rate": 1.0658164762259256e-06,
+ "loss": 0.1426,
+ "step": 1343
+ },
+ {
+ "epoch": 10.105263157894736,
+ "grad_norm": 0.23660643688871474,
+ "learning_rate": 1.0645764016410775e-06,
+ "loss": 0.1286,
+ "step": 1344
+ },
+ {
+ "epoch": 10.112781954887218,
+ "grad_norm": 0.24806168155974426,
+ "learning_rate": 1.0633362273277048e-06,
+ "loss": 0.1321,
+ "step": 1345
+ },
+ {
+ "epoch": 10.1203007518797,
+ "grad_norm": 0.2969628054993818,
+ "learning_rate": 1.0620959552010708e-06,
+ "loss": 0.1373,
+ "step": 1346
+ },
+ {
+ "epoch": 10.12781954887218,
+ "grad_norm": 0.23225796487260555,
+ "learning_rate": 1.0608555871765888e-06,
+ "loss": 0.1303,
+ "step": 1347
+ },
+ {
+ "epoch": 10.135338345864662,
+ "grad_norm": 0.24947062554382368,
+ "learning_rate": 1.0596151251698198e-06,
+ "loss": 0.1334,
+ "step": 1348
+ },
+ {
+ "epoch": 10.142857142857142,
+ "grad_norm": 0.2500361279868523,
+ "learning_rate": 1.0583745710964712e-06,
+ "loss": 0.1396,
+ "step": 1349
+ },
+ {
+ "epoch": 10.150375939849624,
+ "grad_norm": 0.2372264878943572,
+ "learning_rate": 1.0571339268723913e-06,
+ "loss": 0.1368,
+ "step": 1350
+ },
+ {
+ "epoch": 10.157894736842104,
+ "grad_norm": 0.23905744208772567,
+ "learning_rate": 1.0558931944135685e-06,
+ "loss": 0.1221,
+ "step": 1351
+ },
+ {
+ "epoch": 10.165413533834586,
+ "grad_norm": 0.2611209928595847,
+ "learning_rate": 1.054652375636127e-06,
+ "loss": 0.1277,
+ "step": 1352
+ },
+ {
+ "epoch": 10.172932330827068,
+ "grad_norm": 0.24327658632669216,
+ "learning_rate": 1.0534114724563249e-06,
+ "loss": 0.1373,
+ "step": 1353
+ },
+ {
+ "epoch": 10.180451127819548,
+ "grad_norm": 0.24147179919880074,
+ "learning_rate": 1.0521704867905493e-06,
+ "loss": 0.137,
+ "step": 1354
+ },
+ {
+ "epoch": 10.18796992481203,
+ "grad_norm": 0.25964893226229124,
+ "learning_rate": 1.0509294205553167e-06,
+ "loss": 0.134,
+ "step": 1355
+ },
+ {
+ "epoch": 10.19548872180451,
+ "grad_norm": 0.232108425344869,
+ "learning_rate": 1.0496882756672665e-06,
+ "loss": 0.1396,
+ "step": 1356
+ },
+ {
+ "epoch": 10.203007518796992,
+ "grad_norm": 0.25221303595724354,
+ "learning_rate": 1.04844705404316e-06,
+ "loss": 0.148,
+ "step": 1357
+ },
+ {
+ "epoch": 10.210526315789474,
+ "grad_norm": 0.24231057962638114,
+ "learning_rate": 1.047205757599877e-06,
+ "loss": 0.1256,
+ "step": 1358
+ },
+ {
+ "epoch": 10.218045112781954,
+ "grad_norm": 0.23502551072366165,
+ "learning_rate": 1.0459643882544125e-06,
+ "loss": 0.1369,
+ "step": 1359
+ },
+ {
+ "epoch": 10.225563909774436,
+ "grad_norm": 0.30563179006628294,
+ "learning_rate": 1.0447229479238748e-06,
+ "loss": 0.138,
+ "step": 1360
+ },
+ {
+ "epoch": 10.233082706766917,
+ "grad_norm": 0.22921926411969956,
+ "learning_rate": 1.0434814385254815e-06,
+ "loss": 0.1363,
+ "step": 1361
+ },
+ {
+ "epoch": 10.240601503759398,
+ "grad_norm": 0.24013385775804502,
+ "learning_rate": 1.0422398619765568e-06,
+ "loss": 0.1366,
+ "step": 1362
+ },
+ {
+ "epoch": 10.24812030075188,
+ "grad_norm": 0.24883218021938058,
+ "learning_rate": 1.0409982201945287e-06,
+ "loss": 0.1405,
+ "step": 1363
+ },
+ {
+ "epoch": 10.25563909774436,
+ "grad_norm": 0.23339403180753654,
+ "learning_rate": 1.0397565150969259e-06,
+ "loss": 0.1333,
+ "step": 1364
+ },
+ {
+ "epoch": 10.263157894736842,
+ "grad_norm": 0.23668613004627548,
+ "learning_rate": 1.0385147486013746e-06,
+ "loss": 0.1381,
+ "step": 1365
+ },
+ {
+ "epoch": 10.270676691729323,
+ "grad_norm": 0.2393149283402396,
+ "learning_rate": 1.0372729226255961e-06,
+ "loss": 0.1374,
+ "step": 1366
+ },
+ {
+ "epoch": 10.278195488721805,
+ "grad_norm": 0.26368867946021035,
+ "learning_rate": 1.0360310390874038e-06,
+ "loss": 0.1407,
+ "step": 1367
+ },
+ {
+ "epoch": 10.285714285714286,
+ "grad_norm": 0.2350112809226727,
+ "learning_rate": 1.0347890999046998e-06,
+ "loss": 0.1315,
+ "step": 1368
+ },
+ {
+ "epoch": 10.293233082706767,
+ "grad_norm": 0.2316863211712284,
+ "learning_rate": 1.0335471069954716e-06,
+ "loss": 0.1425,
+ "step": 1369
+ },
+ {
+ "epoch": 10.300751879699249,
+ "grad_norm": 0.2534369556706879,
+ "learning_rate": 1.0323050622777904e-06,
+ "loss": 0.14,
+ "step": 1370
+ },
+ {
+ "epoch": 10.308270676691729,
+ "grad_norm": 0.23036004013540434,
+ "learning_rate": 1.0310629676698072e-06,
+ "loss": 0.1364,
+ "step": 1371
+ },
+ {
+ "epoch": 10.31578947368421,
+ "grad_norm": 0.2327785185926705,
+ "learning_rate": 1.0298208250897503e-06,
+ "loss": 0.1419,
+ "step": 1372
+ },
+ {
+ "epoch": 10.323308270676693,
+ "grad_norm": 0.23317208634029615,
+ "learning_rate": 1.0285786364559214e-06,
+ "loss": 0.1294,
+ "step": 1373
+ },
+ {
+ "epoch": 10.330827067669173,
+ "grad_norm": 0.2372209630059384,
+ "learning_rate": 1.0273364036866938e-06,
+ "loss": 0.1485,
+ "step": 1374
+ },
+ {
+ "epoch": 10.338345864661655,
+ "grad_norm": 0.2496912661448492,
+ "learning_rate": 1.0260941287005086e-06,
+ "loss": 0.1382,
+ "step": 1375
+ },
+ {
+ "epoch": 10.345864661654135,
+ "grad_norm": 0.22856624231243786,
+ "learning_rate": 1.024851813415873e-06,
+ "loss": 0.1338,
+ "step": 1376
+ },
+ {
+ "epoch": 10.353383458646617,
+ "grad_norm": 0.24506835153269285,
+ "learning_rate": 1.0236094597513554e-06,
+ "loss": 0.1374,
+ "step": 1377
+ },
+ {
+ "epoch": 10.360902255639097,
+ "grad_norm": 0.23636264570326027,
+ "learning_rate": 1.022367069625584e-06,
+ "loss": 0.1365,
+ "step": 1378
+ },
+ {
+ "epoch": 10.368421052631579,
+ "grad_norm": 0.23979218982924,
+ "learning_rate": 1.0211246449572428e-06,
+ "loss": 0.1329,
+ "step": 1379
+ },
+ {
+ "epoch": 10.37593984962406,
+ "grad_norm": 0.24415894106921915,
+ "learning_rate": 1.01988218766507e-06,
+ "loss": 0.1374,
+ "step": 1380
+ },
+ {
+ "epoch": 10.38345864661654,
+ "grad_norm": 0.24073537655127508,
+ "learning_rate": 1.0186396996678537e-06,
+ "loss": 0.1369,
+ "step": 1381
+ },
+ {
+ "epoch": 10.390977443609023,
+ "grad_norm": 0.24140360276481113,
+ "learning_rate": 1.017397182884429e-06,
+ "loss": 0.1464,
+ "step": 1382
+ },
+ {
+ "epoch": 10.398496240601503,
+ "grad_norm": 0.2375848031571209,
+ "learning_rate": 1.0161546392336767e-06,
+ "loss": 0.1356,
+ "step": 1383
+ },
+ {
+ "epoch": 10.406015037593985,
+ "grad_norm": 0.23996690921007013,
+ "learning_rate": 1.0149120706345174e-06,
+ "loss": 0.13,
+ "step": 1384
+ },
+ {
+ "epoch": 10.413533834586467,
+ "grad_norm": 0.24202139137234105,
+ "learning_rate": 1.0136694790059115e-06,
+ "loss": 0.1366,
+ "step": 1385
+ },
+ {
+ "epoch": 10.421052631578947,
+ "grad_norm": 0.23863822520623018,
+ "learning_rate": 1.0124268662668544e-06,
+ "loss": 0.1355,
+ "step": 1386
+ },
+ {
+ "epoch": 10.428571428571429,
+ "grad_norm": 0.2516869361401898,
+ "learning_rate": 1.0111842343363745e-06,
+ "loss": 0.1362,
+ "step": 1387
+ },
+ {
+ "epoch": 10.436090225563909,
+ "grad_norm": 0.25637636526765456,
+ "learning_rate": 1.0099415851335297e-06,
+ "loss": 0.1385,
+ "step": 1388
+ },
+ {
+ "epoch": 10.443609022556391,
+ "grad_norm": 0.24847895131780476,
+ "learning_rate": 1.0086989205774042e-06,
+ "loss": 0.1319,
+ "step": 1389
+ },
+ {
+ "epoch": 10.451127819548873,
+ "grad_norm": 0.24643114444932782,
+ "learning_rate": 1.0074562425871065e-06,
+ "loss": 0.1222,
+ "step": 1390
+ },
+ {
+ "epoch": 10.458646616541353,
+ "grad_norm": 0.24455510559473806,
+ "learning_rate": 1.0062135530817653e-06,
+ "loss": 0.1334,
+ "step": 1391
+ },
+ {
+ "epoch": 10.466165413533835,
+ "grad_norm": 0.2636234990043188,
+ "learning_rate": 1.0049708539805272e-06,
+ "loss": 0.1335,
+ "step": 1392
+ },
+ {
+ "epoch": 10.473684210526315,
+ "grad_norm": 0.26904618255541285,
+ "learning_rate": 1.0037281472025543e-06,
+ "loss": 0.1324,
+ "step": 1393
+ },
+ {
+ "epoch": 10.481203007518797,
+ "grad_norm": 0.2591323994198032,
+ "learning_rate": 1.0024854346670194e-06,
+ "loss": 0.1297,
+ "step": 1394
+ },
+ {
+ "epoch": 10.488721804511279,
+ "grad_norm": 0.2445157529397136,
+ "learning_rate": 1.0012427182931054e-06,
+ "loss": 0.1268,
+ "step": 1395
+ },
+ {
+ "epoch": 10.496240601503759,
+ "grad_norm": 0.23741188095002141,
+ "learning_rate": 1e-06,
+ "loss": 0.1334,
+ "step": 1396
+ },
+ {
+ "epoch": 10.503759398496241,
+ "grad_norm": 0.23637805322758298,
+ "learning_rate": 9.98757281706895e-07,
+ "loss": 0.132,
+ "step": 1397
+ },
+ {
+ "epoch": 10.511278195488721,
+ "grad_norm": 0.27897205413325515,
+ "learning_rate": 9.975145653329805e-07,
+ "loss": 0.1439,
+ "step": 1398
+ },
+ {
+ "epoch": 10.518796992481203,
+ "grad_norm": 0.24810789452901832,
+ "learning_rate": 9.96271852797446e-07,
+ "loss": 0.1302,
+ "step": 1399
+ },
+ {
+ "epoch": 10.526315789473685,
+ "grad_norm": 0.24241464693755999,
+ "learning_rate": 9.950291460194727e-07,
+ "loss": 0.1432,
+ "step": 1400
+ },
+ {
+ "epoch": 10.533834586466165,
+ "grad_norm": 0.2621701770666348,
+ "learning_rate": 9.93786446918235e-07,
+ "loss": 0.1329,
+ "step": 1401
+ },
+ {
+ "epoch": 10.541353383458647,
+ "grad_norm": 0.24320539036579647,
+ "learning_rate": 9.925437574128937e-07,
+ "loss": 0.1314,
+ "step": 1402
+ },
+ {
+ "epoch": 10.548872180451127,
+ "grad_norm": 0.24055847930985155,
+ "learning_rate": 9.913010794225959e-07,
+ "loss": 0.1326,
+ "step": 1403
+ },
+ {
+ "epoch": 10.556390977443609,
+ "grad_norm": 0.23327806373234386,
+ "learning_rate": 9.900584148664704e-07,
+ "loss": 0.1213,
+ "step": 1404
+ },
+ {
+ "epoch": 10.563909774436091,
+ "grad_norm": 0.2950497434704313,
+ "learning_rate": 9.888157656636256e-07,
+ "loss": 0.1339,
+ "step": 1405
+ },
+ {
+ "epoch": 10.571428571428571,
+ "grad_norm": 0.23650582616569196,
+ "learning_rate": 9.875731337331457e-07,
+ "loss": 0.1405,
+ "step": 1406
+ },
+ {
+ "epoch": 10.578947368421053,
+ "grad_norm": 0.2505055970727908,
+ "learning_rate": 9.863305209940888e-07,
+ "loss": 0.1295,
+ "step": 1407
+ },
+ {
+ "epoch": 10.586466165413533,
+ "grad_norm": 0.2593658288956644,
+ "learning_rate": 9.850879293654827e-07,
+ "loss": 0.1471,
+ "step": 1408
+ },
+ {
+ "epoch": 10.593984962406015,
+ "grad_norm": 0.25430859568942676,
+ "learning_rate": 9.838453607663236e-07,
+ "loss": 0.1281,
+ "step": 1409
+ },
+ {
+ "epoch": 10.601503759398497,
+ "grad_norm": 1.6228487171757506,
+ "learning_rate": 9.826028171155707e-07,
+ "loss": 0.1383,
+ "step": 1410
+ },
+ {
+ "epoch": 10.609022556390977,
+ "grad_norm": 0.259896490210717,
+ "learning_rate": 9.813603003321464e-07,
+ "loss": 0.1293,
+ "step": 1411
+ },
+ {
+ "epoch": 10.61654135338346,
+ "grad_norm": 0.25332257366210453,
+ "learning_rate": 9.801178123349297e-07,
+ "loss": 0.1355,
+ "step": 1412
+ },
+ {
+ "epoch": 10.62406015037594,
+ "grad_norm": 0.24110065854124052,
+ "learning_rate": 9.788753550427573e-07,
+ "loss": 0.1343,
+ "step": 1413
+ },
+ {
+ "epoch": 10.631578947368421,
+ "grad_norm": 0.2393025430328228,
+ "learning_rate": 9.776329303744159e-07,
+ "loss": 0.13,
+ "step": 1414
+ },
+ {
+ "epoch": 10.639097744360903,
+ "grad_norm": 0.26040989644878115,
+ "learning_rate": 9.763905402486447e-07,
+ "loss": 0.1354,
+ "step": 1415
+ },
+ {
+ "epoch": 10.646616541353383,
+ "grad_norm": 0.2734832285033153,
+ "learning_rate": 9.751481865841267e-07,
+ "loss": 0.1352,
+ "step": 1416
+ },
+ {
+ "epoch": 10.654135338345865,
+ "grad_norm": 0.24879837153581547,
+ "learning_rate": 9.739058712994913e-07,
+ "loss": 0.1285,
+ "step": 1417
+ },
+ {
+ "epoch": 10.661654135338345,
+ "grad_norm": 0.25822847065246063,
+ "learning_rate": 9.726635963133062e-07,
+ "loss": 0.1377,
+ "step": 1418
+ },
+ {
+ "epoch": 10.669172932330827,
+ "grad_norm": 0.26047302640596426,
+ "learning_rate": 9.714213635440787e-07,
+ "loss": 0.1362,
+ "step": 1419
+ },
+ {
+ "epoch": 10.676691729323307,
+ "grad_norm": 0.2539670480678628,
+ "learning_rate": 9.701791749102494e-07,
+ "loss": 0.1385,
+ "step": 1420
+ },
+ {
+ "epoch": 10.68421052631579,
+ "grad_norm": 0.2540615852759655,
+ "learning_rate": 9.689370323301927e-07,
+ "loss": 0.1355,
+ "step": 1421
+ },
+ {
+ "epoch": 10.691729323308271,
+ "grad_norm": 0.2576218339732704,
+ "learning_rate": 9.676949377222095e-07,
+ "loss": 0.1371,
+ "step": 1422
+ },
+ {
+ "epoch": 10.699248120300751,
+ "grad_norm": 0.2500866177911836,
+ "learning_rate": 9.664528930045283e-07,
+ "loss": 0.1313,
+ "step": 1423
+ },
+ {
+ "epoch": 10.706766917293233,
+ "grad_norm": 0.2500346263704434,
+ "learning_rate": 9.652109000953006e-07,
+ "loss": 0.1361,
+ "step": 1424
+ },
+ {
+ "epoch": 10.714285714285714,
+ "grad_norm": 0.24387087383987832,
+ "learning_rate": 9.639689609125961e-07,
+ "loss": 0.1317,
+ "step": 1425
+ },
+ {
+ "epoch": 10.721804511278195,
+ "grad_norm": 0.24002486453466018,
+ "learning_rate": 9.627270773744042e-07,
+ "loss": 0.1251,
+ "step": 1426
+ },
+ {
+ "epoch": 10.729323308270677,
+ "grad_norm": 0.272061555099557,
+ "learning_rate": 9.614852513986256e-07,
+ "loss": 0.1396,
+ "step": 1427
+ },
+ {
+ "epoch": 10.736842105263158,
+ "grad_norm": 0.25047893002423915,
+ "learning_rate": 9.602434849030745e-07,
+ "loss": 0.134,
+ "step": 1428
+ },
+ {
+ "epoch": 10.74436090225564,
+ "grad_norm": 0.238778746867874,
+ "learning_rate": 9.590017798054712e-07,
+ "loss": 0.124,
+ "step": 1429
+ },
+ {
+ "epoch": 10.75187969924812,
+ "grad_norm": 0.2632397496638039,
+ "learning_rate": 9.577601380234433e-07,
+ "loss": 0.1325,
+ "step": 1430
+ },
+ {
+ "epoch": 10.759398496240602,
+ "grad_norm": 0.24373774754858213,
+ "learning_rate": 9.565185614745184e-07,
+ "loss": 0.1256,
+ "step": 1431
+ },
+ {
+ "epoch": 10.766917293233083,
+ "grad_norm": 0.24632647093379129,
+ "learning_rate": 9.552770520761254e-07,
+ "loss": 0.1467,
+ "step": 1432
+ },
+ {
+ "epoch": 10.774436090225564,
+ "grad_norm": 0.2639641126703772,
+ "learning_rate": 9.540356117455876e-07,
+ "loss": 0.1324,
+ "step": 1433
+ },
+ {
+ "epoch": 10.781954887218046,
+ "grad_norm": 0.25934599392211133,
+ "learning_rate": 9.527942424001234e-07,
+ "loss": 0.1437,
+ "step": 1434
+ },
+ {
+ "epoch": 10.789473684210526,
+ "grad_norm": 0.2605451703683244,
+ "learning_rate": 9.5155294595684e-07,
+ "loss": 0.1407,
+ "step": 1435
+ },
+ {
+ "epoch": 10.796992481203008,
+ "grad_norm": 0.25210908977436625,
+ "learning_rate": 9.503117243327335e-07,
+ "loss": 0.1375,
+ "step": 1436
+ },
+ {
+ "epoch": 10.80451127819549,
+ "grad_norm": 0.24500043242593272,
+ "learning_rate": 9.490705794446829e-07,
+ "loss": 0.1327,
+ "step": 1437
+ },
+ {
+ "epoch": 10.81203007518797,
+ "grad_norm": 0.25873713631210304,
+ "learning_rate": 9.478295132094505e-07,
+ "loss": 0.1375,
+ "step": 1438
+ },
+ {
+ "epoch": 10.819548872180452,
+ "grad_norm": 0.2679482195953247,
+ "learning_rate": 9.465885275436751e-07,
+ "loss": 0.1328,
+ "step": 1439
+ },
+ {
+ "epoch": 10.827067669172932,
+ "grad_norm": 0.23468697656069576,
+ "learning_rate": 9.453476243638729e-07,
+ "loss": 0.1453,
+ "step": 1440
+ },
+ {
+ "epoch": 10.834586466165414,
+ "grad_norm": 0.25038091178966665,
+ "learning_rate": 9.441068055864314e-07,
+ "loss": 0.1345,
+ "step": 1441
+ },
+ {
+ "epoch": 10.842105263157894,
+ "grad_norm": 0.2514551235968068,
+ "learning_rate": 9.428660731276088e-07,
+ "loss": 0.1334,
+ "step": 1442
+ },
+ {
+ "epoch": 10.849624060150376,
+ "grad_norm": 0.24807612562818812,
+ "learning_rate": 9.416254289035287e-07,
+ "loss": 0.1355,
+ "step": 1443
+ },
+ {
+ "epoch": 10.857142857142858,
+ "grad_norm": 0.2432819937126929,
+ "learning_rate": 9.403848748301802e-07,
+ "loss": 0.1341,
+ "step": 1444
+ },
+ {
+ "epoch": 10.864661654135338,
+ "grad_norm": 0.23614747266486624,
+ "learning_rate": 9.391444128234111e-07,
+ "loss": 0.1382,
+ "step": 1445
+ },
+ {
+ "epoch": 10.87218045112782,
+ "grad_norm": 0.2589070296792415,
+ "learning_rate": 9.37904044798929e-07,
+ "loss": 0.1255,
+ "step": 1446
+ },
+ {
+ "epoch": 10.8796992481203,
+ "grad_norm": 0.2630136323671777,
+ "learning_rate": 9.366637726722949e-07,
+ "loss": 0.1259,
+ "step": 1447
+ },
+ {
+ "epoch": 10.887218045112782,
+ "grad_norm": 0.24970998267202757,
+ "learning_rate": 9.354235983589227e-07,
+ "loss": 0.1312,
+ "step": 1448
+ },
+ {
+ "epoch": 10.894736842105264,
+ "grad_norm": 0.24095511149374935,
+ "learning_rate": 9.341835237740747e-07,
+ "loss": 0.1363,
+ "step": 1449
+ },
+ {
+ "epoch": 10.902255639097744,
+ "grad_norm": 0.25264782977710687,
+ "learning_rate": 9.329435508328585e-07,
+ "loss": 0.122,
+ "step": 1450
+ },
+ {
+ "epoch": 10.909774436090226,
+ "grad_norm": 0.24040727322948693,
+ "learning_rate": 9.317036814502267e-07,
+ "loss": 0.1361,
+ "step": 1451
+ },
+ {
+ "epoch": 10.917293233082706,
+ "grad_norm": 0.25319133246095965,
+ "learning_rate": 9.304639175409698e-07,
+ "loss": 0.1453,
+ "step": 1452
+ },
+ {
+ "epoch": 10.924812030075188,
+ "grad_norm": 0.24887983411225983,
+ "learning_rate": 9.292242610197171e-07,
+ "loss": 0.1375,
+ "step": 1453
+ },
+ {
+ "epoch": 10.93233082706767,
+ "grad_norm": 0.24196819748329831,
+ "learning_rate": 9.279847138009307e-07,
+ "loss": 0.1381,
+ "step": 1454
+ },
+ {
+ "epoch": 10.93984962406015,
+ "grad_norm": 0.2411342429985461,
+ "learning_rate": 9.267452777989054e-07,
+ "loss": 0.1382,
+ "step": 1455
+ },
+ {
+ "epoch": 10.947368421052632,
+ "grad_norm": 0.2421293447744662,
+ "learning_rate": 9.255059549277622e-07,
+ "loss": 0.1366,
+ "step": 1456
+ },
+ {
+ "epoch": 10.954887218045112,
+ "grad_norm": 0.24702985585824214,
+ "learning_rate": 9.2426674710145e-07,
+ "loss": 0.139,
+ "step": 1457
+ },
+ {
+ "epoch": 10.962406015037594,
+ "grad_norm": 0.2402958156783438,
+ "learning_rate": 9.230276562337372e-07,
+ "loss": 0.141,
+ "step": 1458
+ },
+ {
+ "epoch": 10.969924812030076,
+ "grad_norm": 0.2478662399798217,
+ "learning_rate": 9.217886842382142e-07,
+ "loss": 0.1357,
+ "step": 1459
+ },
+ {
+ "epoch": 10.977443609022556,
+ "grad_norm": 0.24397624973029383,
+ "learning_rate": 9.205498330282856e-07,
+ "loss": 0.1353,
+ "step": 1460
+ },
+ {
+ "epoch": 10.984962406015038,
+ "grad_norm": 0.2433206472422932,
+ "learning_rate": 9.193111045171713e-07,
+ "loss": 0.1459,
+ "step": 1461
+ },
+ {
+ "epoch": 10.992481203007518,
+ "grad_norm": 0.2510707963522596,
+ "learning_rate": 9.180725006179001e-07,
+ "loss": 0.1302,
+ "step": 1462
+ },
+ {
+ "epoch": 11.0,
+ "grad_norm": 0.24145911955767407,
+ "learning_rate": 9.168340232433098e-07,
+ "loss": 0.14,
+ "step": 1463
+ },
+ {
+ "epoch": 11.0,
+ "eval_loss": 0.2832958400249481,
+ "eval_runtime": 37.0544,
+ "eval_samples_per_second": 12.063,
+ "eval_steps_per_second": 0.189,
+ "step": 1463
+ },
+ {
+ "epoch": 11.007518796992482,
+ "grad_norm": 0.34171563613431505,
+ "learning_rate": 9.155956743060415e-07,
+ "loss": 0.123,
+ "step": 1464
+ },
+ {
+ "epoch": 11.015037593984962,
+ "grad_norm": 0.2649588527379112,
+ "learning_rate": 9.143574557185393e-07,
+ "loss": 0.1171,
+ "step": 1465
+ },
+ {
+ "epoch": 11.022556390977444,
+ "grad_norm": 0.28387231008087005,
+ "learning_rate": 9.131193693930445e-07,
+ "loss": 0.1277,
+ "step": 1466
+ },
+ {
+ "epoch": 11.030075187969924,
+ "grad_norm": 0.32975852569321046,
+ "learning_rate": 9.118814172415958e-07,
+ "loss": 0.1324,
+ "step": 1467
+ },
+ {
+ "epoch": 11.037593984962406,
+ "grad_norm": 0.251620285663552,
+ "learning_rate": 9.106436011760228e-07,
+ "loss": 0.1257,
+ "step": 1468
+ },
+ {
+ "epoch": 11.045112781954888,
+ "grad_norm": 0.27058774096481025,
+ "learning_rate": 9.094059231079469e-07,
+ "loss": 0.1365,
+ "step": 1469
+ },
+ {
+ "epoch": 11.052631578947368,
+ "grad_norm": 0.2970158250396516,
+ "learning_rate": 9.081683849487744e-07,
+ "loss": 0.1255,
+ "step": 1470
+ },
+ {
+ "epoch": 11.06015037593985,
+ "grad_norm": 0.2604528654656931,
+ "learning_rate": 9.069309886096976e-07,
+ "loss": 0.1195,
+ "step": 1471
+ },
+ {
+ "epoch": 11.06766917293233,
+ "grad_norm": 0.25764554565578157,
+ "learning_rate": 9.056937360016879e-07,
+ "loss": 0.1288,
+ "step": 1472
+ },
+ {
+ "epoch": 11.075187969924812,
+ "grad_norm": 0.7689801785154663,
+ "learning_rate": 9.044566290354965e-07,
+ "loss": 0.1339,
+ "step": 1473
+ },
+ {
+ "epoch": 11.082706766917294,
+ "grad_norm": 0.33823701609729434,
+ "learning_rate": 9.032196696216475e-07,
+ "loss": 0.1177,
+ "step": 1474
+ },
+ {
+ "epoch": 11.090225563909774,
+ "grad_norm": 0.25663702450532394,
+ "learning_rate": 9.019828596704393e-07,
+ "loss": 0.1318,
+ "step": 1475
+ },
+ {
+ "epoch": 11.097744360902256,
+ "grad_norm": 0.28150964863035705,
+ "learning_rate": 9.007462010919385e-07,
+ "loss": 0.1163,
+ "step": 1476
+ },
+ {
+ "epoch": 11.105263157894736,
+ "grad_norm": 0.24710011401020046,
+ "learning_rate": 8.995096957959773e-07,
+ "loss": 0.1268,
+ "step": 1477
+ },
+ {
+ "epoch": 11.112781954887218,
+ "grad_norm": 0.2455594770080136,
+ "learning_rate": 8.982733456921529e-07,
+ "loss": 0.1198,
+ "step": 1478
+ },
+ {
+ "epoch": 11.1203007518797,
+ "grad_norm": 0.28650984356744086,
+ "learning_rate": 8.970371526898206e-07,
+ "loss": 0.1339,
+ "step": 1479
+ },
+ {
+ "epoch": 11.12781954887218,
+ "grad_norm": 0.26111468867018306,
+ "learning_rate": 8.958011186980952e-07,
+ "loss": 0.1257,
+ "step": 1480
+ },
+ {
+ "epoch": 11.135338345864662,
+ "grad_norm": 0.26148354023801124,
+ "learning_rate": 8.945652456258445e-07,
+ "loss": 0.1279,
+ "step": 1481
+ },
+ {
+ "epoch": 11.142857142857142,
+ "grad_norm": 0.2563009856899243,
+ "learning_rate": 8.933295353816887e-07,
+ "loss": 0.1179,
+ "step": 1482
+ },
+ {
+ "epoch": 11.150375939849624,
+ "grad_norm": 0.2640203527165647,
+ "learning_rate": 8.920939898739953e-07,
+ "loss": 0.1235,
+ "step": 1483
+ },
+ {
+ "epoch": 11.157894736842104,
+ "grad_norm": 0.24830956002660973,
+ "learning_rate": 8.908586110108793e-07,
+ "loss": 0.1292,
+ "step": 1484
+ },
+ {
+ "epoch": 11.165413533834586,
+ "grad_norm": 0.23984868509828536,
+ "learning_rate": 8.896234007001963e-07,
+ "loss": 0.1208,
+ "step": 1485
+ },
+ {
+ "epoch": 11.172932330827068,
+ "grad_norm": 0.26925246947991016,
+ "learning_rate": 8.883883608495433e-07,
+ "loss": 0.1245,
+ "step": 1486
+ },
+ {
+ "epoch": 11.180451127819548,
+ "grad_norm": 0.2650155407155453,
+ "learning_rate": 8.871534933662524e-07,
+ "loss": 0.1293,
+ "step": 1487
+ },
+ {
+ "epoch": 11.18796992481203,
+ "grad_norm": 0.24937485326882147,
+ "learning_rate": 8.859188001573915e-07,
+ "loss": 0.1162,
+ "step": 1488
+ },
+ {
+ "epoch": 11.19548872180451,
+ "grad_norm": 0.24903045930460005,
+ "learning_rate": 8.846842831297572e-07,
+ "loss": 0.1254,
+ "step": 1489
+ },
+ {
+ "epoch": 11.203007518796992,
+ "grad_norm": 0.25534406192144926,
+ "learning_rate": 8.83449944189876e-07,
+ "loss": 0.1324,
+ "step": 1490
+ },
+ {
+ "epoch": 11.210526315789474,
+ "grad_norm": 0.2661388367915712,
+ "learning_rate": 8.822157852439976e-07,
+ "loss": 0.1196,
+ "step": 1491
+ },
+ {
+ "epoch": 11.218045112781954,
+ "grad_norm": 0.2741892192165312,
+ "learning_rate": 8.809818081980953e-07,
+ "loss": 0.1241,
+ "step": 1492
+ },
+ {
+ "epoch": 11.225563909774436,
+ "grad_norm": 0.29458446687312834,
+ "learning_rate": 8.7974801495786e-07,
+ "loss": 0.1239,
+ "step": 1493
+ },
+ {
+ "epoch": 11.233082706766917,
+ "grad_norm": 0.2952011509648695,
+ "learning_rate": 8.785144074287004e-07,
+ "loss": 0.1245,
+ "step": 1494
+ },
+ {
+ "epoch": 11.240601503759398,
+ "grad_norm": 0.2889781437238123,
+ "learning_rate": 8.772809875157366e-07,
+ "loss": 0.1239,
+ "step": 1495
+ },
+ {
+ "epoch": 11.24812030075188,
+ "grad_norm": 0.26340734901554763,
+ "learning_rate": 8.760477571238005e-07,
+ "loss": 0.1222,
+ "step": 1496
+ },
+ {
+ "epoch": 11.25563909774436,
+ "grad_norm": 0.2632253906961304,
+ "learning_rate": 8.748147181574302e-07,
+ "loss": 0.1181,
+ "step": 1497
+ },
+ {
+ "epoch": 11.263157894736842,
+ "grad_norm": 0.2638895419399464,
+ "learning_rate": 8.735818725208691e-07,
+ "loss": 0.1177,
+ "step": 1498
+ },
+ {
+ "epoch": 11.270676691729323,
+ "grad_norm": 0.26947527003386446,
+ "learning_rate": 8.72349222118061e-07,
+ "loss": 0.1257,
+ "step": 1499
+ },
+ {
+ "epoch": 11.278195488721805,
+ "grad_norm": 0.2693215782019798,
+ "learning_rate": 8.711167688526493e-07,
+ "loss": 0.1184,
+ "step": 1500
+ },
+ {
+ "epoch": 11.285714285714286,
+ "grad_norm": 0.24549218518162763,
+ "learning_rate": 8.698845146279719e-07,
+ "loss": 0.1239,
+ "step": 1501
+ },
+ {
+ "epoch": 11.293233082706767,
+ "grad_norm": 0.24757439735377024,
+ "learning_rate": 8.686524613470603e-07,
+ "loss": 0.126,
+ "step": 1502
+ },
+ {
+ "epoch": 11.300751879699249,
+ "grad_norm": 0.2413621000037693,
+ "learning_rate": 8.67420610912635e-07,
+ "loss": 0.1168,
+ "step": 1503
+ },
+ {
+ "epoch": 11.308270676691729,
+ "grad_norm": 0.25461264978022546,
+ "learning_rate": 8.661889652271029e-07,
+ "loss": 0.1178,
+ "step": 1504
+ },
+ {
+ "epoch": 11.31578947368421,
+ "grad_norm": 0.27260920282914103,
+ "learning_rate": 8.649575261925562e-07,
+ "loss": 0.1226,
+ "step": 1505
+ },
+ {
+ "epoch": 11.323308270676693,
+ "grad_norm": 0.2585024640307792,
+ "learning_rate": 8.63726295710766e-07,
+ "loss": 0.1227,
+ "step": 1506
+ },
+ {
+ "epoch": 11.330827067669173,
+ "grad_norm": 0.23474845109605483,
+ "learning_rate": 8.624952756831831e-07,
+ "loss": 0.1172,
+ "step": 1507
+ },
+ {
+ "epoch": 11.338345864661655,
+ "grad_norm": 0.281380487070757,
+ "learning_rate": 8.612644680109318e-07,
+ "loss": 0.1211,
+ "step": 1508
+ },
+ {
+ "epoch": 11.345864661654135,
+ "grad_norm": 0.2442334592990169,
+ "learning_rate": 8.600338745948098e-07,
+ "loss": 0.1243,
+ "step": 1509
+ },
+ {
+ "epoch": 11.353383458646617,
+ "grad_norm": 0.24798489498152762,
+ "learning_rate": 8.588034973352826e-07,
+ "loss": 0.1191,
+ "step": 1510
+ },
+ {
+ "epoch": 11.360902255639097,
+ "grad_norm": 0.27126131607855836,
+ "learning_rate": 8.575733381324833e-07,
+ "loss": 0.1258,
+ "step": 1511
+ },
+ {
+ "epoch": 11.368421052631579,
+ "grad_norm": 0.2558025141503257,
+ "learning_rate": 8.563433988862064e-07,
+ "loss": 0.1247,
+ "step": 1512
+ },
+ {
+ "epoch": 11.37593984962406,
+ "grad_norm": 0.2435530555315264,
+ "learning_rate": 8.551136814959088e-07,
+ "loss": 0.1291,
+ "step": 1513
+ },
+ {
+ "epoch": 11.38345864661654,
+ "grad_norm": 0.25061611455248267,
+ "learning_rate": 8.53884187860703e-07,
+ "loss": 0.1216,
+ "step": 1514
+ },
+ {
+ "epoch": 11.390977443609023,
+ "grad_norm": 0.2413590506775299,
+ "learning_rate": 8.526549198793575e-07,
+ "loss": 0.1225,
+ "step": 1515
+ },
+ {
+ "epoch": 11.398496240601503,
+ "grad_norm": 0.24449249244650345,
+ "learning_rate": 8.514258794502904e-07,
+ "loss": 0.1165,
+ "step": 1516
+ },
+ {
+ "epoch": 11.406015037593985,
+ "grad_norm": 0.25491287609788216,
+ "learning_rate": 8.501970684715708e-07,
+ "loss": 0.1148,
+ "step": 1517
+ },
+ {
+ "epoch": 11.413533834586467,
+ "grad_norm": 0.24466681481746733,
+ "learning_rate": 8.48968488840911e-07,
+ "loss": 0.1231,
+ "step": 1518
+ },
+ {
+ "epoch": 11.421052631578947,
+ "grad_norm": 0.25861177443083155,
+ "learning_rate": 8.47740142455668e-07,
+ "loss": 0.1209,
+ "step": 1519
+ },
+ {
+ "epoch": 11.428571428571429,
+ "grad_norm": 0.24833869308145384,
+ "learning_rate": 8.46512031212837e-07,
+ "loss": 0.1217,
+ "step": 1520
+ },
+ {
+ "epoch": 11.436090225563909,
+ "grad_norm": 0.2391339701718387,
+ "learning_rate": 8.452841570090516e-07,
+ "loss": 0.1203,
+ "step": 1521
+ },
+ {
+ "epoch": 11.443609022556391,
+ "grad_norm": 0.2471723868399335,
+ "learning_rate": 8.440565217405778e-07,
+ "loss": 0.1294,
+ "step": 1522
+ },
+ {
+ "epoch": 11.451127819548873,
+ "grad_norm": 0.24609944492609817,
+ "learning_rate": 8.428291273033138e-07,
+ "loss": 0.1261,
+ "step": 1523
+ },
+ {
+ "epoch": 11.458646616541353,
+ "grad_norm": 0.24887002273475708,
+ "learning_rate": 8.41601975592785e-07,
+ "loss": 0.1193,
+ "step": 1524
+ },
+ {
+ "epoch": 11.466165413533835,
+ "grad_norm": 0.2985131353487837,
+ "learning_rate": 8.40375068504143e-07,
+ "loss": 0.1254,
+ "step": 1525
+ },
+ {
+ "epoch": 11.473684210526315,
+ "grad_norm": 0.2554516930467868,
+ "learning_rate": 8.391484079321601e-07,
+ "loss": 0.1251,
+ "step": 1526
+ },
+ {
+ "epoch": 11.481203007518797,
+ "grad_norm": 0.2619763953237756,
+ "learning_rate": 8.379219957712295e-07,
+ "loss": 0.1191,
+ "step": 1527
+ },
+ {
+ "epoch": 11.488721804511279,
+ "grad_norm": 0.2517119978197511,
+ "learning_rate": 8.366958339153598e-07,
+ "loss": 0.1192,
+ "step": 1528
+ },
+ {
+ "epoch": 11.496240601503759,
+ "grad_norm": 0.2521212430346227,
+ "learning_rate": 8.354699242581728e-07,
+ "loss": 0.1252,
+ "step": 1529
+ },
+ {
+ "epoch": 11.503759398496241,
+ "grad_norm": 0.28306479626315745,
+ "learning_rate": 8.342442686929023e-07,
+ "loss": 0.1323,
+ "step": 1530
+ },
+ {
+ "epoch": 11.511278195488721,
+ "grad_norm": 0.2638786089373067,
+ "learning_rate": 8.330188691123876e-07,
+ "loss": 0.1251,
+ "step": 1531
+ },
+ {
+ "epoch": 11.518796992481203,
+ "grad_norm": 0.24917331249848595,
+ "learning_rate": 8.317937274090746e-07,
+ "loss": 0.1217,
+ "step": 1532
+ },
+ {
+ "epoch": 11.526315789473685,
+ "grad_norm": 0.25944973882919387,
+ "learning_rate": 8.305688454750094e-07,
+ "loss": 0.1177,
+ "step": 1533
+ },
+ {
+ "epoch": 11.533834586466165,
+ "grad_norm": 0.2602323691225412,
+ "learning_rate": 8.293442252018384e-07,
+ "loss": 0.1202,
+ "step": 1534
+ },
+ {
+ "epoch": 11.541353383458647,
+ "grad_norm": 0.259819899672187,
+ "learning_rate": 8.28119868480802e-07,
+ "loss": 0.1224,
+ "step": 1535
+ },
+ {
+ "epoch": 11.548872180451127,
+ "grad_norm": 0.2780412544178925,
+ "learning_rate": 8.26895777202736e-07,
+ "loss": 0.1288,
+ "step": 1536
+ },
+ {
+ "epoch": 11.556390977443609,
+ "grad_norm": 0.25049262004746864,
+ "learning_rate": 8.256719532580638e-07,
+ "loss": 0.1189,
+ "step": 1537
+ },
+ {
+ "epoch": 11.563909774436091,
+ "grad_norm": 0.24417609530768256,
+ "learning_rate": 8.244483985367982e-07,
+ "loss": 0.1257,
+ "step": 1538
+ },
+ {
+ "epoch": 11.571428571428571,
+ "grad_norm": 0.2539928264331365,
+ "learning_rate": 8.232251149285342e-07,
+ "loss": 0.1218,
+ "step": 1539
+ },
+ {
+ "epoch": 11.578947368421053,
+ "grad_norm": 0.25631577788975246,
+ "learning_rate": 8.220021043224499e-07,
+ "loss": 0.1291,
+ "step": 1540
+ },
+ {
+ "epoch": 11.586466165413533,
+ "grad_norm": 0.2595875948922113,
+ "learning_rate": 8.207793686072999e-07,
+ "loss": 0.1134,
+ "step": 1541
+ },
+ {
+ "epoch": 11.593984962406015,
+ "grad_norm": 0.25577143318761275,
+ "learning_rate": 8.195569096714166e-07,
+ "loss": 0.13,
+ "step": 1542
+ },
+ {
+ "epoch": 11.601503759398497,
+ "grad_norm": 0.26748845521618597,
+ "learning_rate": 8.183347294027023e-07,
+ "loss": 0.1198,
+ "step": 1543
+ },
+ {
+ "epoch": 11.609022556390977,
+ "grad_norm": 0.26378537652073025,
+ "learning_rate": 8.171128296886315e-07,
+ "loss": 0.1238,
+ "step": 1544
+ },
+ {
+ "epoch": 11.61654135338346,
+ "grad_norm": 0.2719145205053655,
+ "learning_rate": 8.158912124162433e-07,
+ "loss": 0.1177,
+ "step": 1545
+ },
+ {
+ "epoch": 11.62406015037594,
+ "grad_norm": 0.2469253269994194,
+ "learning_rate": 8.146698794721423e-07,
+ "loss": 0.1288,
+ "step": 1546
+ },
+ {
+ "epoch": 11.631578947368421,
+ "grad_norm": 0.2501088717678514,
+ "learning_rate": 8.134488327424926e-07,
+ "loss": 0.126,
+ "step": 1547
+ },
+ {
+ "epoch": 11.639097744360903,
+ "grad_norm": 0.2515603761782012,
+ "learning_rate": 8.122280741130175e-07,
+ "loss": 0.1241,
+ "step": 1548
+ },
+ {
+ "epoch": 11.646616541353383,
+ "grad_norm": 0.25477987727351303,
+ "learning_rate": 8.110076054689942e-07,
+ "loss": 0.1272,
+ "step": 1549
+ },
+ {
+ "epoch": 11.654135338345865,
+ "grad_norm": 0.2454719795528635,
+ "learning_rate": 8.097874286952533e-07,
+ "loss": 0.1254,
+ "step": 1550
+ },
+ {
+ "epoch": 11.661654135338345,
+ "grad_norm": 0.2672739770431931,
+ "learning_rate": 8.085675456761734e-07,
+ "loss": 0.1245,
+ "step": 1551
+ },
+ {
+ "epoch": 11.669172932330827,
+ "grad_norm": 0.27976286486723945,
+ "learning_rate": 8.073479582956806e-07,
+ "loss": 0.1234,
+ "step": 1552
+ },
+ {
+ "epoch": 11.676691729323307,
+ "grad_norm": 0.28043452609422065,
+ "learning_rate": 8.061286684372434e-07,
+ "loss": 0.1308,
+ "step": 1553
+ },
+ {
+ "epoch": 11.68421052631579,
+ "grad_norm": 0.2517854056137587,
+ "learning_rate": 8.049096779838717e-07,
+ "loss": 0.1221,
+ "step": 1554
+ },
+ {
+ "epoch": 11.691729323308271,
+ "grad_norm": 0.24313920645845932,
+ "learning_rate": 8.036909888181127e-07,
+ "loss": 0.1266,
+ "step": 1555
+ },
+ {
+ "epoch": 11.699248120300751,
+ "grad_norm": 0.25074721666769695,
+ "learning_rate": 8.024726028220474e-07,
+ "loss": 0.1211,
+ "step": 1556
+ },
+ {
+ "epoch": 11.706766917293233,
+ "grad_norm": 0.2994370259847074,
+ "learning_rate": 8.012545218772904e-07,
+ "loss": 0.1305,
+ "step": 1557
+ },
+ {
+ "epoch": 11.714285714285714,
+ "grad_norm": 0.2643698791724764,
+ "learning_rate": 8.000367478649834e-07,
+ "loss": 0.1241,
+ "step": 1558
+ },
+ {
+ "epoch": 11.721804511278195,
+ "grad_norm": 0.24445265940444896,
+ "learning_rate": 7.988192826657958e-07,
+ "loss": 0.1173,
+ "step": 1559
+ },
+ {
+ "epoch": 11.729323308270677,
+ "grad_norm": 0.2777310843314281,
+ "learning_rate": 7.976021281599181e-07,
+ "loss": 0.1195,
+ "step": 1560
+ },
+ {
+ "epoch": 11.736842105263158,
+ "grad_norm": 0.2531585634452584,
+ "learning_rate": 7.963852862270633e-07,
+ "loss": 0.1267,
+ "step": 1561
+ },
+ {
+ "epoch": 11.74436090225564,
+ "grad_norm": 0.2508682851053104,
+ "learning_rate": 7.951687587464593e-07,
+ "loss": 0.131,
+ "step": 1562
+ },
+ {
+ "epoch": 11.75187969924812,
+ "grad_norm": 0.25752076721530126,
+ "learning_rate": 7.939525475968505e-07,
+ "loss": 0.1235,
+ "step": 1563
+ },
+ {
+ "epoch": 11.759398496240602,
+ "grad_norm": 0.24552321217607748,
+ "learning_rate": 7.92736654656491e-07,
+ "loss": 0.1277,
+ "step": 1564
+ },
+ {
+ "epoch": 11.766917293233083,
+ "grad_norm": 0.24640979294665005,
+ "learning_rate": 7.91521081803145e-07,
+ "loss": 0.129,
+ "step": 1565
+ },
+ {
+ "epoch": 11.774436090225564,
+ "grad_norm": 0.24792978367558555,
+ "learning_rate": 7.903058309140808e-07,
+ "loss": 0.1216,
+ "step": 1566
+ },
+ {
+ "epoch": 11.781954887218046,
+ "grad_norm": 0.2693004433486237,
+ "learning_rate": 7.890909038660713e-07,
+ "loss": 0.1301,
+ "step": 1567
+ },
+ {
+ "epoch": 11.789473684210526,
+ "grad_norm": 0.2789710426696441,
+ "learning_rate": 7.878763025353874e-07,
+ "loss": 0.1276,
+ "step": 1568
+ },
+ {
+ "epoch": 11.796992481203008,
+ "grad_norm": 0.24518544600045147,
+ "learning_rate": 7.866620287977987e-07,
+ "loss": 0.1315,
+ "step": 1569
+ },
+ {
+ "epoch": 11.80451127819549,
+ "grad_norm": 0.24397032839008828,
+ "learning_rate": 7.854480845285672e-07,
+ "loss": 0.1233,
+ "step": 1570
+ },
+ {
+ "epoch": 11.81203007518797,
+ "grad_norm": 0.2839076209268749,
+ "learning_rate": 7.842344716024477e-07,
+ "loss": 0.1214,
+ "step": 1571
+ },
+ {
+ "epoch": 11.819548872180452,
+ "grad_norm": 0.26297268397504503,
+ "learning_rate": 7.830211918936819e-07,
+ "loss": 0.1278,
+ "step": 1572
+ },
+ {
+ "epoch": 11.827067669172932,
+ "grad_norm": 0.24553754487683627,
+ "learning_rate": 7.818082472759983e-07,
+ "loss": 0.1194,
+ "step": 1573
+ },
+ {
+ "epoch": 11.834586466165414,
+ "grad_norm": 0.2543841137401341,
+ "learning_rate": 7.805956396226062e-07,
+ "loss": 0.1309,
+ "step": 1574
+ },
+ {
+ "epoch": 11.842105263157894,
+ "grad_norm": 0.2585028909230306,
+ "learning_rate": 7.793833708061965e-07,
+ "loss": 0.1218,
+ "step": 1575
+ },
+ {
+ "epoch": 11.849624060150376,
+ "grad_norm": 1.0727909939337046,
+ "learning_rate": 7.781714426989345e-07,
+ "loss": 0.1277,
+ "step": 1576
+ },
+ {
+ "epoch": 11.857142857142858,
+ "grad_norm": 0.27685205301373544,
+ "learning_rate": 7.769598571724619e-07,
+ "loss": 0.13,
+ "step": 1577
+ },
+ {
+ "epoch": 11.864661654135338,
+ "grad_norm": 0.2544807835704898,
+ "learning_rate": 7.75748616097889e-07,
+ "loss": 0.1234,
+ "step": 1578
+ },
+ {
+ "epoch": 11.87218045112782,
+ "grad_norm": 0.24904396661162334,
+ "learning_rate": 7.74537721345796e-07,
+ "loss": 0.1302,
+ "step": 1579
+ },
+ {
+ "epoch": 11.8796992481203,
+ "grad_norm": 0.2677498073820257,
+ "learning_rate": 7.733271747862264e-07,
+ "loss": 0.1282,
+ "step": 1580
+ },
+ {
+ "epoch": 11.887218045112782,
+ "grad_norm": 0.26154429653531996,
+ "learning_rate": 7.72116978288688e-07,
+ "loss": 0.1173,
+ "step": 1581
+ },
+ {
+ "epoch": 11.894736842105264,
+ "grad_norm": 0.2528397490922901,
+ "learning_rate": 7.709071337221468e-07,
+ "loss": 0.1132,
+ "step": 1582
+ },
+ {
+ "epoch": 11.902255639097744,
+ "grad_norm": 0.251489812154206,
+ "learning_rate": 7.696976429550247e-07,
+ "loss": 0.1208,
+ "step": 1583
+ },
+ {
+ "epoch": 11.909774436090226,
+ "grad_norm": 0.25703747416852707,
+ "learning_rate": 7.68488507855199e-07,
+ "loss": 0.1201,
+ "step": 1584
+ },
+ {
+ "epoch": 11.917293233082706,
+ "grad_norm": 0.2535641822413463,
+ "learning_rate": 7.672797302899958e-07,
+ "loss": 0.1184,
+ "step": 1585
+ },
+ {
+ "epoch": 11.924812030075188,
+ "grad_norm": 0.2806463594016138,
+ "learning_rate": 7.660713121261909e-07,
+ "loss": 0.1179,
+ "step": 1586
+ },
+ {
+ "epoch": 11.93233082706767,
+ "grad_norm": 0.27347293738040257,
+ "learning_rate": 7.648632552300032e-07,
+ "loss": 0.1255,
+ "step": 1587
+ },
+ {
+ "epoch": 11.93984962406015,
+ "grad_norm": 0.25609219434427527,
+ "learning_rate": 7.636555614670952e-07,
+ "loss": 0.1304,
+ "step": 1588
+ },
+ {
+ "epoch": 11.947368421052632,
+ "grad_norm": 0.24850582950400127,
+ "learning_rate": 7.624482327025674e-07,
+ "loss": 0.1261,
+ "step": 1589
+ },
+ {
+ "epoch": 11.954887218045112,
+ "grad_norm": 0.25480537069784726,
+ "learning_rate": 7.612412708009582e-07,
+ "loss": 0.1261,
+ "step": 1590
+ },
+ {
+ "epoch": 11.962406015037594,
+ "grad_norm": 0.2631255504213761,
+ "learning_rate": 7.600346776262371e-07,
+ "loss": 0.1294,
+ "step": 1591
+ },
+ {
+ "epoch": 11.969924812030076,
+ "grad_norm": 0.2634468654944961,
+ "learning_rate": 7.588284550418067e-07,
+ "loss": 0.1173,
+ "step": 1592
+ },
+ {
+ "epoch": 11.977443609022556,
+ "grad_norm": 0.2510796661152114,
+ "learning_rate": 7.57622604910495e-07,
+ "loss": 0.1241,
+ "step": 1593
+ },
+ {
+ "epoch": 11.984962406015038,
+ "grad_norm": 0.2516807070131344,
+ "learning_rate": 7.56417129094557e-07,
+ "loss": 0.1156,
+ "step": 1594
+ },
+ {
+ "epoch": 11.992481203007518,
+ "grad_norm": 0.2877080889654345,
+ "learning_rate": 7.552120294556674e-07,
+ "loss": 0.1271,
+ "step": 1595
+ },
+ {
+ "epoch": 12.0,
+ "grad_norm": 0.34746537958280077,
+ "learning_rate": 7.54007307854922e-07,
+ "loss": 0.1237,
+ "step": 1596
+ },
+ {
+ "epoch": 12.0,
+ "eval_loss": 0.2973707914352417,
+ "eval_runtime": 36.2167,
+ "eval_samples_per_second": 12.342,
+ "eval_steps_per_second": 0.193,
+ "step": 1596
+ },
+ {
+ "epoch": 12.007518796992482,
+ "grad_norm": 0.32735909416294673,
+ "learning_rate": 7.52802966152831e-07,
+ "loss": 0.1101,
+ "step": 1597
+ },
+ {
+ "epoch": 12.015037593984962,
+ "grad_norm": 0.23641899238294323,
+ "learning_rate": 7.515990062093194e-07,
+ "loss": 0.1212,
+ "step": 1598
+ },
+ {
+ "epoch": 12.022556390977444,
+ "grad_norm": 0.26328992353634867,
+ "learning_rate": 7.503954298837214e-07,
+ "loss": 0.1187,
+ "step": 1599
+ },
+ {
+ "epoch": 12.030075187969924,
+ "grad_norm": 0.28526555357620137,
+ "learning_rate": 7.4919223903478e-07,
+ "loss": 0.1118,
+ "step": 1600
+ },
+ {
+ "epoch": 12.037593984962406,
+ "grad_norm": 0.26850766086212335,
+ "learning_rate": 7.479894355206413e-07,
+ "loss": 0.1115,
+ "step": 1601
+ },
+ {
+ "epoch": 12.045112781954888,
+ "grad_norm": 0.26120095520516146,
+ "learning_rate": 7.46787021198855e-07,
+ "loss": 0.1136,
+ "step": 1602
+ },
+ {
+ "epoch": 12.052631578947368,
+ "grad_norm": 0.2644474832754939,
+ "learning_rate": 7.455849979263682e-07,
+ "loss": 0.1215,
+ "step": 1603
+ },
+ {
+ "epoch": 12.06015037593985,
+ "grad_norm": 0.54247964104322,
+ "learning_rate": 7.443833675595253e-07,
+ "loss": 0.1146,
+ "step": 1604
+ },
+ {
+ "epoch": 12.06766917293233,
+ "grad_norm": 0.2720436615292139,
+ "learning_rate": 7.431821319540629e-07,
+ "loss": 0.1117,
+ "step": 1605
+ },
+ {
+ "epoch": 12.075187969924812,
+ "grad_norm": 0.27935120000531366,
+ "learning_rate": 7.419812929651091e-07,
+ "loss": 0.1061,
+ "step": 1606
+ },
+ {
+ "epoch": 12.082706766917294,
+ "grad_norm": 0.2912415044910777,
+ "learning_rate": 7.407808524471781e-07,
+ "loss": 0.1154,
+ "step": 1607
+ },
+ {
+ "epoch": 12.090225563909774,
+ "grad_norm": 0.24401610261949336,
+ "learning_rate": 7.395808122541695e-07,
+ "loss": 0.1173,
+ "step": 1608
+ },
+ {
+ "epoch": 12.097744360902256,
+ "grad_norm": 0.28616914007007965,
+ "learning_rate": 7.383811742393653e-07,
+ "loss": 0.1144,
+ "step": 1609
+ },
+ {
+ "epoch": 12.105263157894736,
+ "grad_norm": 0.2762174708990091,
+ "learning_rate": 7.371819402554247e-07,
+ "loss": 0.119,
+ "step": 1610
+ },
+ {
+ "epoch": 12.112781954887218,
+ "grad_norm": 0.25078636903671864,
+ "learning_rate": 7.35983112154385e-07,
+ "loss": 0.1151,
+ "step": 1611
+ },
+ {
+ "epoch": 12.1203007518797,
+ "grad_norm": 0.2718096563853234,
+ "learning_rate": 7.347846917876543e-07,
+ "loss": 0.1233,
+ "step": 1612
+ },
+ {
+ "epoch": 12.12781954887218,
+ "grad_norm": 0.2790226524283502,
+ "learning_rate": 7.335866810060139e-07,
+ "loss": 0.1166,
+ "step": 1613
+ },
+ {
+ "epoch": 12.135338345864662,
+ "grad_norm": 0.2637631817245403,
+ "learning_rate": 7.323890816596093e-07,
+ "loss": 0.114,
+ "step": 1614
+ },
+ {
+ "epoch": 12.142857142857142,
+ "grad_norm": 0.2573826152162909,
+ "learning_rate": 7.311918955979537e-07,
+ "loss": 0.1199,
+ "step": 1615
+ },
+ {
+ "epoch": 12.150375939849624,
+ "grad_norm": 0.2606908133808131,
+ "learning_rate": 7.299951246699196e-07,
+ "loss": 0.1098,
+ "step": 1616
+ },
+ {
+ "epoch": 12.157894736842104,
+ "grad_norm": 0.26218756468641585,
+ "learning_rate": 7.287987707237402e-07,
+ "loss": 0.1111,
+ "step": 1617
+ },
+ {
+ "epoch": 12.165413533834586,
+ "grad_norm": 0.24263613834830378,
+ "learning_rate": 7.276028356070032e-07,
+ "loss": 0.1119,
+ "step": 1618
+ },
+ {
+ "epoch": 12.172932330827068,
+ "grad_norm": 0.27474337410093636,
+ "learning_rate": 7.264073211666509e-07,
+ "loss": 0.1147,
+ "step": 1619
+ },
+ {
+ "epoch": 12.180451127819548,
+ "grad_norm": 0.2915916914592556,
+ "learning_rate": 7.252122292489746e-07,
+ "loss": 0.1192,
+ "step": 1620
+ },
+ {
+ "epoch": 12.18796992481203,
+ "grad_norm": 0.27413036510518934,
+ "learning_rate": 7.240175616996146e-07,
+ "loss": 0.1089,
+ "step": 1621
+ },
+ {
+ "epoch": 12.19548872180451,
+ "grad_norm": 0.2659922028834579,
+ "learning_rate": 7.228233203635538e-07,
+ "loss": 0.1293,
+ "step": 1622
+ },
+ {
+ "epoch": 12.203007518796992,
+ "grad_norm": 0.2716107979307241,
+ "learning_rate": 7.216295070851193e-07,
+ "loss": 0.1087,
+ "step": 1623
+ },
+ {
+ "epoch": 12.210526315789474,
+ "grad_norm": 0.2418315560974555,
+ "learning_rate": 7.204361237079746e-07,
+ "loss": 0.1102,
+ "step": 1624
+ },
+ {
+ "epoch": 12.218045112781954,
+ "grad_norm": 0.27904651557380716,
+ "learning_rate": 7.192431720751217e-07,
+ "loss": 0.1151,
+ "step": 1625
+ },
+ {
+ "epoch": 12.225563909774436,
+ "grad_norm": 0.27546305289370204,
+ "learning_rate": 7.180506540288938e-07,
+ "loss": 0.114,
+ "step": 1626
+ },
+ {
+ "epoch": 12.233082706766917,
+ "grad_norm": 0.2609049764841966,
+ "learning_rate": 7.168585714109561e-07,
+ "loss": 0.1034,
+ "step": 1627
+ },
+ {
+ "epoch": 12.240601503759398,
+ "grad_norm": 0.2651966627179685,
+ "learning_rate": 7.156669260622997e-07,
+ "loss": 0.1145,
+ "step": 1628
+ },
+ {
+ "epoch": 12.24812030075188,
+ "grad_norm": 0.27221176869779695,
+ "learning_rate": 7.144757198232422e-07,
+ "loss": 0.1132,
+ "step": 1629
+ },
+ {
+ "epoch": 12.25563909774436,
+ "grad_norm": 0.25629156362646216,
+ "learning_rate": 7.132849545334217e-07,
+ "loss": 0.1159,
+ "step": 1630
+ },
+ {
+ "epoch": 12.263157894736842,
+ "grad_norm": 0.2654653301312955,
+ "learning_rate": 7.120946320317963e-07,
+ "loss": 0.1171,
+ "step": 1631
+ },
+ {
+ "epoch": 12.270676691729323,
+ "grad_norm": 0.286977917483048,
+ "learning_rate": 7.109047541566391e-07,
+ "loss": 0.1123,
+ "step": 1632
+ },
+ {
+ "epoch": 12.278195488721805,
+ "grad_norm": 0.2651441047683774,
+ "learning_rate": 7.097153227455378e-07,
+ "loss": 0.1083,
+ "step": 1633
+ },
+ {
+ "epoch": 12.285714285714286,
+ "grad_norm": 0.26534515237668294,
+ "learning_rate": 7.085263396353895e-07,
+ "loss": 0.1134,
+ "step": 1634
+ },
+ {
+ "epoch": 12.293233082706767,
+ "grad_norm": 0.2665026019940054,
+ "learning_rate": 7.073378066623998e-07,
+ "loss": 0.1075,
+ "step": 1635
+ },
+ {
+ "epoch": 12.300751879699249,
+ "grad_norm": 0.25567133610477877,
+ "learning_rate": 7.061497256620792e-07,
+ "loss": 0.116,
+ "step": 1636
+ },
+ {
+ "epoch": 12.308270676691729,
+ "grad_norm": 0.2554736855527389,
+ "learning_rate": 7.049620984692391e-07,
+ "loss": 0.1107,
+ "step": 1637
+ },
+ {
+ "epoch": 12.31578947368421,
+ "grad_norm": 0.24251956427227325,
+ "learning_rate": 7.037749269179914e-07,
+ "loss": 0.1162,
+ "step": 1638
+ },
+ {
+ "epoch": 12.323308270676693,
+ "grad_norm": 0.2654035717905756,
+ "learning_rate": 7.02588212841743e-07,
+ "loss": 0.1169,
+ "step": 1639
+ },
+ {
+ "epoch": 12.330827067669173,
+ "grad_norm": 0.26324900816286784,
+ "learning_rate": 7.014019580731959e-07,
+ "loss": 0.1167,
+ "step": 1640
+ },
+ {
+ "epoch": 12.338345864661655,
+ "grad_norm": 0.27383712779548364,
+ "learning_rate": 7.002161644443411e-07,
+ "loss": 0.1063,
+ "step": 1641
+ },
+ {
+ "epoch": 12.345864661654135,
+ "grad_norm": 0.2709252981383822,
+ "learning_rate": 6.990308337864589e-07,
+ "loss": 0.1228,
+ "step": 1642
+ },
+ {
+ "epoch": 12.353383458646617,
+ "grad_norm": 0.2724531330797684,
+ "learning_rate": 6.978459679301132e-07,
+ "loss": 0.1163,
+ "step": 1643
+ },
+ {
+ "epoch": 12.360902255639097,
+ "grad_norm": 0.2841193406800999,
+ "learning_rate": 6.966615687051516e-07,
+ "loss": 0.1111,
+ "step": 1644
+ },
+ {
+ "epoch": 12.368421052631579,
+ "grad_norm": 0.2740018750008192,
+ "learning_rate": 6.954776379406995e-07,
+ "loss": 0.1103,
+ "step": 1645
+ },
+ {
+ "epoch": 12.37593984962406,
+ "grad_norm": 0.28146573310078654,
+ "learning_rate": 6.942941774651605e-07,
+ "loss": 0.1171,
+ "step": 1646
+ },
+ {
+ "epoch": 12.38345864661654,
+ "grad_norm": 0.2624212777408767,
+ "learning_rate": 6.9311118910621e-07,
+ "loss": 0.1202,
+ "step": 1647
+ },
+ {
+ "epoch": 12.390977443609023,
+ "grad_norm": 0.25863456858358164,
+ "learning_rate": 6.919286746907962e-07,
+ "loss": 0.1127,
+ "step": 1648
+ },
+ {
+ "epoch": 12.398496240601503,
+ "grad_norm": 0.2632909659533151,
+ "learning_rate": 6.907466360451337e-07,
+ "loss": 0.1143,
+ "step": 1649
+ },
+ {
+ "epoch": 12.406015037593985,
+ "grad_norm": 0.2602436261315313,
+ "learning_rate": 6.895650749947041e-07,
+ "loss": 0.1136,
+ "step": 1650
+ },
+ {
+ "epoch": 12.413533834586467,
+ "grad_norm": 0.2510527804938189,
+ "learning_rate": 6.883839933642493e-07,
+ "loss": 0.1189,
+ "step": 1651
+ },
+ {
+ "epoch": 12.421052631578947,
+ "grad_norm": 0.26374688271400776,
+ "learning_rate": 6.87203392977773e-07,
+ "loss": 0.1089,
+ "step": 1652
+ },
+ {
+ "epoch": 12.428571428571429,
+ "grad_norm": 0.32013403296645243,
+ "learning_rate": 6.860232756585336e-07,
+ "loss": 0.1086,
+ "step": 1653
+ },
+ {
+ "epoch": 12.436090225563909,
+ "grad_norm": 0.258073159497804,
+ "learning_rate": 6.848436432290456e-07,
+ "loss": 0.1079,
+ "step": 1654
+ },
+ {
+ "epoch": 12.443609022556391,
+ "grad_norm": 0.2710320854422078,
+ "learning_rate": 6.836644975110726e-07,
+ "loss": 0.1307,
+ "step": 1655
+ },
+ {
+ "epoch": 12.451127819548873,
+ "grad_norm": 0.2684669526207422,
+ "learning_rate": 6.824858403256283e-07,
+ "loss": 0.1178,
+ "step": 1656
+ },
+ {
+ "epoch": 12.458646616541353,
+ "grad_norm": 0.25543706011618217,
+ "learning_rate": 6.813076734929703e-07,
+ "loss": 0.1136,
+ "step": 1657
+ },
+ {
+ "epoch": 12.466165413533835,
+ "grad_norm": 0.25843827999463387,
+ "learning_rate": 6.80129998832601e-07,
+ "loss": 0.1126,
+ "step": 1658
+ },
+ {
+ "epoch": 12.473684210526315,
+ "grad_norm": 0.29080846504934615,
+ "learning_rate": 6.789528181632603e-07,
+ "loss": 0.1106,
+ "step": 1659
+ },
+ {
+ "epoch": 12.481203007518797,
+ "grad_norm": 0.26212424507369037,
+ "learning_rate": 6.777761333029274e-07,
+ "loss": 0.1143,
+ "step": 1660
+ },
+ {
+ "epoch": 12.488721804511279,
+ "grad_norm": 0.26036401270482273,
+ "learning_rate": 6.765999460688144e-07,
+ "loss": 0.1108,
+ "step": 1661
+ },
+ {
+ "epoch": 12.496240601503759,
+ "grad_norm": 0.30172191039738755,
+ "learning_rate": 6.754242582773645e-07,
+ "loss": 0.1223,
+ "step": 1662
+ },
+ {
+ "epoch": 12.503759398496241,
+ "grad_norm": 0.27409192235488905,
+ "learning_rate": 6.742490717442517e-07,
+ "loss": 0.1076,
+ "step": 1663
+ },
+ {
+ "epoch": 12.511278195488721,
+ "grad_norm": 0.2612979759840921,
+ "learning_rate": 6.730743882843734e-07,
+ "loss": 0.1078,
+ "step": 1664
+ },
+ {
+ "epoch": 12.518796992481203,
+ "grad_norm": 0.2531045414953186,
+ "learning_rate": 6.719002097118523e-07,
+ "loss": 0.114,
+ "step": 1665
+ },
+ {
+ "epoch": 12.526315789473685,
+ "grad_norm": 0.3219722873834596,
+ "learning_rate": 6.707265378400295e-07,
+ "loss": 0.1158,
+ "step": 1666
+ },
+ {
+ "epoch": 12.533834586466165,
+ "grad_norm": 0.28432548011779885,
+ "learning_rate": 6.69553374481465e-07,
+ "loss": 0.1093,
+ "step": 1667
+ },
+ {
+ "epoch": 12.541353383458647,
+ "grad_norm": 0.2637194284682025,
+ "learning_rate": 6.683807214479323e-07,
+ "loss": 0.117,
+ "step": 1668
+ },
+ {
+ "epoch": 12.548872180451127,
+ "grad_norm": 0.26715970820178875,
+ "learning_rate": 6.672085805504178e-07,
+ "loss": 0.116,
+ "step": 1669
+ },
+ {
+ "epoch": 12.556390977443609,
+ "grad_norm": 0.2714714050445944,
+ "learning_rate": 6.660369535991162e-07,
+ "loss": 0.1166,
+ "step": 1670
+ },
+ {
+ "epoch": 12.563909774436091,
+ "grad_norm": 0.27848824750148055,
+ "learning_rate": 6.648658424034292e-07,
+ "loss": 0.1179,
+ "step": 1671
+ },
+ {
+ "epoch": 12.571428571428571,
+ "grad_norm": 0.27557787861088495,
+ "learning_rate": 6.636952487719612e-07,
+ "loss": 0.1122,
+ "step": 1672
+ },
+ {
+ "epoch": 12.578947368421053,
+ "grad_norm": 0.3067665362209671,
+ "learning_rate": 6.625251745125182e-07,
+ "loss": 0.1196,
+ "step": 1673
+ },
+ {
+ "epoch": 12.586466165413533,
+ "grad_norm": 0.26385384288614927,
+ "learning_rate": 6.61355621432103e-07,
+ "loss": 0.1104,
+ "step": 1674
+ },
+ {
+ "epoch": 12.593984962406015,
+ "grad_norm": 0.46896924426215386,
+ "learning_rate": 6.601865913369149e-07,
+ "loss": 0.12,
+ "step": 1675
+ },
+ {
+ "epoch": 12.601503759398497,
+ "grad_norm": 0.25595454238746534,
+ "learning_rate": 6.590180860323439e-07,
+ "loss": 0.1068,
+ "step": 1676
+ },
+ {
+ "epoch": 12.609022556390977,
+ "grad_norm": 0.27307865342882987,
+ "learning_rate": 6.578501073229713e-07,
+ "loss": 0.1208,
+ "step": 1677
+ },
+ {
+ "epoch": 12.61654135338346,
+ "grad_norm": 0.25703201153367367,
+ "learning_rate": 6.566826570125634e-07,
+ "loss": 0.1098,
+ "step": 1678
+ },
+ {
+ "epoch": 12.62406015037594,
+ "grad_norm": 0.27489584062413275,
+ "learning_rate": 6.555157369040723e-07,
+ "loss": 0.1121,
+ "step": 1679
+ },
+ {
+ "epoch": 12.631578947368421,
+ "grad_norm": 0.3130623984111721,
+ "learning_rate": 6.543493487996292e-07,
+ "loss": 0.105,
+ "step": 1680
+ },
+ {
+ "epoch": 12.639097744360903,
+ "grad_norm": 0.30400411608251554,
+ "learning_rate": 6.531834945005459e-07,
+ "loss": 0.1217,
+ "step": 1681
+ },
+ {
+ "epoch": 12.646616541353383,
+ "grad_norm": 0.2907629928853798,
+ "learning_rate": 6.520181758073078e-07,
+ "loss": 0.121,
+ "step": 1682
+ },
+ {
+ "epoch": 12.654135338345865,
+ "grad_norm": 0.26494739239457715,
+ "learning_rate": 6.508533945195749e-07,
+ "loss": 0.1151,
+ "step": 1683
+ },
+ {
+ "epoch": 12.661654135338345,
+ "grad_norm": 0.26400500225022194,
+ "learning_rate": 6.496891524361756e-07,
+ "loss": 0.1178,
+ "step": 1684
+ },
+ {
+ "epoch": 12.669172932330827,
+ "grad_norm": 0.41499611666010905,
+ "learning_rate": 6.485254513551072e-07,
+ "loss": 0.1141,
+ "step": 1685
+ },
+ {
+ "epoch": 12.676691729323307,
+ "grad_norm": 0.26994564045471475,
+ "learning_rate": 6.473622930735303e-07,
+ "loss": 0.1142,
+ "step": 1686
+ },
+ {
+ "epoch": 12.68421052631579,
+ "grad_norm": 0.2783416866870226,
+ "learning_rate": 6.461996793877673e-07,
+ "loss": 0.1117,
+ "step": 1687
+ },
+ {
+ "epoch": 12.691729323308271,
+ "grad_norm": 0.2636632609025374,
+ "learning_rate": 6.450376120933008e-07,
+ "loss": 0.116,
+ "step": 1688
+ },
+ {
+ "epoch": 12.699248120300751,
+ "grad_norm": 0.3093833633449611,
+ "learning_rate": 6.438760929847678e-07,
+ "loss": 0.1152,
+ "step": 1689
+ },
+ {
+ "epoch": 12.706766917293233,
+ "grad_norm": 0.3185323223560314,
+ "learning_rate": 6.427151238559602e-07,
+ "loss": 0.1155,
+ "step": 1690
+ },
+ {
+ "epoch": 12.714285714285714,
+ "grad_norm": 0.29328047336248453,
+ "learning_rate": 6.415547064998193e-07,
+ "loss": 0.119,
+ "step": 1691
+ },
+ {
+ "epoch": 12.721804511278195,
+ "grad_norm": 0.26563011170469175,
+ "learning_rate": 6.403948427084356e-07,
+ "loss": 0.1183,
+ "step": 1692
+ },
+ {
+ "epoch": 12.729323308270677,
+ "grad_norm": 0.255246985311997,
+ "learning_rate": 6.392355342730431e-07,
+ "loss": 0.112,
+ "step": 1693
+ },
+ {
+ "epoch": 12.736842105263158,
+ "grad_norm": 0.3016965410820718,
+ "learning_rate": 6.380767829840201e-07,
+ "loss": 0.1087,
+ "step": 1694
+ },
+ {
+ "epoch": 12.74436090225564,
+ "grad_norm": 0.27093830181197875,
+ "learning_rate": 6.369185906308825e-07,
+ "loss": 0.1191,
+ "step": 1695
+ },
+ {
+ "epoch": 12.75187969924812,
+ "grad_norm": 0.2596010113119012,
+ "learning_rate": 6.357609590022847e-07,
+ "loss": 0.1078,
+ "step": 1696
+ },
+ {
+ "epoch": 12.759398496240602,
+ "grad_norm": 0.25389809644378,
+ "learning_rate": 6.346038898860136e-07,
+ "loss": 0.1107,
+ "step": 1697
+ },
+ {
+ "epoch": 12.766917293233083,
+ "grad_norm": 0.26144977403769304,
+ "learning_rate": 6.334473850689888e-07,
+ "loss": 0.1126,
+ "step": 1698
+ },
+ {
+ "epoch": 12.774436090225564,
+ "grad_norm": 0.254562224458361,
+ "learning_rate": 6.322914463372569e-07,
+ "loss": 0.1179,
+ "step": 1699
+ },
+ {
+ "epoch": 12.781954887218046,
+ "grad_norm": 0.250850649210709,
+ "learning_rate": 6.311360754759923e-07,
+ "loss": 0.1172,
+ "step": 1700
+ },
+ {
+ "epoch": 12.789473684210526,
+ "grad_norm": 0.25882451231676146,
+ "learning_rate": 6.299812742694901e-07,
+ "loss": 0.115,
+ "step": 1701
+ },
+ {
+ "epoch": 12.796992481203008,
+ "grad_norm": 0.26509725310499604,
+ "learning_rate": 6.288270445011677e-07,
+ "loss": 0.1055,
+ "step": 1702
+ },
+ {
+ "epoch": 12.80451127819549,
+ "grad_norm": 0.24568284596677017,
+ "learning_rate": 6.276733879535583e-07,
+ "loss": 0.1028,
+ "step": 1703
+ },
+ {
+ "epoch": 12.81203007518797,
+ "grad_norm": 0.2626476922336455,
+ "learning_rate": 6.265203064083115e-07,
+ "loss": 0.1152,
+ "step": 1704
+ },
+ {
+ "epoch": 12.819548872180452,
+ "grad_norm": 0.26345394042850007,
+ "learning_rate": 6.253678016461872e-07,
+ "loss": 0.1104,
+ "step": 1705
+ },
+ {
+ "epoch": 12.827067669172932,
+ "grad_norm": 0.24848666762607066,
+ "learning_rate": 6.242158754470561e-07,
+ "loss": 0.1012,
+ "step": 1706
+ },
+ {
+ "epoch": 12.834586466165414,
+ "grad_norm": 0.262657958249588,
+ "learning_rate": 6.23064529589894e-07,
+ "loss": 0.1124,
+ "step": 1707
+ },
+ {
+ "epoch": 12.842105263157894,
+ "grad_norm": 0.27380994274544074,
+ "learning_rate": 6.219137658527817e-07,
+ "loss": 0.1225,
+ "step": 1708
+ },
+ {
+ "epoch": 12.849624060150376,
+ "grad_norm": 0.2726734587390962,
+ "learning_rate": 6.207635860129001e-07,
+ "loss": 0.1132,
+ "step": 1709
+ },
+ {
+ "epoch": 12.857142857142858,
+ "grad_norm": 0.26809192234713913,
+ "learning_rate": 6.196139918465291e-07,
+ "loss": 0.1174,
+ "step": 1710
+ },
+ {
+ "epoch": 12.864661654135338,
+ "grad_norm": 0.26054345994167327,
+ "learning_rate": 6.184649851290428e-07,
+ "loss": 0.1064,
+ "step": 1711
+ },
+ {
+ "epoch": 12.87218045112782,
+ "grad_norm": 0.2486235912786445,
+ "learning_rate": 6.173165676349102e-07,
+ "loss": 0.1184,
+ "step": 1712
+ },
+ {
+ "epoch": 12.8796992481203,
+ "grad_norm": 0.2638631713099812,
+ "learning_rate": 6.161687411376886e-07,
+ "loss": 0.108,
+ "step": 1713
+ },
+ {
+ "epoch": 12.887218045112782,
+ "grad_norm": 0.2653623497524921,
+ "learning_rate": 6.150215074100224e-07,
+ "loss": 0.1194,
+ "step": 1714
+ },
+ {
+ "epoch": 12.894736842105264,
+ "grad_norm": 0.28091900615073206,
+ "learning_rate": 6.138748682236423e-07,
+ "loss": 0.116,
+ "step": 1715
+ },
+ {
+ "epoch": 12.902255639097744,
+ "grad_norm": 0.28639258522118305,
+ "learning_rate": 6.12728825349359e-07,
+ "loss": 0.1114,
+ "step": 1716
+ },
+ {
+ "epoch": 12.909774436090226,
+ "grad_norm": 0.2538409853105954,
+ "learning_rate": 6.115833805570638e-07,
+ "loss": 0.1141,
+ "step": 1717
+ },
+ {
+ "epoch": 12.917293233082706,
+ "grad_norm": 0.2920985308973281,
+ "learning_rate": 6.104385356157229e-07,
+ "loss": 0.1151,
+ "step": 1718
+ },
+ {
+ "epoch": 12.924812030075188,
+ "grad_norm": 0.2555263356638123,
+ "learning_rate": 6.092942922933775e-07,
+ "loss": 0.1201,
+ "step": 1719
+ },
+ {
+ "epoch": 12.93233082706767,
+ "grad_norm": 0.2890256624177854,
+ "learning_rate": 6.081506523571384e-07,
+ "loss": 0.1139,
+ "step": 1720
+ },
+ {
+ "epoch": 12.93984962406015,
+ "grad_norm": 0.2896454414890025,
+ "learning_rate": 6.070076175731859e-07,
+ "loss": 0.1172,
+ "step": 1721
+ },
+ {
+ "epoch": 12.947368421052632,
+ "grad_norm": 0.26493107679101097,
+ "learning_rate": 6.058651897067641e-07,
+ "loss": 0.1104,
+ "step": 1722
+ },
+ {
+ "epoch": 12.954887218045112,
+ "grad_norm": 0.24868994354483953,
+ "learning_rate": 6.047233705221819e-07,
+ "loss": 0.1142,
+ "step": 1723
+ },
+ {
+ "epoch": 12.962406015037594,
+ "grad_norm": 0.2586589965339756,
+ "learning_rate": 6.035821617828059e-07,
+ "loss": 0.1181,
+ "step": 1724
+ },
+ {
+ "epoch": 12.969924812030076,
+ "grad_norm": 0.2899711222526131,
+ "learning_rate": 6.024415652510622e-07,
+ "loss": 0.1202,
+ "step": 1725
+ },
+ {
+ "epoch": 12.977443609022556,
+ "grad_norm": 0.2601909678778353,
+ "learning_rate": 6.01301582688429e-07,
+ "loss": 0.1061,
+ "step": 1726
+ },
+ {
+ "epoch": 12.984962406015038,
+ "grad_norm": 0.2581591429071276,
+ "learning_rate": 6.001622158554388e-07,
+ "loss": 0.1194,
+ "step": 1727
+ },
+ {
+ "epoch": 12.992481203007518,
+ "grad_norm": 0.27062270654426634,
+ "learning_rate": 5.990234665116712e-07,
+ "loss": 0.0996,
+ "step": 1728
+ },
+ {
+ "epoch": 13.0,
+ "grad_norm": 0.2727893650998764,
+ "learning_rate": 5.978853364157538e-07,
+ "loss": 0.1062,
+ "step": 1729
+ },
+ {
+ "epoch": 13.0,
+ "eval_loss": 0.3118974268436432,
+ "eval_runtime": 36.0618,
+ "eval_samples_per_second": 12.395,
+ "eval_steps_per_second": 0.194,
+ "step": 1729
+ },
+ {
+ "epoch": 13.007518796992482,
+ "grad_norm": 0.3168392528104374,
+ "learning_rate": 5.967478273253562e-07,
+ "loss": 0.1029,
+ "step": 1730
+ },
+ {
+ "epoch": 13.015037593984962,
+ "grad_norm": 0.23814189911502404,
+ "learning_rate": 5.956109409971907e-07,
+ "loss": 0.1155,
+ "step": 1731
+ },
+ {
+ "epoch": 13.022556390977444,
+ "grad_norm": 0.26092899852340334,
+ "learning_rate": 5.944746791870061e-07,
+ "loss": 0.104,
+ "step": 1732
+ },
+ {
+ "epoch": 13.030075187969924,
+ "grad_norm": 0.271920732333518,
+ "learning_rate": 5.933390436495885e-07,
+ "loss": 0.1013,
+ "step": 1733
+ },
+ {
+ "epoch": 13.037593984962406,
+ "grad_norm": 0.2787257641244149,
+ "learning_rate": 5.92204036138755e-07,
+ "loss": 0.1055,
+ "step": 1734
+ },
+ {
+ "epoch": 13.045112781954888,
+ "grad_norm": 0.2551587140856634,
+ "learning_rate": 5.910696584073544e-07,
+ "loss": 0.1005,
+ "step": 1735
+ },
+ {
+ "epoch": 13.052631578947368,
+ "grad_norm": 0.27205858791644366,
+ "learning_rate": 5.899359122072617e-07,
+ "loss": 0.0993,
+ "step": 1736
+ },
+ {
+ "epoch": 13.06015037593985,
+ "grad_norm": 0.2598641847560507,
+ "learning_rate": 5.888027992893779e-07,
+ "loss": 0.1096,
+ "step": 1737
+ },
+ {
+ "epoch": 13.06766917293233,
+ "grad_norm": 0.2527752832386274,
+ "learning_rate": 5.87670321403624e-07,
+ "loss": 0.1018,
+ "step": 1738
+ },
+ {
+ "epoch": 13.075187969924812,
+ "grad_norm": 0.2729434861002557,
+ "learning_rate": 5.865384802989424e-07,
+ "loss": 0.103,
+ "step": 1739
+ },
+ {
+ "epoch": 13.082706766917294,
+ "grad_norm": 0.2843099697710811,
+ "learning_rate": 5.854072777232914e-07,
+ "loss": 0.093,
+ "step": 1740
+ },
+ {
+ "epoch": 13.090225563909774,
+ "grad_norm": 0.26076846174935364,
+ "learning_rate": 5.842767154236419e-07,
+ "loss": 0.0958,
+ "step": 1741
+ },
+ {
+ "epoch": 13.097744360902256,
+ "grad_norm": 0.25918031649149104,
+ "learning_rate": 5.831467951459783e-07,
+ "loss": 0.11,
+ "step": 1742
+ },
+ {
+ "epoch": 13.105263157894736,
+ "grad_norm": 0.3052718393924426,
+ "learning_rate": 5.820175186352909e-07,
+ "loss": 0.0925,
+ "step": 1743
+ },
+ {
+ "epoch": 13.112781954887218,
+ "grad_norm": 0.26764415994065816,
+ "learning_rate": 5.808888876355784e-07,
+ "loss": 0.1098,
+ "step": 1744
+ },
+ {
+ "epoch": 13.1203007518797,
+ "grad_norm": 0.2618724726477309,
+ "learning_rate": 5.797609038898404e-07,
+ "loss": 0.1151,
+ "step": 1745
+ },
+ {
+ "epoch": 13.12781954887218,
+ "grad_norm": 0.2576088711556756,
+ "learning_rate": 5.786335691400788e-07,
+ "loss": 0.0962,
+ "step": 1746
+ },
+ {
+ "epoch": 13.135338345864662,
+ "grad_norm": 0.2766396773694538,
+ "learning_rate": 5.77506885127291e-07,
+ "loss": 0.1088,
+ "step": 1747
+ },
+ {
+ "epoch": 13.142857142857142,
+ "grad_norm": 0.2759334391919586,
+ "learning_rate": 5.763808535914723e-07,
+ "loss": 0.1121,
+ "step": 1748
+ },
+ {
+ "epoch": 13.150375939849624,
+ "grad_norm": 0.27574034577900725,
+ "learning_rate": 5.752554762716073e-07,
+ "loss": 0.115,
+ "step": 1749
+ },
+ {
+ "epoch": 13.157894736842104,
+ "grad_norm": 0.26075329180796586,
+ "learning_rate": 5.741307549056728e-07,
+ "loss": 0.1079,
+ "step": 1750
+ },
+ {
+ "epoch": 13.165413533834586,
+ "grad_norm": 0.2618518523229872,
+ "learning_rate": 5.730066912306309e-07,
+ "loss": 0.1089,
+ "step": 1751
+ },
+ {
+ "epoch": 13.172932330827068,
+ "grad_norm": 0.24862764257124167,
+ "learning_rate": 5.718832869824291e-07,
+ "loss": 0.1139,
+ "step": 1752
+ },
+ {
+ "epoch": 13.180451127819548,
+ "grad_norm": 0.27106909440073496,
+ "learning_rate": 5.707605438959954e-07,
+ "loss": 0.1007,
+ "step": 1753
+ },
+ {
+ "epoch": 13.18796992481203,
+ "grad_norm": 0.36930873708200485,
+ "learning_rate": 5.69638463705238e-07,
+ "loss": 0.1108,
+ "step": 1754
+ },
+ {
+ "epoch": 13.19548872180451,
+ "grad_norm": 0.27450887997622947,
+ "learning_rate": 5.685170481430401e-07,
+ "loss": 0.101,
+ "step": 1755
+ },
+ {
+ "epoch": 13.203007518796992,
+ "grad_norm": 0.2549654769099198,
+ "learning_rate": 5.673962989412598e-07,
+ "loss": 0.1069,
+ "step": 1756
+ },
+ {
+ "epoch": 13.210526315789474,
+ "grad_norm": 0.2737399535335202,
+ "learning_rate": 5.662762178307248e-07,
+ "loss": 0.1047,
+ "step": 1757
+ },
+ {
+ "epoch": 13.218045112781954,
+ "grad_norm": 0.2605666734602985,
+ "learning_rate": 5.651568065412319e-07,
+ "loss": 0.1129,
+ "step": 1758
+ },
+ {
+ "epoch": 13.225563909774436,
+ "grad_norm": 0.26278367572380446,
+ "learning_rate": 5.64038066801543e-07,
+ "loss": 0.1036,
+ "step": 1759
+ },
+ {
+ "epoch": 13.233082706766917,
+ "grad_norm": 0.25211792211505946,
+ "learning_rate": 5.629200003393837e-07,
+ "loss": 0.1044,
+ "step": 1760
+ },
+ {
+ "epoch": 13.240601503759398,
+ "grad_norm": 0.27179307611797815,
+ "learning_rate": 5.618026088814382e-07,
+ "loss": 0.1041,
+ "step": 1761
+ },
+ {
+ "epoch": 13.24812030075188,
+ "grad_norm": 0.2933060323822456,
+ "learning_rate": 5.606858941533503e-07,
+ "loss": 0.1107,
+ "step": 1762
+ },
+ {
+ "epoch": 13.25563909774436,
+ "grad_norm": 0.3569523865661312,
+ "learning_rate": 5.595698578797168e-07,
+ "loss": 0.1057,
+ "step": 1763
+ },
+ {
+ "epoch": 13.263157894736842,
+ "grad_norm": 0.28020127704301623,
+ "learning_rate": 5.584545017840885e-07,
+ "loss": 0.1128,
+ "step": 1764
+ },
+ {
+ "epoch": 13.270676691729323,
+ "grad_norm": 0.2934017718279465,
+ "learning_rate": 5.573398275889638e-07,
+ "loss": 0.1115,
+ "step": 1765
+ },
+ {
+ "epoch": 13.278195488721805,
+ "grad_norm": 0.26067885290087617,
+ "learning_rate": 5.562258370157897e-07,
+ "loss": 0.1121,
+ "step": 1766
+ },
+ {
+ "epoch": 13.285714285714286,
+ "grad_norm": 0.2681216207884683,
+ "learning_rate": 5.551125317849572e-07,
+ "loss": 0.1168,
+ "step": 1767
+ },
+ {
+ "epoch": 13.293233082706767,
+ "grad_norm": 0.25889016452438735,
+ "learning_rate": 5.539999136157976e-07,
+ "loss": 0.1117,
+ "step": 1768
+ },
+ {
+ "epoch": 13.300751879699249,
+ "grad_norm": 0.29446105785813165,
+ "learning_rate": 5.52887984226583e-07,
+ "loss": 0.1115,
+ "step": 1769
+ },
+ {
+ "epoch": 13.308270676691729,
+ "grad_norm": 0.2677305061325937,
+ "learning_rate": 5.517767453345199e-07,
+ "loss": 0.1048,
+ "step": 1770
+ },
+ {
+ "epoch": 13.31578947368421,
+ "grad_norm": 0.2834774205244204,
+ "learning_rate": 5.506661986557503e-07,
+ "loss": 0.1083,
+ "step": 1771
+ },
+ {
+ "epoch": 13.323308270676693,
+ "grad_norm": 0.26395638391950466,
+ "learning_rate": 5.495563459053454e-07,
+ "loss": 0.0916,
+ "step": 1772
+ },
+ {
+ "epoch": 13.330827067669173,
+ "grad_norm": 0.2776406022041351,
+ "learning_rate": 5.484471887973062e-07,
+ "loss": 0.103,
+ "step": 1773
+ },
+ {
+ "epoch": 13.338345864661655,
+ "grad_norm": 0.2505377689091274,
+ "learning_rate": 5.473387290445581e-07,
+ "loss": 0.1048,
+ "step": 1774
+ },
+ {
+ "epoch": 13.345864661654135,
+ "grad_norm": 0.25834153137155175,
+ "learning_rate": 5.46230968358951e-07,
+ "loss": 0.1037,
+ "step": 1775
+ },
+ {
+ "epoch": 13.353383458646617,
+ "grad_norm": 0.26216457399871695,
+ "learning_rate": 5.451239084512536e-07,
+ "loss": 0.1084,
+ "step": 1776
+ },
+ {
+ "epoch": 13.360902255639097,
+ "grad_norm": 0.3093511467568115,
+ "learning_rate": 5.44017551031154e-07,
+ "loss": 0.1091,
+ "step": 1777
+ },
+ {
+ "epoch": 13.368421052631579,
+ "grad_norm": 0.2807421051751322,
+ "learning_rate": 5.429118978072537e-07,
+ "loss": 0.0991,
+ "step": 1778
+ },
+ {
+ "epoch": 13.37593984962406,
+ "grad_norm": 0.2528389394081472,
+ "learning_rate": 5.418069504870684e-07,
+ "loss": 0.0946,
+ "step": 1779
+ },
+ {
+ "epoch": 13.38345864661654,
+ "grad_norm": 0.33319835655302865,
+ "learning_rate": 5.407027107770219e-07,
+ "loss": 0.114,
+ "step": 1780
+ },
+ {
+ "epoch": 13.390977443609023,
+ "grad_norm": 0.2665109474820751,
+ "learning_rate": 5.395991803824469e-07,
+ "loss": 0.1221,
+ "step": 1781
+ },
+ {
+ "epoch": 13.398496240601503,
+ "grad_norm": 0.27445222092025295,
+ "learning_rate": 5.38496361007579e-07,
+ "loss": 0.102,
+ "step": 1782
+ },
+ {
+ "epoch": 13.406015037593985,
+ "grad_norm": 0.3172383861104455,
+ "learning_rate": 5.373942543555575e-07,
+ "loss": 0.1113,
+ "step": 1783
+ },
+ {
+ "epoch": 13.413533834586467,
+ "grad_norm": 0.2715856229062395,
+ "learning_rate": 5.362928621284193e-07,
+ "loss": 0.0991,
+ "step": 1784
+ },
+ {
+ "epoch": 13.421052631578947,
+ "grad_norm": 0.28364928791887806,
+ "learning_rate": 5.351921860270993e-07,
+ "loss": 0.1022,
+ "step": 1785
+ },
+ {
+ "epoch": 13.428571428571429,
+ "grad_norm": 0.2668602584947125,
+ "learning_rate": 5.340922277514257e-07,
+ "loss": 0.108,
+ "step": 1786
+ },
+ {
+ "epoch": 13.436090225563909,
+ "grad_norm": 0.24993478082724865,
+ "learning_rate": 5.329929890001186e-07,
+ "loss": 0.1149,
+ "step": 1787
+ },
+ {
+ "epoch": 13.443609022556391,
+ "grad_norm": 0.28981359535639695,
+ "learning_rate": 5.31894471470786e-07,
+ "loss": 0.1026,
+ "step": 1788
+ },
+ {
+ "epoch": 13.451127819548873,
+ "grad_norm": 0.28223905007612166,
+ "learning_rate": 5.307966768599236e-07,
+ "loss": 0.1121,
+ "step": 1789
+ },
+ {
+ "epoch": 13.458646616541353,
+ "grad_norm": 0.26314490999428214,
+ "learning_rate": 5.296996068629089e-07,
+ "loss": 0.1056,
+ "step": 1790
+ },
+ {
+ "epoch": 13.466165413533835,
+ "grad_norm": 0.29263763239813784,
+ "learning_rate": 5.286032631740023e-07,
+ "loss": 0.1029,
+ "step": 1791
+ },
+ {
+ "epoch": 13.473684210526315,
+ "grad_norm": 0.2593909433398232,
+ "learning_rate": 5.275076474863408e-07,
+ "loss": 0.1038,
+ "step": 1792
+ },
+ {
+ "epoch": 13.481203007518797,
+ "grad_norm": 0.562359898828635,
+ "learning_rate": 5.264127614919373e-07,
+ "loss": 0.0994,
+ "step": 1793
+ },
+ {
+ "epoch": 13.488721804511279,
+ "grad_norm": 0.2979455831834758,
+ "learning_rate": 5.253186068816795e-07,
+ "loss": 0.104,
+ "step": 1794
+ },
+ {
+ "epoch": 13.496240601503759,
+ "grad_norm": 0.30425590361765525,
+ "learning_rate": 5.242251853453232e-07,
+ "loss": 0.1044,
+ "step": 1795
+ },
+ {
+ "epoch": 13.503759398496241,
+ "grad_norm": 0.2830746599255176,
+ "learning_rate": 5.231324985714941e-07,
+ "loss": 0.1038,
+ "step": 1796
+ },
+ {
+ "epoch": 13.511278195488721,
+ "grad_norm": 0.2833421682571021,
+ "learning_rate": 5.220405482476815e-07,
+ "loss": 0.1065,
+ "step": 1797
+ },
+ {
+ "epoch": 13.518796992481203,
+ "grad_norm": 0.3284681932590562,
+ "learning_rate": 5.209493360602392e-07,
+ "loss": 0.1157,
+ "step": 1798
+ },
+ {
+ "epoch": 13.526315789473685,
+ "grad_norm": 0.26861819996795144,
+ "learning_rate": 5.198588636943789e-07,
+ "loss": 0.1045,
+ "step": 1799
+ },
+ {
+ "epoch": 13.533834586466165,
+ "grad_norm": 0.3086633119179222,
+ "learning_rate": 5.187691328341719e-07,
+ "loss": 0.1,
+ "step": 1800
+ },
+ {
+ "epoch": 13.541353383458647,
+ "grad_norm": 0.26326746182028754,
+ "learning_rate": 5.176801451625426e-07,
+ "loss": 0.1024,
+ "step": 1801
+ },
+ {
+ "epoch": 13.548872180451127,
+ "grad_norm": 0.27962967918142356,
+ "learning_rate": 5.16591902361269e-07,
+ "loss": 0.112,
+ "step": 1802
+ },
+ {
+ "epoch": 13.556390977443609,
+ "grad_norm": 0.28283865047407847,
+ "learning_rate": 5.155044061109775e-07,
+ "loss": 0.1025,
+ "step": 1803
+ },
+ {
+ "epoch": 13.563909774436091,
+ "grad_norm": 0.2695219917972873,
+ "learning_rate": 5.14417658091143e-07,
+ "loss": 0.1053,
+ "step": 1804
+ },
+ {
+ "epoch": 13.571428571428571,
+ "grad_norm": 0.26391822739928517,
+ "learning_rate": 5.133316599800832e-07,
+ "loss": 0.1037,
+ "step": 1805
+ },
+ {
+ "epoch": 13.578947368421053,
+ "grad_norm": 0.2688789570839304,
+ "learning_rate": 5.122464134549596e-07,
+ "loss": 0.1102,
+ "step": 1806
+ },
+ {
+ "epoch": 13.586466165413533,
+ "grad_norm": 0.3063735552244718,
+ "learning_rate": 5.111619201917709e-07,
+ "loss": 0.1107,
+ "step": 1807
+ },
+ {
+ "epoch": 13.593984962406015,
+ "grad_norm": 0.2723951277201245,
+ "learning_rate": 5.100781818653547e-07,
+ "loss": 0.1056,
+ "step": 1808
+ },
+ {
+ "epoch": 13.601503759398497,
+ "grad_norm": 0.26137107824391104,
+ "learning_rate": 5.089952001493807e-07,
+ "loss": 0.1067,
+ "step": 1809
+ },
+ {
+ "epoch": 13.609022556390977,
+ "grad_norm": 0.2654366367294703,
+ "learning_rate": 5.079129767163514e-07,
+ "loss": 0.1022,
+ "step": 1810
+ },
+ {
+ "epoch": 13.61654135338346,
+ "grad_norm": 0.26690267430054715,
+ "learning_rate": 5.068315132375975e-07,
+ "loss": 0.1047,
+ "step": 1811
+ },
+ {
+ "epoch": 13.62406015037594,
+ "grad_norm": 0.3344940549805951,
+ "learning_rate": 5.057508113832772e-07,
+ "loss": 0.1039,
+ "step": 1812
+ },
+ {
+ "epoch": 13.631578947368421,
+ "grad_norm": 0.2745663824290679,
+ "learning_rate": 5.046708728223708e-07,
+ "loss": 0.1113,
+ "step": 1813
+ },
+ {
+ "epoch": 13.639097744360903,
+ "grad_norm": 0.2743287594677123,
+ "learning_rate": 5.035916992226815e-07,
+ "loss": 0.1045,
+ "step": 1814
+ },
+ {
+ "epoch": 13.646616541353383,
+ "grad_norm": 0.26371341370675844,
+ "learning_rate": 5.025132922508293e-07,
+ "loss": 0.1052,
+ "step": 1815
+ },
+ {
+ "epoch": 13.654135338345865,
+ "grad_norm": 0.26622788101504946,
+ "learning_rate": 5.014356535722526e-07,
+ "loss": 0.1078,
+ "step": 1816
+ },
+ {
+ "epoch": 13.661654135338345,
+ "grad_norm": 0.2623783024318779,
+ "learning_rate": 5.00358784851201e-07,
+ "loss": 0.0992,
+ "step": 1817
+ },
+ {
+ "epoch": 13.669172932330827,
+ "grad_norm": 0.3279346411514993,
+ "learning_rate": 4.992826877507366e-07,
+ "loss": 0.1104,
+ "step": 1818
+ },
+ {
+ "epoch": 13.676691729323307,
+ "grad_norm": 0.3050126136424657,
+ "learning_rate": 4.982073639327294e-07,
+ "loss": 0.1138,
+ "step": 1819
+ },
+ {
+ "epoch": 13.68421052631579,
+ "grad_norm": 0.27904982408258716,
+ "learning_rate": 4.971328150578539e-07,
+ "loss": 0.0972,
+ "step": 1820
+ },
+ {
+ "epoch": 13.691729323308271,
+ "grad_norm": 0.29217815531248886,
+ "learning_rate": 4.960590427855903e-07,
+ "loss": 0.0955,
+ "step": 1821
+ },
+ {
+ "epoch": 13.699248120300751,
+ "grad_norm": 0.309757537495527,
+ "learning_rate": 4.949860487742173e-07,
+ "loss": 0.1028,
+ "step": 1822
+ },
+ {
+ "epoch": 13.706766917293233,
+ "grad_norm": 0.28428013328191665,
+ "learning_rate": 4.939138346808129e-07,
+ "loss": 0.1041,
+ "step": 1823
+ },
+ {
+ "epoch": 13.714285714285714,
+ "grad_norm": 0.26508566228675506,
+ "learning_rate": 4.928424021612498e-07,
+ "loss": 0.1066,
+ "step": 1824
+ },
+ {
+ "epoch": 13.721804511278195,
+ "grad_norm": 0.2907133647281546,
+ "learning_rate": 4.917717528701949e-07,
+ "loss": 0.102,
+ "step": 1825
+ },
+ {
+ "epoch": 13.729323308270677,
+ "grad_norm": 0.27142149446522157,
+ "learning_rate": 4.907018884611039e-07,
+ "loss": 0.1217,
+ "step": 1826
+ },
+ {
+ "epoch": 13.736842105263158,
+ "grad_norm": 0.27122107345452673,
+ "learning_rate": 4.896328105862218e-07,
+ "loss": 0.1041,
+ "step": 1827
+ },
+ {
+ "epoch": 13.74436090225564,
+ "grad_norm": 0.2598869642841013,
+ "learning_rate": 4.885645208965778e-07,
+ "loss": 0.1157,
+ "step": 1828
+ },
+ {
+ "epoch": 13.75187969924812,
+ "grad_norm": 0.25748903921446525,
+ "learning_rate": 4.874970210419851e-07,
+ "loss": 0.1028,
+ "step": 1829
+ },
+ {
+ "epoch": 13.759398496240602,
+ "grad_norm": 0.2767372261250432,
+ "learning_rate": 4.864303126710356e-07,
+ "loss": 0.1023,
+ "step": 1830
+ },
+ {
+ "epoch": 13.766917293233083,
+ "grad_norm": 0.27301721876417595,
+ "learning_rate": 4.853643974311003e-07,
+ "loss": 0.1121,
+ "step": 1831
+ },
+ {
+ "epoch": 13.774436090225564,
+ "grad_norm": 0.3144288606643367,
+ "learning_rate": 4.842992769683242e-07,
+ "loss": 0.1029,
+ "step": 1832
+ },
+ {
+ "epoch": 13.781954887218046,
+ "grad_norm": 0.28759294215435827,
+ "learning_rate": 4.832349529276262e-07,
+ "loss": 0.1022,
+ "step": 1833
+ },
+ {
+ "epoch": 13.789473684210526,
+ "grad_norm": 0.3100301156784421,
+ "learning_rate": 4.821714269526934e-07,
+ "loss": 0.1098,
+ "step": 1834
+ },
+ {
+ "epoch": 13.796992481203008,
+ "grad_norm": 0.28203430597750945,
+ "learning_rate": 4.811087006859823e-07,
+ "loss": 0.1025,
+ "step": 1835
+ },
+ {
+ "epoch": 13.80451127819549,
+ "grad_norm": 0.317931443880216,
+ "learning_rate": 4.80046775768713e-07,
+ "loss": 0.1059,
+ "step": 1836
+ },
+ {
+ "epoch": 13.81203007518797,
+ "grad_norm": 0.2710248850451507,
+ "learning_rate": 4.78985653840869e-07,
+ "loss": 0.1076,
+ "step": 1837
+ },
+ {
+ "epoch": 13.819548872180452,
+ "grad_norm": 0.3011816185151925,
+ "learning_rate": 4.779253365411926e-07,
+ "loss": 0.1036,
+ "step": 1838
+ },
+ {
+ "epoch": 13.827067669172932,
+ "grad_norm": 0.34590877766102024,
+ "learning_rate": 4.768658255071851e-07,
+ "loss": 0.1065,
+ "step": 1839
+ },
+ {
+ "epoch": 13.834586466165414,
+ "grad_norm": 0.2731252345998902,
+ "learning_rate": 4.7580712237510056e-07,
+ "loss": 0.0981,
+ "step": 1840
+ },
+ {
+ "epoch": 13.842105263157894,
+ "grad_norm": 0.27864587337228863,
+ "learning_rate": 4.747492287799475e-07,
+ "loss": 0.1026,
+ "step": 1841
+ },
+ {
+ "epoch": 13.849624060150376,
+ "grad_norm": 0.27643374190236486,
+ "learning_rate": 4.7369214635548237e-07,
+ "loss": 0.107,
+ "step": 1842
+ },
+ {
+ "epoch": 13.857142857142858,
+ "grad_norm": 0.2831120960173783,
+ "learning_rate": 4.726358767342106e-07,
+ "loss": 0.1042,
+ "step": 1843
+ },
+ {
+ "epoch": 13.864661654135338,
+ "grad_norm": 0.3628768177626083,
+ "learning_rate": 4.715804215473809e-07,
+ "loss": 0.106,
+ "step": 1844
+ },
+ {
+ "epoch": 13.87218045112782,
+ "grad_norm": 0.2706793489395575,
+ "learning_rate": 4.705257824249845e-07,
+ "loss": 0.1118,
+ "step": 1845
+ },
+ {
+ "epoch": 13.8796992481203,
+ "grad_norm": 0.29936743208597866,
+ "learning_rate": 4.6947196099575345e-07,
+ "loss": 0.1012,
+ "step": 1846
+ },
+ {
+ "epoch": 13.887218045112782,
+ "grad_norm": 0.30118556593821616,
+ "learning_rate": 4.6841895888715546e-07,
+ "loss": 0.1118,
+ "step": 1847
+ },
+ {
+ "epoch": 13.894736842105264,
+ "grad_norm": 0.2748959842742476,
+ "learning_rate": 4.6736677772539435e-07,
+ "loss": 0.1129,
+ "step": 1848
+ },
+ {
+ "epoch": 13.902255639097744,
+ "grad_norm": 0.7706019751567299,
+ "learning_rate": 4.6631541913540474e-07,
+ "loss": 0.1101,
+ "step": 1849
+ },
+ {
+ "epoch": 13.909774436090226,
+ "grad_norm": 0.2733789585477752,
+ "learning_rate": 4.6526488474085224e-07,
+ "loss": 0.1066,
+ "step": 1850
+ },
+ {
+ "epoch": 13.917293233082706,
+ "grad_norm": 0.2726939619996214,
+ "learning_rate": 4.642151761641282e-07,
+ "loss": 0.1074,
+ "step": 1851
+ },
+ {
+ "epoch": 13.924812030075188,
+ "grad_norm": 0.2865043075262762,
+ "learning_rate": 4.631662950263502e-07,
+ "loss": 0.1147,
+ "step": 1852
+ },
+ {
+ "epoch": 13.93233082706767,
+ "grad_norm": 0.2788483081273172,
+ "learning_rate": 4.6211824294735647e-07,
+ "loss": 0.1088,
+ "step": 1853
+ },
+ {
+ "epoch": 13.93984962406015,
+ "grad_norm": 0.27844248078049194,
+ "learning_rate": 4.610710215457061e-07,
+ "loss": 0.1042,
+ "step": 1854
+ },
+ {
+ "epoch": 13.947368421052632,
+ "grad_norm": 0.2955140455318268,
+ "learning_rate": 4.6002463243867416e-07,
+ "loss": 0.1099,
+ "step": 1855
+ },
+ {
+ "epoch": 13.954887218045112,
+ "grad_norm": 0.2988158598923095,
+ "learning_rate": 4.5897907724225183e-07,
+ "loss": 0.1049,
+ "step": 1856
+ },
+ {
+ "epoch": 13.962406015037594,
+ "grad_norm": 0.3141112937892314,
+ "learning_rate": 4.5793435757114076e-07,
+ "loss": 0.1051,
+ "step": 1857
+ },
+ {
+ "epoch": 13.969924812030076,
+ "grad_norm": 0.3104762464526384,
+ "learning_rate": 4.5689047503875376e-07,
+ "loss": 0.099,
+ "step": 1858
+ },
+ {
+ "epoch": 13.977443609022556,
+ "grad_norm": 0.29403186885282073,
+ "learning_rate": 4.558474312572095e-07,
+ "loss": 0.1004,
+ "step": 1859
+ },
+ {
+ "epoch": 13.984962406015038,
+ "grad_norm": 0.2761847235541301,
+ "learning_rate": 4.5480522783733265e-07,
+ "loss": 0.1114,
+ "step": 1860
+ },
+ {
+ "epoch": 13.992481203007518,
+ "grad_norm": 0.2793640354092514,
+ "learning_rate": 4.5376386638864874e-07,
+ "loss": 0.1058,
+ "step": 1861
+ },
+ {
+ "epoch": 14.0,
+ "grad_norm": 0.2844650014487814,
+ "learning_rate": 4.527233485193843e-07,
+ "loss": 0.1072,
+ "step": 1862
+ },
+ {
+ "epoch": 14.0,
+ "eval_loss": 0.3249731957912445,
+ "eval_runtime": 35.8191,
+ "eval_samples_per_second": 12.479,
+ "eval_steps_per_second": 0.195,
+ "step": 1862
+ },
+ {
+ "epoch": 14.007518796992482,
+ "grad_norm": 0.2772428235093183,
+ "learning_rate": 4.5168367583646173e-07,
+ "loss": 0.0959,
+ "step": 1863
+ },
+ {
+ "epoch": 14.015037593984962,
+ "grad_norm": 0.28069634678030514,
+ "learning_rate": 4.5064484994549955e-07,
+ "loss": 0.0955,
+ "step": 1864
+ },
+ {
+ "epoch": 14.022556390977444,
+ "grad_norm": 0.2763800076221122,
+ "learning_rate": 4.496068724508072e-07,
+ "loss": 0.1004,
+ "step": 1865
+ },
+ {
+ "epoch": 14.030075187969924,
+ "grad_norm": 0.29648435261472267,
+ "learning_rate": 4.4856974495538527e-07,
+ "loss": 0.1018,
+ "step": 1866
+ },
+ {
+ "epoch": 14.037593984962406,
+ "grad_norm": 0.2839863744024837,
+ "learning_rate": 4.4753346906092006e-07,
+ "loss": 0.0993,
+ "step": 1867
+ },
+ {
+ "epoch": 14.045112781954888,
+ "grad_norm": 0.2901565121520646,
+ "learning_rate": 4.4649804636778455e-07,
+ "loss": 0.0972,
+ "step": 1868
+ },
+ {
+ "epoch": 14.052631578947368,
+ "grad_norm": 0.2701263699386613,
+ "learning_rate": 4.454634784750322e-07,
+ "loss": 0.1077,
+ "step": 1869
+ },
+ {
+ "epoch": 14.06015037593985,
+ "grad_norm": 0.2820299193868105,
+ "learning_rate": 4.4442976698039803e-07,
+ "loss": 0.103,
+ "step": 1870
+ },
+ {
+ "epoch": 14.06766917293233,
+ "grad_norm": 0.2833934881301805,
+ "learning_rate": 4.4339691348029297e-07,
+ "loss": 0.1021,
+ "step": 1871
+ },
+ {
+ "epoch": 14.075187969924812,
+ "grad_norm": 0.2603978924966974,
+ "learning_rate": 4.4236491956980415e-07,
+ "loss": 0.0976,
+ "step": 1872
+ },
+ {
+ "epoch": 14.082706766917294,
+ "grad_norm": 0.28774181783947256,
+ "learning_rate": 4.4133378684269086e-07,
+ "loss": 0.0893,
+ "step": 1873
+ },
+ {
+ "epoch": 14.090225563909774,
+ "grad_norm": 0.2824427095253245,
+ "learning_rate": 4.403035168913817e-07,
+ "loss": 0.1084,
+ "step": 1874
+ },
+ {
+ "epoch": 14.097744360902256,
+ "grad_norm": 0.28680839329603397,
+ "learning_rate": 4.3927411130697403e-07,
+ "loss": 0.1066,
+ "step": 1875
+ },
+ {
+ "epoch": 14.105263157894736,
+ "grad_norm": 0.3082715544000979,
+ "learning_rate": 4.38245571679229e-07,
+ "loss": 0.1061,
+ "step": 1876
+ },
+ {
+ "epoch": 14.112781954887218,
+ "grad_norm": 0.27074568389137676,
+ "learning_rate": 4.3721789959657186e-07,
+ "loss": 0.1,
+ "step": 1877
+ },
+ {
+ "epoch": 14.1203007518797,
+ "grad_norm": 0.2915828920792721,
+ "learning_rate": 4.3619109664608655e-07,
+ "loss": 0.0898,
+ "step": 1878
+ },
+ {
+ "epoch": 14.12781954887218,
+ "grad_norm": 0.26771322191832353,
+ "learning_rate": 4.351651644135164e-07,
+ "loss": 0.097,
+ "step": 1879
+ },
+ {
+ "epoch": 14.135338345864662,
+ "grad_norm": 0.2877484626004952,
+ "learning_rate": 4.3414010448325824e-07,
+ "loss": 0.1044,
+ "step": 1880
+ },
+ {
+ "epoch": 14.142857142857142,
+ "grad_norm": 0.29233470022624347,
+ "learning_rate": 4.331159184383636e-07,
+ "loss": 0.0982,
+ "step": 1881
+ },
+ {
+ "epoch": 14.150375939849624,
+ "grad_norm": 0.27435451967269325,
+ "learning_rate": 4.3209260786053283e-07,
+ "loss": 0.0944,
+ "step": 1882
+ },
+ {
+ "epoch": 14.157894736842104,
+ "grad_norm": 0.25927549514222314,
+ "learning_rate": 4.310701743301156e-07,
+ "loss": 0.1005,
+ "step": 1883
+ },
+ {
+ "epoch": 14.165413533834586,
+ "grad_norm": 0.2637610785831941,
+ "learning_rate": 4.3004861942610573e-07,
+ "loss": 0.1001,
+ "step": 1884
+ },
+ {
+ "epoch": 14.172932330827068,
+ "grad_norm": 0.27268272634421953,
+ "learning_rate": 4.290279447261417e-07,
+ "loss": 0.0988,
+ "step": 1885
+ },
+ {
+ "epoch": 14.180451127819548,
+ "grad_norm": 0.2685126112253308,
+ "learning_rate": 4.28008151806501e-07,
+ "loss": 0.0961,
+ "step": 1886
+ },
+ {
+ "epoch": 14.18796992481203,
+ "grad_norm": 0.25816612059204147,
+ "learning_rate": 4.2698924224210085e-07,
+ "loss": 0.0907,
+ "step": 1887
+ },
+ {
+ "epoch": 14.19548872180451,
+ "grad_norm": 0.26365820702765486,
+ "learning_rate": 4.25971217606493e-07,
+ "loss": 0.0999,
+ "step": 1888
+ },
+ {
+ "epoch": 14.203007518796992,
+ "grad_norm": 0.2668687484156792,
+ "learning_rate": 4.2495407947186377e-07,
+ "loss": 0.1026,
+ "step": 1889
+ },
+ {
+ "epoch": 14.210526315789474,
+ "grad_norm": 0.287635579320255,
+ "learning_rate": 4.239378294090291e-07,
+ "loss": 0.1017,
+ "step": 1890
+ },
+ {
+ "epoch": 14.218045112781954,
+ "grad_norm": 0.44430151190004286,
+ "learning_rate": 4.229224689874349e-07,
+ "loss": 0.1072,
+ "step": 1891
+ },
+ {
+ "epoch": 14.225563909774436,
+ "grad_norm": 0.2685104843842998,
+ "learning_rate": 4.2190799977515145e-07,
+ "loss": 0.1067,
+ "step": 1892
+ },
+ {
+ "epoch": 14.233082706766917,
+ "grad_norm": 0.26742413531771,
+ "learning_rate": 4.208944233388745e-07,
+ "loss": 0.0973,
+ "step": 1893
+ },
+ {
+ "epoch": 14.240601503759398,
+ "grad_norm": 0.2862678655693838,
+ "learning_rate": 4.1988174124391927e-07,
+ "loss": 0.0932,
+ "step": 1894
+ },
+ {
+ "epoch": 14.24812030075188,
+ "grad_norm": 0.3073062806051085,
+ "learning_rate": 4.1886995505422174e-07,
+ "loss": 0.0957,
+ "step": 1895
+ },
+ {
+ "epoch": 14.25563909774436,
+ "grad_norm": 0.29149929958597587,
+ "learning_rate": 4.178590663323323e-07,
+ "loss": 0.0996,
+ "step": 1896
+ },
+ {
+ "epoch": 14.263157894736842,
+ "grad_norm": 0.28961768579254377,
+ "learning_rate": 4.1684907663941703e-07,
+ "loss": 0.1067,
+ "step": 1897
+ },
+ {
+ "epoch": 14.270676691729323,
+ "grad_norm": 0.2823371707249106,
+ "learning_rate": 4.158399875352525e-07,
+ "loss": 0.0953,
+ "step": 1898
+ },
+ {
+ "epoch": 14.278195488721805,
+ "grad_norm": 0.3650480798644846,
+ "learning_rate": 4.1483180057822453e-07,
+ "loss": 0.0974,
+ "step": 1899
+ },
+ {
+ "epoch": 14.285714285714286,
+ "grad_norm": 0.2736703858255706,
+ "learning_rate": 4.138245173253266e-07,
+ "loss": 0.0916,
+ "step": 1900
+ },
+ {
+ "epoch": 14.293233082706767,
+ "grad_norm": 0.3176472637293826,
+ "learning_rate": 4.128181393321554e-07,
+ "loss": 0.0897,
+ "step": 1901
+ },
+ {
+ "epoch": 14.300751879699249,
+ "grad_norm": 0.33091745266677913,
+ "learning_rate": 4.118126681529107e-07,
+ "loss": 0.0932,
+ "step": 1902
+ },
+ {
+ "epoch": 14.308270676691729,
+ "grad_norm": 0.28409284949708696,
+ "learning_rate": 4.108081053403906e-07,
+ "loss": 0.1039,
+ "step": 1903
+ },
+ {
+ "epoch": 14.31578947368421,
+ "grad_norm": 0.2734673721335313,
+ "learning_rate": 4.0980445244599173e-07,
+ "loss": 0.1062,
+ "step": 1904
+ },
+ {
+ "epoch": 14.323308270676693,
+ "grad_norm": 0.28007479707476457,
+ "learning_rate": 4.0880171101970407e-07,
+ "loss": 0.1004,
+ "step": 1905
+ },
+ {
+ "epoch": 14.330827067669173,
+ "grad_norm": 0.27699631119493273,
+ "learning_rate": 4.0779988261011146e-07,
+ "loss": 0.0959,
+ "step": 1906
+ },
+ {
+ "epoch": 14.338345864661655,
+ "grad_norm": 0.27118135361940104,
+ "learning_rate": 4.067989687643861e-07,
+ "loss": 0.0984,
+ "step": 1907
+ },
+ {
+ "epoch": 14.345864661654135,
+ "grad_norm": 0.2656939579151871,
+ "learning_rate": 4.0579897102828965e-07,
+ "loss": 0.0992,
+ "step": 1908
+ },
+ {
+ "epoch": 14.353383458646617,
+ "grad_norm": 0.3002248556908912,
+ "learning_rate": 4.047998909461668e-07,
+ "loss": 0.0982,
+ "step": 1909
+ },
+ {
+ "epoch": 14.360902255639097,
+ "grad_norm": 0.311043047586697,
+ "learning_rate": 4.0380173006094744e-07,
+ "loss": 0.1014,
+ "step": 1910
+ },
+ {
+ "epoch": 14.368421052631579,
+ "grad_norm": 0.28327922202429084,
+ "learning_rate": 4.028044899141396e-07,
+ "loss": 0.1051,
+ "step": 1911
+ },
+ {
+ "epoch": 14.37593984962406,
+ "grad_norm": 0.27961040946847976,
+ "learning_rate": 4.0180817204583127e-07,
+ "loss": 0.1018,
+ "step": 1912
+ },
+ {
+ "epoch": 14.38345864661654,
+ "grad_norm": 0.3358139486610882,
+ "learning_rate": 4.0081277799468473e-07,
+ "loss": 0.0975,
+ "step": 1913
+ },
+ {
+ "epoch": 14.390977443609023,
+ "grad_norm": 0.27741743792502754,
+ "learning_rate": 3.998183092979367e-07,
+ "loss": 0.0934,
+ "step": 1914
+ },
+ {
+ "epoch": 14.398496240601503,
+ "grad_norm": 0.269529980014983,
+ "learning_rate": 3.988247674913935e-07,
+ "loss": 0.1031,
+ "step": 1915
+ },
+ {
+ "epoch": 14.406015037593985,
+ "grad_norm": 0.2689398251356694,
+ "learning_rate": 3.978321541094317e-07,
+ "loss": 0.1033,
+ "step": 1916
+ },
+ {
+ "epoch": 14.413533834586467,
+ "grad_norm": 0.26749865547021234,
+ "learning_rate": 3.9684047068499227e-07,
+ "loss": 0.106,
+ "step": 1917
+ },
+ {
+ "epoch": 14.421052631578947,
+ "grad_norm": 0.26704964743291704,
+ "learning_rate": 3.958497187495815e-07,
+ "loss": 0.1082,
+ "step": 1918
+ },
+ {
+ "epoch": 14.428571428571429,
+ "grad_norm": 0.2666826301922148,
+ "learning_rate": 3.9485989983326605e-07,
+ "loss": 0.1002,
+ "step": 1919
+ },
+ {
+ "epoch": 14.436090225563909,
+ "grad_norm": 0.2612587734822808,
+ "learning_rate": 3.938710154646726e-07,
+ "loss": 0.0911,
+ "step": 1920
+ },
+ {
+ "epoch": 14.443609022556391,
+ "grad_norm": 0.2693006655400547,
+ "learning_rate": 3.928830671709835e-07,
+ "loss": 0.1055,
+ "step": 1921
+ },
+ {
+ "epoch": 14.451127819548873,
+ "grad_norm": 0.26146416319935456,
+ "learning_rate": 3.918960564779368e-07,
+ "loss": 0.0951,
+ "step": 1922
+ },
+ {
+ "epoch": 14.458646616541353,
+ "grad_norm": 0.29444919614180665,
+ "learning_rate": 3.9090998490982116e-07,
+ "loss": 0.1077,
+ "step": 1923
+ },
+ {
+ "epoch": 14.466165413533835,
+ "grad_norm": 0.28091516040670966,
+ "learning_rate": 3.8992485398947563e-07,
+ "loss": 0.0989,
+ "step": 1924
+ },
+ {
+ "epoch": 14.473684210526315,
+ "grad_norm": 0.2653773899616918,
+ "learning_rate": 3.8894066523828706e-07,
+ "loss": 0.104,
+ "step": 1925
+ },
+ {
+ "epoch": 14.481203007518797,
+ "grad_norm": 0.27427587959441646,
+ "learning_rate": 3.879574201761858e-07,
+ "loss": 0.0989,
+ "step": 1926
+ },
+ {
+ "epoch": 14.488721804511279,
+ "grad_norm": 0.26805172383889997,
+ "learning_rate": 3.869751203216468e-07,
+ "loss": 0.093,
+ "step": 1927
+ },
+ {
+ "epoch": 14.496240601503759,
+ "grad_norm": 0.28070387935118224,
+ "learning_rate": 3.8599376719168317e-07,
+ "loss": 0.1039,
+ "step": 1928
+ },
+ {
+ "epoch": 14.503759398496241,
+ "grad_norm": 0.2770213618054806,
+ "learning_rate": 3.8501336230184786e-07,
+ "loss": 0.0927,
+ "step": 1929
+ },
+ {
+ "epoch": 14.511278195488721,
+ "grad_norm": 0.29597652278859105,
+ "learning_rate": 3.8403390716622785e-07,
+ "loss": 0.1022,
+ "step": 1930
+ },
+ {
+ "epoch": 14.518796992481203,
+ "grad_norm": 0.26274966348575374,
+ "learning_rate": 3.8305540329744456e-07,
+ "loss": 0.0934,
+ "step": 1931
+ },
+ {
+ "epoch": 14.526315789473685,
+ "grad_norm": 0.5346051124147657,
+ "learning_rate": 3.8207785220664934e-07,
+ "loss": 0.117,
+ "step": 1932
+ },
+ {
+ "epoch": 14.533834586466165,
+ "grad_norm": 0.2707728027209683,
+ "learning_rate": 3.811012554035231e-07,
+ "loss": 0.0962,
+ "step": 1933
+ },
+ {
+ "epoch": 14.541353383458647,
+ "grad_norm": 0.27719619278678015,
+ "learning_rate": 3.801256143962719e-07,
+ "loss": 0.1064,
+ "step": 1934
+ },
+ {
+ "epoch": 14.548872180451127,
+ "grad_norm": 0.4493555888976681,
+ "learning_rate": 3.7915093069162685e-07,
+ "loss": 0.0975,
+ "step": 1935
+ },
+ {
+ "epoch": 14.556390977443609,
+ "grad_norm": 0.2676120839050664,
+ "learning_rate": 3.7817720579483956e-07,
+ "loss": 0.0954,
+ "step": 1936
+ },
+ {
+ "epoch": 14.563909774436091,
+ "grad_norm": 0.27839776427106766,
+ "learning_rate": 3.772044412096821e-07,
+ "loss": 0.0948,
+ "step": 1937
+ },
+ {
+ "epoch": 14.571428571428571,
+ "grad_norm": 0.25667322842469137,
+ "learning_rate": 3.762326384384421e-07,
+ "loss": 0.0935,
+ "step": 1938
+ },
+ {
+ "epoch": 14.578947368421053,
+ "grad_norm": 0.27640370592929936,
+ "learning_rate": 3.752617989819232e-07,
+ "loss": 0.105,
+ "step": 1939
+ },
+ {
+ "epoch": 14.586466165413533,
+ "grad_norm": 0.581606271951674,
+ "learning_rate": 3.7429192433944013e-07,
+ "loss": 0.1,
+ "step": 1940
+ },
+ {
+ "epoch": 14.593984962406015,
+ "grad_norm": 0.2678696287480729,
+ "learning_rate": 3.7332301600881866e-07,
+ "loss": 0.1089,
+ "step": 1941
+ },
+ {
+ "epoch": 14.601503759398497,
+ "grad_norm": 0.277177174125625,
+ "learning_rate": 3.723550754863912e-07,
+ "loss": 0.093,
+ "step": 1942
+ },
+ {
+ "epoch": 14.609022556390977,
+ "grad_norm": 0.2794245562874899,
+ "learning_rate": 3.7138810426699675e-07,
+ "loss": 0.095,
+ "step": 1943
+ },
+ {
+ "epoch": 14.61654135338346,
+ "grad_norm": 0.2816047017715223,
+ "learning_rate": 3.7042210384397586e-07,
+ "loss": 0.0967,
+ "step": 1944
+ },
+ {
+ "epoch": 14.62406015037594,
+ "grad_norm": 0.2962558872955175,
+ "learning_rate": 3.694570757091715e-07,
+ "loss": 0.0958,
+ "step": 1945
+ },
+ {
+ "epoch": 14.631578947368421,
+ "grad_norm": 0.27744910550577906,
+ "learning_rate": 3.6849302135292346e-07,
+ "loss": 0.0938,
+ "step": 1946
+ },
+ {
+ "epoch": 14.639097744360903,
+ "grad_norm": 0.3533146980576209,
+ "learning_rate": 3.675299422640693e-07,
+ "loss": 0.0992,
+ "step": 1947
+ },
+ {
+ "epoch": 14.646616541353383,
+ "grad_norm": 0.2716122707054781,
+ "learning_rate": 3.6656783992993876e-07,
+ "loss": 0.1092,
+ "step": 1948
+ },
+ {
+ "epoch": 14.654135338345865,
+ "grad_norm": 0.28993525741924153,
+ "learning_rate": 3.656067158363546e-07,
+ "loss": 0.1009,
+ "step": 1949
+ },
+ {
+ "epoch": 14.661654135338345,
+ "grad_norm": 0.27781440464402096,
+ "learning_rate": 3.64646571467628e-07,
+ "loss": 0.1032,
+ "step": 1950
+ },
+ {
+ "epoch": 14.669172932330827,
+ "grad_norm": 0.41107032531341664,
+ "learning_rate": 3.6368740830655686e-07,
+ "loss": 0.1025,
+ "step": 1951
+ },
+ {
+ "epoch": 14.676691729323307,
+ "grad_norm": 0.26861181550851154,
+ "learning_rate": 3.6272922783442494e-07,
+ "loss": 0.0954,
+ "step": 1952
+ },
+ {
+ "epoch": 14.68421052631579,
+ "grad_norm": 0.35870150930457406,
+ "learning_rate": 3.617720315309968e-07,
+ "loss": 0.1024,
+ "step": 1953
+ },
+ {
+ "epoch": 14.691729323308271,
+ "grad_norm": 0.31112074052747246,
+ "learning_rate": 3.608158208745187e-07,
+ "loss": 0.0913,
+ "step": 1954
+ },
+ {
+ "epoch": 14.699248120300751,
+ "grad_norm": 0.26922996705696767,
+ "learning_rate": 3.5986059734171336e-07,
+ "loss": 0.0928,
+ "step": 1955
+ },
+ {
+ "epoch": 14.706766917293233,
+ "grad_norm": 0.2639238740360856,
+ "learning_rate": 3.5890636240778015e-07,
+ "loss": 0.1028,
+ "step": 1956
+ },
+ {
+ "epoch": 14.714285714285714,
+ "grad_norm": 0.30528255792617914,
+ "learning_rate": 3.579531175463906e-07,
+ "loss": 0.0952,
+ "step": 1957
+ },
+ {
+ "epoch": 14.721804511278195,
+ "grad_norm": 0.3846668675938827,
+ "learning_rate": 3.5700086422968843e-07,
+ "loss": 0.1001,
+ "step": 1958
+ },
+ {
+ "epoch": 14.729323308270677,
+ "grad_norm": 0.33426809135385077,
+ "learning_rate": 3.5604960392828475e-07,
+ "loss": 0.1065,
+ "step": 1959
+ },
+ {
+ "epoch": 14.736842105263158,
+ "grad_norm": 0.30597735943065557,
+ "learning_rate": 3.550993381112585e-07,
+ "loss": 0.103,
+ "step": 1960
+ },
+ {
+ "epoch": 14.74436090225564,
+ "grad_norm": 0.28485143259308837,
+ "learning_rate": 3.5415006824615133e-07,
+ "loss": 0.0919,
+ "step": 1961
+ },
+ {
+ "epoch": 14.75187969924812,
+ "grad_norm": 0.31121558521827825,
+ "learning_rate": 3.5320179579896834e-07,
+ "loss": 0.1066,
+ "step": 1962
+ },
+ {
+ "epoch": 14.759398496240602,
+ "grad_norm": 0.29786753048811254,
+ "learning_rate": 3.522545222341726e-07,
+ "loss": 0.1045,
+ "step": 1963
+ },
+ {
+ "epoch": 14.766917293233083,
+ "grad_norm": 0.286589665075983,
+ "learning_rate": 3.513082490146864e-07,
+ "loss": 0.1024,
+ "step": 1964
+ },
+ {
+ "epoch": 14.774436090225564,
+ "grad_norm": 0.2913863852762978,
+ "learning_rate": 3.5036297760188517e-07,
+ "loss": 0.0929,
+ "step": 1965
+ },
+ {
+ "epoch": 14.781954887218046,
+ "grad_norm": 0.28187505351561654,
+ "learning_rate": 3.4941870945559905e-07,
+ "loss": 0.1048,
+ "step": 1966
+ },
+ {
+ "epoch": 14.789473684210526,
+ "grad_norm": 0.2854506203825778,
+ "learning_rate": 3.4847544603410727e-07,
+ "loss": 0.1014,
+ "step": 1967
+ },
+ {
+ "epoch": 14.796992481203008,
+ "grad_norm": 0.3614169350633697,
+ "learning_rate": 3.475331887941387e-07,
+ "loss": 0.1079,
+ "step": 1968
+ },
+ {
+ "epoch": 14.80451127819549,
+ "grad_norm": 0.2819938946852024,
+ "learning_rate": 3.4659193919086715e-07,
+ "loss": 0.095,
+ "step": 1969
+ },
+ {
+ "epoch": 14.81203007518797,
+ "grad_norm": 0.5594999815144529,
+ "learning_rate": 3.4565169867791143e-07,
+ "loss": 0.0999,
+ "step": 1970
+ },
+ {
+ "epoch": 14.819548872180452,
+ "grad_norm": 0.4709483164193779,
+ "learning_rate": 3.447124687073306e-07,
+ "loss": 0.0936,
+ "step": 1971
+ },
+ {
+ "epoch": 14.827067669172932,
+ "grad_norm": 0.28134530188517304,
+ "learning_rate": 3.437742507296246e-07,
+ "loss": 0.0993,
+ "step": 1972
+ },
+ {
+ "epoch": 14.834586466165414,
+ "grad_norm": 0.2904354516800576,
+ "learning_rate": 3.428370461937291e-07,
+ "loss": 0.1045,
+ "step": 1973
+ },
+ {
+ "epoch": 14.842105263157894,
+ "grad_norm": 0.3231425222606742,
+ "learning_rate": 3.4190085654701604e-07,
+ "loss": 0.1001,
+ "step": 1974
+ },
+ {
+ "epoch": 14.849624060150376,
+ "grad_norm": 0.2729990046621249,
+ "learning_rate": 3.409656832352885e-07,
+ "loss": 0.1037,
+ "step": 1975
+ },
+ {
+ "epoch": 14.857142857142858,
+ "grad_norm": 0.2823431096775265,
+ "learning_rate": 3.400315277027812e-07,
+ "loss": 0.0886,
+ "step": 1976
+ },
+ {
+ "epoch": 14.864661654135338,
+ "grad_norm": 0.27152881777588644,
+ "learning_rate": 3.3909839139215704e-07,
+ "loss": 0.0964,
+ "step": 1977
+ },
+ {
+ "epoch": 14.87218045112782,
+ "grad_norm": 0.2725934994376035,
+ "learning_rate": 3.3816627574450364e-07,
+ "loss": 0.1074,
+ "step": 1978
+ },
+ {
+ "epoch": 14.8796992481203,
+ "grad_norm": 0.26819974784916495,
+ "learning_rate": 3.3723518219933387e-07,
+ "loss": 0.1033,
+ "step": 1979
+ },
+ {
+ "epoch": 14.887218045112782,
+ "grad_norm": 0.584859360315101,
+ "learning_rate": 3.363051121945809e-07,
+ "loss": 0.0938,
+ "step": 1980
+ },
+ {
+ "epoch": 14.894736842105264,
+ "grad_norm": 0.2821885537624927,
+ "learning_rate": 3.3537606716659836e-07,
+ "loss": 0.1022,
+ "step": 1981
+ },
+ {
+ "epoch": 14.902255639097744,
+ "grad_norm": 0.2852182896063876,
+ "learning_rate": 3.3444804855015573e-07,
+ "loss": 0.1039,
+ "step": 1982
+ },
+ {
+ "epoch": 14.909774436090226,
+ "grad_norm": 0.3003027438452993,
+ "learning_rate": 3.3352105777843853e-07,
+ "loss": 0.1039,
+ "step": 1983
+ },
+ {
+ "epoch": 14.917293233082706,
+ "grad_norm": 0.3192808576421222,
+ "learning_rate": 3.3259509628304363e-07,
+ "loss": 0.102,
+ "step": 1984
+ },
+ {
+ "epoch": 14.924812030075188,
+ "grad_norm": 0.30230936884955006,
+ "learning_rate": 3.3167016549397984e-07,
+ "loss": 0.0951,
+ "step": 1985
+ },
+ {
+ "epoch": 14.93233082706767,
+ "grad_norm": 0.41342932410766403,
+ "learning_rate": 3.307462668396628e-07,
+ "loss": 0.0983,
+ "step": 1986
+ },
+ {
+ "epoch": 14.93984962406015,
+ "grad_norm": 0.30141586275874444,
+ "learning_rate": 3.298234017469154e-07,
+ "loss": 0.097,
+ "step": 1987
+ },
+ {
+ "epoch": 14.947368421052632,
+ "grad_norm": 0.27828571111927036,
+ "learning_rate": 3.289015716409631e-07,
+ "loss": 0.095,
+ "step": 1988
+ },
+ {
+ "epoch": 14.954887218045112,
+ "grad_norm": 0.26772178272140995,
+ "learning_rate": 3.279807779454342e-07,
+ "loss": 0.1052,
+ "step": 1989
+ },
+ {
+ "epoch": 14.962406015037594,
+ "grad_norm": 0.28522850785031534,
+ "learning_rate": 3.270610220823553e-07,
+ "loss": 0.095,
+ "step": 1990
+ },
+ {
+ "epoch": 14.969924812030076,
+ "grad_norm": 0.28067092500092355,
+ "learning_rate": 3.261423054721515e-07,
+ "loss": 0.0983,
+ "step": 1991
+ },
+ {
+ "epoch": 14.977443609022556,
+ "grad_norm": 0.2796765827257035,
+ "learning_rate": 3.2522462953364125e-07,
+ "loss": 0.0984,
+ "step": 1992
+ },
+ {
+ "epoch": 14.984962406015038,
+ "grad_norm": 0.2822802625856575,
+ "learning_rate": 3.2430799568403776e-07,
+ "loss": 0.0974,
+ "step": 1993
+ },
+ {
+ "epoch": 14.992481203007518,
+ "grad_norm": 0.26996070486014134,
+ "learning_rate": 3.233924053389432e-07,
+ "loss": 0.0993,
+ "step": 1994
+ },
+ {
+ "epoch": 15.0,
+ "grad_norm": 0.27802018973231796,
+ "learning_rate": 3.2247785991234943e-07,
+ "loss": 0.1014,
+ "step": 1995
+ },
+ {
+ "epoch": 15.0,
+ "eval_loss": 0.3364607095718384,
+ "eval_runtime": 36.6008,
+ "eval_samples_per_second": 12.213,
+ "eval_steps_per_second": 0.191,
+ "step": 1995
+ },
+ {
+ "epoch": 15.007518796992482,
+ "grad_norm": 0.2963820360648287,
+ "learning_rate": 3.2156436081663353e-07,
+ "loss": 0.0978,
+ "step": 1996
+ },
+ {
+ "epoch": 15.015037593984962,
+ "grad_norm": 0.29860450516504305,
+ "learning_rate": 3.206519094625578e-07,
+ "loss": 0.0883,
+ "step": 1997
+ },
+ {
+ "epoch": 15.022556390977444,
+ "grad_norm": 0.28390152931519125,
+ "learning_rate": 3.1974050725926547e-07,
+ "loss": 0.0899,
+ "step": 1998
+ },
+ {
+ "epoch": 15.030075187969924,
+ "grad_norm": 0.3029696893515749,
+ "learning_rate": 3.188301556142805e-07,
+ "loss": 0.1017,
+ "step": 1999
+ },
+ {
+ "epoch": 15.037593984962406,
+ "grad_norm": 0.2750169648226588,
+ "learning_rate": 3.1792085593350306e-07,
+ "loss": 0.0858,
+ "step": 2000
+ },
+ {
+ "epoch": 15.045112781954888,
+ "grad_norm": 0.45113588947441696,
+ "learning_rate": 3.1701260962121036e-07,
+ "loss": 0.0913,
+ "step": 2001
+ },
+ {
+ "epoch": 15.052631578947368,
+ "grad_norm": 0.2852956588486362,
+ "learning_rate": 3.1610541808005154e-07,
+ "loss": 0.0912,
+ "step": 2002
+ },
+ {
+ "epoch": 15.06015037593985,
+ "grad_norm": 0.26772097890087637,
+ "learning_rate": 3.15199282711047e-07,
+ "loss": 0.0902,
+ "step": 2003
+ },
+ {
+ "epoch": 15.06766917293233,
+ "grad_norm": 0.28677116929664204,
+ "learning_rate": 3.142942049135869e-07,
+ "loss": 0.0956,
+ "step": 2004
+ },
+ {
+ "epoch": 15.075187969924812,
+ "grad_norm": 0.28650398679335026,
+ "learning_rate": 3.133901860854271e-07,
+ "loss": 0.1026,
+ "step": 2005
+ },
+ {
+ "epoch": 15.082706766917294,
+ "grad_norm": 0.27461992621876924,
+ "learning_rate": 3.12487227622689e-07,
+ "loss": 0.0978,
+ "step": 2006
+ },
+ {
+ "epoch": 15.090225563909774,
+ "grad_norm": 0.261821694551324,
+ "learning_rate": 3.115853309198552e-07,
+ "loss": 0.0916,
+ "step": 2007
+ },
+ {
+ "epoch": 15.097744360902256,
+ "grad_norm": 0.30138787708448733,
+ "learning_rate": 3.106844973697701e-07,
+ "loss": 0.1007,
+ "step": 2008
+ },
+ {
+ "epoch": 15.105263157894736,
+ "grad_norm": 0.280571603080205,
+ "learning_rate": 3.0978472836363454e-07,
+ "loss": 0.0975,
+ "step": 2009
+ },
+ {
+ "epoch": 15.112781954887218,
+ "grad_norm": 0.28289401069254916,
+ "learning_rate": 3.0888602529100705e-07,
+ "loss": 0.0857,
+ "step": 2010
+ },
+ {
+ "epoch": 15.1203007518797,
+ "grad_norm": 0.2749664039115127,
+ "learning_rate": 3.0798838953979845e-07,
+ "loss": 0.0914,
+ "step": 2011
+ },
+ {
+ "epoch": 15.12781954887218,
+ "grad_norm": 0.2672229728991579,
+ "learning_rate": 3.070918224962725e-07,
+ "loss": 0.0897,
+ "step": 2012
+ },
+ {
+ "epoch": 15.135338345864662,
+ "grad_norm": 0.34906303846835846,
+ "learning_rate": 3.061963255450415e-07,
+ "loss": 0.0953,
+ "step": 2013
+ },
+ {
+ "epoch": 15.142857142857142,
+ "grad_norm": 0.3566066728287742,
+ "learning_rate": 3.0530190006906587e-07,
+ "loss": 0.0989,
+ "step": 2014
+ },
+ {
+ "epoch": 15.150375939849624,
+ "grad_norm": 0.3032963311002434,
+ "learning_rate": 3.044085474496507e-07,
+ "loss": 0.0955,
+ "step": 2015
+ },
+ {
+ "epoch": 15.157894736842104,
+ "grad_norm": 0.28030365867677376,
+ "learning_rate": 3.03516269066445e-07,
+ "loss": 0.0969,
+ "step": 2016
+ },
+ {
+ "epoch": 15.165413533834586,
+ "grad_norm": 0.3740557209157965,
+ "learning_rate": 3.026250662974377e-07,
+ "loss": 0.0968,
+ "step": 2017
+ },
+ {
+ "epoch": 15.172932330827068,
+ "grad_norm": 0.3702316175855913,
+ "learning_rate": 3.017349405189579e-07,
+ "loss": 0.0884,
+ "step": 2018
+ },
+ {
+ "epoch": 15.180451127819548,
+ "grad_norm": 0.3328481034316482,
+ "learning_rate": 3.008458931056701e-07,
+ "loss": 0.0917,
+ "step": 2019
+ },
+ {
+ "epoch": 15.18796992481203,
+ "grad_norm": 0.42195492848987975,
+ "learning_rate": 2.9995792543057473e-07,
+ "loss": 0.096,
+ "step": 2020
+ },
+ {
+ "epoch": 15.19548872180451,
+ "grad_norm": 0.2701154551826032,
+ "learning_rate": 2.990710388650034e-07,
+ "loss": 0.0963,
+ "step": 2021
+ },
+ {
+ "epoch": 15.203007518796992,
+ "grad_norm": 0.297597128027944,
+ "learning_rate": 2.9818523477861955e-07,
+ "loss": 0.0931,
+ "step": 2022
+ },
+ {
+ "epoch": 15.210526315789474,
+ "grad_norm": 0.275446712249712,
+ "learning_rate": 2.973005145394135e-07,
+ "loss": 0.0888,
+ "step": 2023
+ },
+ {
+ "epoch": 15.218045112781954,
+ "grad_norm": 0.2823236433123351,
+ "learning_rate": 2.96416879513703e-07,
+ "loss": 0.0879,
+ "step": 2024
+ },
+ {
+ "epoch": 15.225563909774436,
+ "grad_norm": 0.27309868434433304,
+ "learning_rate": 2.955343310661286e-07,
+ "loss": 0.0993,
+ "step": 2025
+ },
+ {
+ "epoch": 15.233082706766917,
+ "grad_norm": 0.26630505672374905,
+ "learning_rate": 2.9465287055965393e-07,
+ "loss": 0.087,
+ "step": 2026
+ },
+ {
+ "epoch": 15.240601503759398,
+ "grad_norm": 0.2668443125206894,
+ "learning_rate": 2.937724993555615e-07,
+ "loss": 0.0901,
+ "step": 2027
+ },
+ {
+ "epoch": 15.24812030075188,
+ "grad_norm": 0.27468996254179956,
+ "learning_rate": 2.9289321881345254e-07,
+ "loss": 0.1032,
+ "step": 2028
+ },
+ {
+ "epoch": 15.25563909774436,
+ "grad_norm": 0.29410621118357705,
+ "learning_rate": 2.920150302912431e-07,
+ "loss": 0.091,
+ "step": 2029
+ },
+ {
+ "epoch": 15.263157894736842,
+ "grad_norm": 0.27515411076816515,
+ "learning_rate": 2.9113793514516273e-07,
+ "loss": 0.0895,
+ "step": 2030
+ },
+ {
+ "epoch": 15.270676691729323,
+ "grad_norm": 0.28395469610431046,
+ "learning_rate": 2.9026193472975333e-07,
+ "loss": 0.1037,
+ "step": 2031
+ },
+ {
+ "epoch": 15.278195488721805,
+ "grad_norm": 0.2704164647432797,
+ "learning_rate": 2.8938703039786503e-07,
+ "loss": 0.0953,
+ "step": 2032
+ },
+ {
+ "epoch": 15.285714285714286,
+ "grad_norm": 0.27982780443551386,
+ "learning_rate": 2.885132235006564e-07,
+ "loss": 0.0884,
+ "step": 2033
+ },
+ {
+ "epoch": 15.293233082706767,
+ "grad_norm": 0.2666312366641222,
+ "learning_rate": 2.876405153875898e-07,
+ "loss": 0.0933,
+ "step": 2034
+ },
+ {
+ "epoch": 15.300751879699249,
+ "grad_norm": 0.2729542520865784,
+ "learning_rate": 2.867689074064323e-07,
+ "loss": 0.0911,
+ "step": 2035
+ },
+ {
+ "epoch": 15.308270676691729,
+ "grad_norm": 0.3026471066115743,
+ "learning_rate": 2.858984009032502e-07,
+ "loss": 0.0958,
+ "step": 2036
+ },
+ {
+ "epoch": 15.31578947368421,
+ "grad_norm": 0.40142841222202885,
+ "learning_rate": 2.850289972224106e-07,
+ "loss": 0.0926,
+ "step": 2037
+ },
+ {
+ "epoch": 15.323308270676693,
+ "grad_norm": 0.2910091246354572,
+ "learning_rate": 2.841606977065757e-07,
+ "loss": 0.0896,
+ "step": 2038
+ },
+ {
+ "epoch": 15.330827067669173,
+ "grad_norm": 0.5028517501767987,
+ "learning_rate": 2.832935036967038e-07,
+ "loss": 0.0973,
+ "step": 2039
+ },
+ {
+ "epoch": 15.338345864661655,
+ "grad_norm": 0.2736616390968422,
+ "learning_rate": 2.82427416532045e-07,
+ "loss": 0.1061,
+ "step": 2040
+ },
+ {
+ "epoch": 15.345864661654135,
+ "grad_norm": 0.27591164352390873,
+ "learning_rate": 2.815624375501411e-07,
+ "loss": 0.0882,
+ "step": 2041
+ },
+ {
+ "epoch": 15.353383458646617,
+ "grad_norm": 0.2525002660473969,
+ "learning_rate": 2.806985680868209e-07,
+ "loss": 0.0911,
+ "step": 2042
+ },
+ {
+ "epoch": 15.360902255639097,
+ "grad_norm": 0.2691008520899465,
+ "learning_rate": 2.7983580947620165e-07,
+ "loss": 0.0982,
+ "step": 2043
+ },
+ {
+ "epoch": 15.368421052631579,
+ "grad_norm": 0.26401667071768514,
+ "learning_rate": 2.789741630506832e-07,
+ "loss": 0.0917,
+ "step": 2044
+ },
+ {
+ "epoch": 15.37593984962406,
+ "grad_norm": 0.2683674704017958,
+ "learning_rate": 2.781136301409492e-07,
+ "loss": 0.0872,
+ "step": 2045
+ },
+ {
+ "epoch": 15.38345864661654,
+ "grad_norm": 0.294946397509896,
+ "learning_rate": 2.7725421207596277e-07,
+ "loss": 0.1011,
+ "step": 2046
+ },
+ {
+ "epoch": 15.390977443609023,
+ "grad_norm": 0.2849050761978978,
+ "learning_rate": 2.7639591018296605e-07,
+ "loss": 0.0911,
+ "step": 2047
+ },
+ {
+ "epoch": 15.398496240601503,
+ "grad_norm": 0.2757688843637799,
+ "learning_rate": 2.755387257874764e-07,
+ "loss": 0.0988,
+ "step": 2048
+ },
+ {
+ "epoch": 15.406015037593985,
+ "grad_norm": 0.3769974378140889,
+ "learning_rate": 2.746826602132867e-07,
+ "loss": 0.0907,
+ "step": 2049
+ },
+ {
+ "epoch": 15.413533834586467,
+ "grad_norm": 0.33467958757877425,
+ "learning_rate": 2.738277147824605e-07,
+ "loss": 0.0912,
+ "step": 2050
+ },
+ {
+ "epoch": 15.421052631578947,
+ "grad_norm": 0.7084684176009229,
+ "learning_rate": 2.7297389081533297e-07,
+ "loss": 0.0856,
+ "step": 2051
+ },
+ {
+ "epoch": 15.428571428571429,
+ "grad_norm": 0.2933003064735501,
+ "learning_rate": 2.721211896305059e-07,
+ "loss": 0.0955,
+ "step": 2052
+ },
+ {
+ "epoch": 15.436090225563909,
+ "grad_norm": 0.29490306184676346,
+ "learning_rate": 2.712696125448485e-07,
+ "loss": 0.0952,
+ "step": 2053
+ },
+ {
+ "epoch": 15.443609022556391,
+ "grad_norm": 0.292770062054467,
+ "learning_rate": 2.704191608734926e-07,
+ "loss": 0.0985,
+ "step": 2054
+ },
+ {
+ "epoch": 15.451127819548873,
+ "grad_norm": 0.2643046161885617,
+ "learning_rate": 2.695698359298334e-07,
+ "loss": 0.092,
+ "step": 2055
+ },
+ {
+ "epoch": 15.458646616541353,
+ "grad_norm": 0.30375904080453375,
+ "learning_rate": 2.687216390255249e-07,
+ "loss": 0.0943,
+ "step": 2056
+ },
+ {
+ "epoch": 15.466165413533835,
+ "grad_norm": 0.2836392791723381,
+ "learning_rate": 2.678745714704792e-07,
+ "loss": 0.0931,
+ "step": 2057
+ },
+ {
+ "epoch": 15.473684210526315,
+ "grad_norm": 0.29594931876406905,
+ "learning_rate": 2.6702863457286516e-07,
+ "loss": 0.0861,
+ "step": 2058
+ },
+ {
+ "epoch": 15.481203007518797,
+ "grad_norm": 0.26909120271396464,
+ "learning_rate": 2.6618382963910424e-07,
+ "loss": 0.0947,
+ "step": 2059
+ },
+ {
+ "epoch": 15.488721804511279,
+ "grad_norm": 0.27217018714215374,
+ "learning_rate": 2.65340157973871e-07,
+ "loss": 0.0939,
+ "step": 2060
+ },
+ {
+ "epoch": 15.496240601503759,
+ "grad_norm": 0.2733183738010883,
+ "learning_rate": 2.6449762088008863e-07,
+ "loss": 0.0914,
+ "step": 2061
+ },
+ {
+ "epoch": 15.503759398496241,
+ "grad_norm": 0.2712841716549417,
+ "learning_rate": 2.636562196589294e-07,
+ "loss": 0.0938,
+ "step": 2062
+ },
+ {
+ "epoch": 15.511278195488721,
+ "grad_norm": 0.33025382416477994,
+ "learning_rate": 2.6281595560981005e-07,
+ "loss": 0.0869,
+ "step": 2063
+ },
+ {
+ "epoch": 15.518796992481203,
+ "grad_norm": 0.391574956403183,
+ "learning_rate": 2.619768300303925e-07,
+ "loss": 0.0873,
+ "step": 2064
+ },
+ {
+ "epoch": 15.526315789473685,
+ "grad_norm": 0.27808668590534513,
+ "learning_rate": 2.611388442165791e-07,
+ "loss": 0.1035,
+ "step": 2065
+ },
+ {
+ "epoch": 15.533834586466165,
+ "grad_norm": 0.2709115786081242,
+ "learning_rate": 2.603019994625133e-07,
+ "loss": 0.0936,
+ "step": 2066
+ },
+ {
+ "epoch": 15.541353383458647,
+ "grad_norm": 0.3258351359510103,
+ "learning_rate": 2.5946629706057534e-07,
+ "loss": 0.0908,
+ "step": 2067
+ },
+ {
+ "epoch": 15.548872180451127,
+ "grad_norm": 0.2756004892425253,
+ "learning_rate": 2.586317383013821e-07,
+ "loss": 0.0927,
+ "step": 2068
+ },
+ {
+ "epoch": 15.556390977443609,
+ "grad_norm": 0.27325249373112914,
+ "learning_rate": 2.577983244737832e-07,
+ "loss": 0.0897,
+ "step": 2069
+ },
+ {
+ "epoch": 15.563909774436091,
+ "grad_norm": 0.26949770198079415,
+ "learning_rate": 2.569660568648616e-07,
+ "loss": 0.0932,
+ "step": 2070
+ },
+ {
+ "epoch": 15.571428571428571,
+ "grad_norm": 0.3073401198503267,
+ "learning_rate": 2.561349367599285e-07,
+ "loss": 0.0948,
+ "step": 2071
+ },
+ {
+ "epoch": 15.578947368421053,
+ "grad_norm": 0.287558765147464,
+ "learning_rate": 2.5530496544252424e-07,
+ "loss": 0.1002,
+ "step": 2072
+ },
+ {
+ "epoch": 15.586466165413533,
+ "grad_norm": 0.28776910084525603,
+ "learning_rate": 2.544761441944139e-07,
+ "loss": 0.0946,
+ "step": 2073
+ },
+ {
+ "epoch": 15.593984962406015,
+ "grad_norm": 0.27045058499222957,
+ "learning_rate": 2.536484742955878e-07,
+ "loss": 0.094,
+ "step": 2074
+ },
+ {
+ "epoch": 15.601503759398497,
+ "grad_norm": 0.2699937252574269,
+ "learning_rate": 2.5282195702425655e-07,
+ "loss": 0.091,
+ "step": 2075
+ },
+ {
+ "epoch": 15.609022556390977,
+ "grad_norm": 0.292503539078292,
+ "learning_rate": 2.5199659365685235e-07,
+ "loss": 0.0946,
+ "step": 2076
+ },
+ {
+ "epoch": 15.61654135338346,
+ "grad_norm": 0.27521415215863776,
+ "learning_rate": 2.511723854680239e-07,
+ "loss": 0.0867,
+ "step": 2077
+ },
+ {
+ "epoch": 15.62406015037594,
+ "grad_norm": 0.2832419525849557,
+ "learning_rate": 2.5034933373063726e-07,
+ "loss": 0.1001,
+ "step": 2078
+ },
+ {
+ "epoch": 15.631578947368421,
+ "grad_norm": 0.29108476524046384,
+ "learning_rate": 2.495274397157713e-07,
+ "loss": 0.1053,
+ "step": 2079
+ },
+ {
+ "epoch": 15.639097744360903,
+ "grad_norm": 0.30621592171789713,
+ "learning_rate": 2.487067046927178e-07,
+ "loss": 0.0954,
+ "step": 2080
+ },
+ {
+ "epoch": 15.646616541353383,
+ "grad_norm": 0.4194653719630109,
+ "learning_rate": 2.478871299289781e-07,
+ "loss": 0.0888,
+ "step": 2081
+ },
+ {
+ "epoch": 15.654135338345865,
+ "grad_norm": 0.285125670887528,
+ "learning_rate": 2.470687166902622e-07,
+ "loss": 0.0995,
+ "step": 2082
+ },
+ {
+ "epoch": 15.661654135338345,
+ "grad_norm": 0.293816641055334,
+ "learning_rate": 2.462514662404862e-07,
+ "loss": 0.0952,
+ "step": 2083
+ },
+ {
+ "epoch": 15.669172932330827,
+ "grad_norm": 0.5075396776873216,
+ "learning_rate": 2.454353798417698e-07,
+ "loss": 0.0954,
+ "step": 2084
+ },
+ {
+ "epoch": 15.676691729323307,
+ "grad_norm": 0.2781833588078897,
+ "learning_rate": 2.4462045875443604e-07,
+ "loss": 0.0938,
+ "step": 2085
+ },
+ {
+ "epoch": 15.68421052631579,
+ "grad_norm": 0.3410659548519398,
+ "learning_rate": 2.438067042370072e-07,
+ "loss": 0.0967,
+ "step": 2086
+ },
+ {
+ "epoch": 15.691729323308271,
+ "grad_norm": 0.2763495808363708,
+ "learning_rate": 2.4299411754620526e-07,
+ "loss": 0.0955,
+ "step": 2087
+ },
+ {
+ "epoch": 15.699248120300751,
+ "grad_norm": 0.274498326499821,
+ "learning_rate": 2.421826999369473e-07,
+ "loss": 0.0928,
+ "step": 2088
+ },
+ {
+ "epoch": 15.706766917293233,
+ "grad_norm": 0.271363060901705,
+ "learning_rate": 2.4137245266234593e-07,
+ "loss": 0.1044,
+ "step": 2089
+ },
+ {
+ "epoch": 15.714285714285714,
+ "grad_norm": 0.28252693730821044,
+ "learning_rate": 2.4056337697370587e-07,
+ "loss": 0.09,
+ "step": 2090
+ },
+ {
+ "epoch": 15.721804511278195,
+ "grad_norm": 0.2924698589379258,
+ "learning_rate": 2.3975547412052275e-07,
+ "loss": 0.0963,
+ "step": 2091
+ },
+ {
+ "epoch": 15.729323308270677,
+ "grad_norm": 0.28789996809210017,
+ "learning_rate": 2.389487453504806e-07,
+ "loss": 0.0879,
+ "step": 2092
+ },
+ {
+ "epoch": 15.736842105263158,
+ "grad_norm": 0.30312620894872355,
+ "learning_rate": 2.3814319190945075e-07,
+ "loss": 0.1,
+ "step": 2093
+ },
+ {
+ "epoch": 15.74436090225564,
+ "grad_norm": 0.2881783947819823,
+ "learning_rate": 2.373388150414889e-07,
+ "loss": 0.0878,
+ "step": 2094
+ },
+ {
+ "epoch": 15.75187969924812,
+ "grad_norm": 0.27628860924536747,
+ "learning_rate": 2.365356159888342e-07,
+ "loss": 0.0921,
+ "step": 2095
+ },
+ {
+ "epoch": 15.759398496240602,
+ "grad_norm": 0.28006619053416054,
+ "learning_rate": 2.3573359599190613e-07,
+ "loss": 0.0992,
+ "step": 2096
+ },
+ {
+ "epoch": 15.766917293233083,
+ "grad_norm": 0.2768492637587325,
+ "learning_rate": 2.349327562893044e-07,
+ "loss": 0.1021,
+ "step": 2097
+ },
+ {
+ "epoch": 15.774436090225564,
+ "grad_norm": 0.28314599858231826,
+ "learning_rate": 2.3413309811780458e-07,
+ "loss": 0.0988,
+ "step": 2098
+ },
+ {
+ "epoch": 15.781954887218046,
+ "grad_norm": 0.2902818996077741,
+ "learning_rate": 2.3333462271235905e-07,
+ "loss": 0.0894,
+ "step": 2099
+ },
+ {
+ "epoch": 15.789473684210526,
+ "grad_norm": 0.27293526473342855,
+ "learning_rate": 2.3253733130609187e-07,
+ "loss": 0.0936,
+ "step": 2100
+ },
+ {
+ "epoch": 15.796992481203008,
+ "grad_norm": 0.30563431758124404,
+ "learning_rate": 2.3174122513030035e-07,
+ "loss": 0.0908,
+ "step": 2101
+ },
+ {
+ "epoch": 15.80451127819549,
+ "grad_norm": 0.28273056559017923,
+ "learning_rate": 2.3094630541444992e-07,
+ "loss": 0.1037,
+ "step": 2102
+ },
+ {
+ "epoch": 15.81203007518797,
+ "grad_norm": 0.2819683367873249,
+ "learning_rate": 2.301525733861749e-07,
+ "loss": 0.1016,
+ "step": 2103
+ },
+ {
+ "epoch": 15.819548872180452,
+ "grad_norm": 0.2827661242016228,
+ "learning_rate": 2.2936003027127415e-07,
+ "loss": 0.0997,
+ "step": 2104
+ },
+ {
+ "epoch": 15.827067669172932,
+ "grad_norm": 0.34121822150314096,
+ "learning_rate": 2.2856867729371178e-07,
+ "loss": 0.1041,
+ "step": 2105
+ },
+ {
+ "epoch": 15.834586466165414,
+ "grad_norm": 0.254358351764514,
+ "learning_rate": 2.2777851567561267e-07,
+ "loss": 0.0905,
+ "step": 2106
+ },
+ {
+ "epoch": 15.842105263157894,
+ "grad_norm": 0.29582080664972094,
+ "learning_rate": 2.26989546637263e-07,
+ "loss": 0.0974,
+ "step": 2107
+ },
+ {
+ "epoch": 15.849624060150376,
+ "grad_norm": 0.281536420501201,
+ "learning_rate": 2.2620177139710627e-07,
+ "loss": 0.1004,
+ "step": 2108
+ },
+ {
+ "epoch": 15.857142857142858,
+ "grad_norm": 0.3246128531098712,
+ "learning_rate": 2.2541519117174246e-07,
+ "loss": 0.0995,
+ "step": 2109
+ },
+ {
+ "epoch": 15.864661654135338,
+ "grad_norm": 0.3029206441582456,
+ "learning_rate": 2.246298071759266e-07,
+ "loss": 0.0955,
+ "step": 2110
+ },
+ {
+ "epoch": 15.87218045112782,
+ "grad_norm": 0.3778784755572476,
+ "learning_rate": 2.2384562062256562e-07,
+ "loss": 0.0957,
+ "step": 2111
+ },
+ {
+ "epoch": 15.8796992481203,
+ "grad_norm": 0.2810675248615856,
+ "learning_rate": 2.2306263272271787e-07,
+ "loss": 0.0858,
+ "step": 2112
+ },
+ {
+ "epoch": 15.887218045112782,
+ "grad_norm": 0.2653807371834335,
+ "learning_rate": 2.2228084468558984e-07,
+ "loss": 0.094,
+ "step": 2113
+ },
+ {
+ "epoch": 15.894736842105264,
+ "grad_norm": 0.2867886773685861,
+ "learning_rate": 2.2150025771853588e-07,
+ "loss": 0.0987,
+ "step": 2114
+ },
+ {
+ "epoch": 15.902255639097744,
+ "grad_norm": 0.30210457121545686,
+ "learning_rate": 2.2072087302705423e-07,
+ "loss": 0.1011,
+ "step": 2115
+ },
+ {
+ "epoch": 15.909774436090226,
+ "grad_norm": 0.27517781776253203,
+ "learning_rate": 2.1994269181478798e-07,
+ "loss": 0.0969,
+ "step": 2116
+ },
+ {
+ "epoch": 15.917293233082706,
+ "grad_norm": 0.28829459542423985,
+ "learning_rate": 2.1916571528352002e-07,
+ "loss": 0.1025,
+ "step": 2117
+ },
+ {
+ "epoch": 15.924812030075188,
+ "grad_norm": 0.34800304116880654,
+ "learning_rate": 2.1838994463317417e-07,
+ "loss": 0.0898,
+ "step": 2118
+ },
+ {
+ "epoch": 15.93233082706767,
+ "grad_norm": 0.301216838041005,
+ "learning_rate": 2.1761538106181076e-07,
+ "loss": 0.0969,
+ "step": 2119
+ },
+ {
+ "epoch": 15.93984962406015,
+ "grad_norm": 0.2860550921979477,
+ "learning_rate": 2.1684202576562717e-07,
+ "loss": 0.1051,
+ "step": 2120
+ },
+ {
+ "epoch": 15.947368421052632,
+ "grad_norm": 0.26177624962591733,
+ "learning_rate": 2.1606987993895353e-07,
+ "loss": 0.089,
+ "step": 2121
+ },
+ {
+ "epoch": 15.954887218045112,
+ "grad_norm": 0.27818023359952737,
+ "learning_rate": 2.1529894477425327e-07,
+ "loss": 0.1034,
+ "step": 2122
+ },
+ {
+ "epoch": 15.962406015037594,
+ "grad_norm": 0.2613043589910478,
+ "learning_rate": 2.1452922146211916e-07,
+ "loss": 0.0961,
+ "step": 2123
+ },
+ {
+ "epoch": 15.969924812030076,
+ "grad_norm": 0.3018060897048875,
+ "learning_rate": 2.1376071119127337e-07,
+ "loss": 0.0929,
+ "step": 2124
+ },
+ {
+ "epoch": 15.977443609022556,
+ "grad_norm": 0.3263978036451699,
+ "learning_rate": 2.1299341514856363e-07,
+ "loss": 0.102,
+ "step": 2125
+ },
+ {
+ "epoch": 15.984962406015038,
+ "grad_norm": 0.3155548670153672,
+ "learning_rate": 2.122273345189638e-07,
+ "loss": 0.0902,
+ "step": 2126
+ },
+ {
+ "epoch": 15.992481203007518,
+ "grad_norm": 0.27465911803932663,
+ "learning_rate": 2.1146247048556932e-07,
+ "loss": 0.0978,
+ "step": 2127
+ },
+ {
+ "epoch": 16.0,
+ "grad_norm": 0.29706909269131093,
+ "learning_rate": 2.1069882422959807e-07,
+ "loss": 0.0931,
+ "step": 2128
+ },
+ {
+ "epoch": 16.0,
+ "eval_loss": 0.34437084197998047,
+ "eval_runtime": 36.3329,
+ "eval_samples_per_second": 12.303,
+ "eval_steps_per_second": 0.193,
+ "step": 2128
+ },
+ {
+ "epoch": 16.007518796992482,
+ "grad_norm": 0.3117729285960986,
+ "learning_rate": 2.099363969303861e-07,
+ "loss": 0.0833,
+ "step": 2129
+ },
+ {
+ "epoch": 16.015037593984964,
+ "grad_norm": 0.2798701311984748,
+ "learning_rate": 2.0917518976538807e-07,
+ "loss": 0.09,
+ "step": 2130
+ },
+ {
+ "epoch": 16.022556390977442,
+ "grad_norm": 0.2633684942511428,
+ "learning_rate": 2.084152039101732e-07,
+ "loss": 0.0927,
+ "step": 2131
+ },
+ {
+ "epoch": 16.030075187969924,
+ "grad_norm": 0.3229213216861476,
+ "learning_rate": 2.0765644053842578e-07,
+ "loss": 0.094,
+ "step": 2132
+ },
+ {
+ "epoch": 16.037593984962406,
+ "grad_norm": 0.2832738708710883,
+ "learning_rate": 2.0689890082194083e-07,
+ "loss": 0.0852,
+ "step": 2133
+ },
+ {
+ "epoch": 16.045112781954888,
+ "grad_norm": 0.28004542968703366,
+ "learning_rate": 2.0614258593062493e-07,
+ "loss": 0.097,
+ "step": 2134
+ },
+ {
+ "epoch": 16.05263157894737,
+ "grad_norm": 0.28725200081889946,
+ "learning_rate": 2.0538749703249236e-07,
+ "loss": 0.0796,
+ "step": 2135
+ },
+ {
+ "epoch": 16.06015037593985,
+ "grad_norm": 0.2627396958748146,
+ "learning_rate": 2.0463363529366373e-07,
+ "loss": 0.0903,
+ "step": 2136
+ },
+ {
+ "epoch": 16.06766917293233,
+ "grad_norm": 0.2531850595110087,
+ "learning_rate": 2.0388100187836554e-07,
+ "loss": 0.0799,
+ "step": 2137
+ },
+ {
+ "epoch": 16.075187969924812,
+ "grad_norm": 0.2691040968142747,
+ "learning_rate": 2.0312959794892615e-07,
+ "loss": 0.0861,
+ "step": 2138
+ },
+ {
+ "epoch": 16.082706766917294,
+ "grad_norm": 0.2875744922707444,
+ "learning_rate": 2.0237942466577617e-07,
+ "loss": 0.0828,
+ "step": 2139
+ },
+ {
+ "epoch": 16.090225563909776,
+ "grad_norm": 0.28531128023612884,
+ "learning_rate": 2.0163048318744492e-07,
+ "loss": 0.0878,
+ "step": 2140
+ },
+ {
+ "epoch": 16.097744360902254,
+ "grad_norm": 0.26331310193880464,
+ "learning_rate": 2.0088277467056013e-07,
+ "loss": 0.0905,
+ "step": 2141
+ },
+ {
+ "epoch": 16.105263157894736,
+ "grad_norm": 0.27854298100243663,
+ "learning_rate": 2.001363002698443e-07,
+ "loss": 0.0863,
+ "step": 2142
+ },
+ {
+ "epoch": 16.112781954887218,
+ "grad_norm": 0.3224570834104557,
+ "learning_rate": 1.9939106113811544e-07,
+ "loss": 0.0913,
+ "step": 2143
+ },
+ {
+ "epoch": 16.1203007518797,
+ "grad_norm": 0.30626034685818176,
+ "learning_rate": 1.9864705842628237e-07,
+ "loss": 0.0987,
+ "step": 2144
+ },
+ {
+ "epoch": 16.127819548872182,
+ "grad_norm": 0.3028458046151967,
+ "learning_rate": 1.9790429328334592e-07,
+ "loss": 0.1012,
+ "step": 2145
+ },
+ {
+ "epoch": 16.13533834586466,
+ "grad_norm": 0.26617813477312463,
+ "learning_rate": 1.9716276685639422e-07,
+ "loss": 0.0903,
+ "step": 2146
+ },
+ {
+ "epoch": 16.142857142857142,
+ "grad_norm": 0.26764368562319285,
+ "learning_rate": 1.9642248029060383e-07,
+ "loss": 0.0982,
+ "step": 2147
+ },
+ {
+ "epoch": 16.150375939849624,
+ "grad_norm": 0.27504727712721666,
+ "learning_rate": 1.956834347292352e-07,
+ "loss": 0.0958,
+ "step": 2148
+ },
+ {
+ "epoch": 16.157894736842106,
+ "grad_norm": 0.27959913537193604,
+ "learning_rate": 1.949456313136335e-07,
+ "loss": 0.0854,
+ "step": 2149
+ },
+ {
+ "epoch": 16.165413533834588,
+ "grad_norm": 0.3343243534220108,
+ "learning_rate": 1.9420907118322427e-07,
+ "loss": 0.0927,
+ "step": 2150
+ },
+ {
+ "epoch": 16.172932330827066,
+ "grad_norm": 0.2958803849531254,
+ "learning_rate": 1.9347375547551436e-07,
+ "loss": 0.0923,
+ "step": 2151
+ },
+ {
+ "epoch": 16.18045112781955,
+ "grad_norm": 0.31269113313522157,
+ "learning_rate": 1.9273968532608753e-07,
+ "loss": 0.0848,
+ "step": 2152
+ },
+ {
+ "epoch": 16.18796992481203,
+ "grad_norm": 0.26999253583964516,
+ "learning_rate": 1.9200686186860492e-07,
+ "loss": 0.0947,
+ "step": 2153
+ },
+ {
+ "epoch": 16.195488721804512,
+ "grad_norm": 0.29287018842845886,
+ "learning_rate": 1.9127528623480172e-07,
+ "loss": 0.0782,
+ "step": 2154
+ },
+ {
+ "epoch": 16.203007518796994,
+ "grad_norm": 0.27549656794218136,
+ "learning_rate": 1.9054495955448656e-07,
+ "loss": 0.0899,
+ "step": 2155
+ },
+ {
+ "epoch": 16.210526315789473,
+ "grad_norm": 0.2668778980048415,
+ "learning_rate": 1.898158829555385e-07,
+ "loss": 0.0997,
+ "step": 2156
+ },
+ {
+ "epoch": 16.218045112781954,
+ "grad_norm": 0.2731237310729084,
+ "learning_rate": 1.890880575639072e-07,
+ "loss": 0.087,
+ "step": 2157
+ },
+ {
+ "epoch": 16.225563909774436,
+ "grad_norm": 0.3552847214449844,
+ "learning_rate": 1.8836148450360866e-07,
+ "loss": 0.0946,
+ "step": 2158
+ },
+ {
+ "epoch": 16.23308270676692,
+ "grad_norm": 0.3134629837545447,
+ "learning_rate": 1.8763616489672608e-07,
+ "loss": 0.0855,
+ "step": 2159
+ },
+ {
+ "epoch": 16.2406015037594,
+ "grad_norm": 0.2672605106075276,
+ "learning_rate": 1.8691209986340595e-07,
+ "loss": 0.0915,
+ "step": 2160
+ },
+ {
+ "epoch": 16.24812030075188,
+ "grad_norm": 0.26661314710948897,
+ "learning_rate": 1.861892905218575e-07,
+ "loss": 0.0853,
+ "step": 2161
+ },
+ {
+ "epoch": 16.25563909774436,
+ "grad_norm": 0.27677105895391807,
+ "learning_rate": 1.8546773798835148e-07,
+ "loss": 0.0796,
+ "step": 2162
+ },
+ {
+ "epoch": 16.263157894736842,
+ "grad_norm": 0.26945978115405717,
+ "learning_rate": 1.8474744337721638e-07,
+ "loss": 0.0886,
+ "step": 2163
+ },
+ {
+ "epoch": 16.270676691729324,
+ "grad_norm": 0.3163707180778906,
+ "learning_rate": 1.8402840780083927e-07,
+ "loss": 0.0911,
+ "step": 2164
+ },
+ {
+ "epoch": 16.278195488721803,
+ "grad_norm": 0.3236123240317135,
+ "learning_rate": 1.833106323696617e-07,
+ "loss": 0.0945,
+ "step": 2165
+ },
+ {
+ "epoch": 16.285714285714285,
+ "grad_norm": 0.27049926188190276,
+ "learning_rate": 1.825941181921805e-07,
+ "loss": 0.0876,
+ "step": 2166
+ },
+ {
+ "epoch": 16.293233082706767,
+ "grad_norm": 0.27586866725043957,
+ "learning_rate": 1.8187886637494297e-07,
+ "loss": 0.0888,
+ "step": 2167
+ },
+ {
+ "epoch": 16.30075187969925,
+ "grad_norm": 0.2932072848459732,
+ "learning_rate": 1.8116487802254865e-07,
+ "loss": 0.0828,
+ "step": 2168
+ },
+ {
+ "epoch": 16.30827067669173,
+ "grad_norm": 0.278646672946318,
+ "learning_rate": 1.8045215423764426e-07,
+ "loss": 0.096,
+ "step": 2169
+ },
+ {
+ "epoch": 16.31578947368421,
+ "grad_norm": 0.28385623337302546,
+ "learning_rate": 1.7974069612092478e-07,
+ "loss": 0.0934,
+ "step": 2170
+ },
+ {
+ "epoch": 16.32330827067669,
+ "grad_norm": 0.2729073542767318,
+ "learning_rate": 1.790305047711298e-07,
+ "loss": 0.0889,
+ "step": 2171
+ },
+ {
+ "epoch": 16.330827067669173,
+ "grad_norm": 0.32653933817127334,
+ "learning_rate": 1.7832158128504328e-07,
+ "loss": 0.0946,
+ "step": 2172
+ },
+ {
+ "epoch": 16.338345864661655,
+ "grad_norm": 0.2833905461964408,
+ "learning_rate": 1.776139267574901e-07,
+ "loss": 0.0935,
+ "step": 2173
+ },
+ {
+ "epoch": 16.345864661654137,
+ "grad_norm": 0.2868364543647997,
+ "learning_rate": 1.7690754228133688e-07,
+ "loss": 0.1021,
+ "step": 2174
+ },
+ {
+ "epoch": 16.353383458646615,
+ "grad_norm": 0.26823223887533365,
+ "learning_rate": 1.7620242894748716e-07,
+ "loss": 0.0857,
+ "step": 2175
+ },
+ {
+ "epoch": 16.360902255639097,
+ "grad_norm": 0.29609754185728926,
+ "learning_rate": 1.7549858784488314e-07,
+ "loss": 0.0943,
+ "step": 2176
+ },
+ {
+ "epoch": 16.36842105263158,
+ "grad_norm": 0.30126818109644443,
+ "learning_rate": 1.7479602006050054e-07,
+ "loss": 0.0908,
+ "step": 2177
+ },
+ {
+ "epoch": 16.37593984962406,
+ "grad_norm": 0.32545928933322094,
+ "learning_rate": 1.740947266793501e-07,
+ "loss": 0.0878,
+ "step": 2178
+ },
+ {
+ "epoch": 16.383458646616543,
+ "grad_norm": 0.2997130928980806,
+ "learning_rate": 1.7339470878447337e-07,
+ "loss": 0.0935,
+ "step": 2179
+ },
+ {
+ "epoch": 16.39097744360902,
+ "grad_norm": 0.26642121752408193,
+ "learning_rate": 1.7269596745694292e-07,
+ "loss": 0.0839,
+ "step": 2180
+ },
+ {
+ "epoch": 16.398496240601503,
+ "grad_norm": 0.2925422236008237,
+ "learning_rate": 1.71998503775859e-07,
+ "loss": 0.0911,
+ "step": 2181
+ },
+ {
+ "epoch": 16.406015037593985,
+ "grad_norm": 0.28499239841066776,
+ "learning_rate": 1.713023188183498e-07,
+ "loss": 0.1032,
+ "step": 2182
+ },
+ {
+ "epoch": 16.413533834586467,
+ "grad_norm": 0.30758421241375694,
+ "learning_rate": 1.7060741365956743e-07,
+ "loss": 0.0913,
+ "step": 2183
+ },
+ {
+ "epoch": 16.42105263157895,
+ "grad_norm": 0.2743050031835749,
+ "learning_rate": 1.6991378937268886e-07,
+ "loss": 0.094,
+ "step": 2184
+ },
+ {
+ "epoch": 16.428571428571427,
+ "grad_norm": 0.29585826728082537,
+ "learning_rate": 1.6922144702891173e-07,
+ "loss": 0.092,
+ "step": 2185
+ },
+ {
+ "epoch": 16.43609022556391,
+ "grad_norm": 0.2914312490766461,
+ "learning_rate": 1.6853038769745465e-07,
+ "loss": 0.0882,
+ "step": 2186
+ },
+ {
+ "epoch": 16.44360902255639,
+ "grad_norm": 0.2736752376161258,
+ "learning_rate": 1.6784061244555513e-07,
+ "loss": 0.0939,
+ "step": 2187
+ },
+ {
+ "epoch": 16.451127819548873,
+ "grad_norm": 0.4103456135359743,
+ "learning_rate": 1.6715212233846654e-07,
+ "loss": 0.0867,
+ "step": 2188
+ },
+ {
+ "epoch": 16.458646616541355,
+ "grad_norm": 0.28235115429089735,
+ "learning_rate": 1.6646491843945853e-07,
+ "loss": 0.0883,
+ "step": 2189
+ },
+ {
+ "epoch": 16.466165413533833,
+ "grad_norm": 0.26210301959316906,
+ "learning_rate": 1.6577900180981363e-07,
+ "loss": 0.0948,
+ "step": 2190
+ },
+ {
+ "epoch": 16.473684210526315,
+ "grad_norm": 0.2992929443292218,
+ "learning_rate": 1.6509437350882716e-07,
+ "loss": 0.0874,
+ "step": 2191
+ },
+ {
+ "epoch": 16.481203007518797,
+ "grad_norm": 0.27378310548718426,
+ "learning_rate": 1.644110345938039e-07,
+ "loss": 0.093,
+ "step": 2192
+ },
+ {
+ "epoch": 16.48872180451128,
+ "grad_norm": 0.30683737906353187,
+ "learning_rate": 1.6372898612005837e-07,
+ "loss": 0.0985,
+ "step": 2193
+ },
+ {
+ "epoch": 16.49624060150376,
+ "grad_norm": 0.3125399477848913,
+ "learning_rate": 1.6304822914091132e-07,
+ "loss": 0.0901,
+ "step": 2194
+ },
+ {
+ "epoch": 16.50375939849624,
+ "grad_norm": 0.2817719609802915,
+ "learning_rate": 1.6236876470768958e-07,
+ "loss": 0.0897,
+ "step": 2195
+ },
+ {
+ "epoch": 16.51127819548872,
+ "grad_norm": 0.2835372649553096,
+ "learning_rate": 1.616905938697234e-07,
+ "loss": 0.0878,
+ "step": 2196
+ },
+ {
+ "epoch": 16.518796992481203,
+ "grad_norm": 0.2795826416518004,
+ "learning_rate": 1.610137176743457e-07,
+ "loss": 0.0891,
+ "step": 2197
+ },
+ {
+ "epoch": 16.526315789473685,
+ "grad_norm": 0.27704428373045886,
+ "learning_rate": 1.6033813716688948e-07,
+ "loss": 0.0888,
+ "step": 2198
+ },
+ {
+ "epoch": 16.533834586466167,
+ "grad_norm": 0.3838643261629065,
+ "learning_rate": 1.5966385339068756e-07,
+ "loss": 0.0923,
+ "step": 2199
+ },
+ {
+ "epoch": 16.541353383458645,
+ "grad_norm": 0.3327798602308651,
+ "learning_rate": 1.58990867387069e-07,
+ "loss": 0.0957,
+ "step": 2200
+ },
+ {
+ "epoch": 16.548872180451127,
+ "grad_norm": 0.2860844533103062,
+ "learning_rate": 1.5831918019535994e-07,
+ "loss": 0.0886,
+ "step": 2201
+ },
+ {
+ "epoch": 16.55639097744361,
+ "grad_norm": 0.26577171002874705,
+ "learning_rate": 1.5764879285287946e-07,
+ "loss": 0.0872,
+ "step": 2202
+ },
+ {
+ "epoch": 16.56390977443609,
+ "grad_norm": 0.32234525918355755,
+ "learning_rate": 1.569797063949404e-07,
+ "loss": 0.0923,
+ "step": 2203
+ },
+ {
+ "epoch": 16.571428571428573,
+ "grad_norm": 0.29364294954774156,
+ "learning_rate": 1.5631192185484554e-07,
+ "loss": 0.0971,
+ "step": 2204
+ },
+ {
+ "epoch": 16.57894736842105,
+ "grad_norm": 0.2782626594884415,
+ "learning_rate": 1.5564544026388792e-07,
+ "loss": 0.0933,
+ "step": 2205
+ },
+ {
+ "epoch": 16.586466165413533,
+ "grad_norm": 0.27071615971135415,
+ "learning_rate": 1.5498026265134745e-07,
+ "loss": 0.0868,
+ "step": 2206
+ },
+ {
+ "epoch": 16.593984962406015,
+ "grad_norm": 0.28501486193928705,
+ "learning_rate": 1.5431639004449125e-07,
+ "loss": 0.0839,
+ "step": 2207
+ },
+ {
+ "epoch": 16.601503759398497,
+ "grad_norm": 0.28980736273738406,
+ "learning_rate": 1.5365382346857002e-07,
+ "loss": 0.0908,
+ "step": 2208
+ },
+ {
+ "epoch": 16.60902255639098,
+ "grad_norm": 1.3595142719023594,
+ "learning_rate": 1.529925639468186e-07,
+ "loss": 0.0928,
+ "step": 2209
+ },
+ {
+ "epoch": 16.616541353383457,
+ "grad_norm": 0.2783982157272442,
+ "learning_rate": 1.5233261250045215e-07,
+ "loss": 0.0959,
+ "step": 2210
+ },
+ {
+ "epoch": 16.62406015037594,
+ "grad_norm": 0.2723725197699977,
+ "learning_rate": 1.5167397014866679e-07,
+ "loss": 0.0976,
+ "step": 2211
+ },
+ {
+ "epoch": 16.63157894736842,
+ "grad_norm": 0.2712280904406054,
+ "learning_rate": 1.5101663790863595e-07,
+ "loss": 0.092,
+ "step": 2212
+ },
+ {
+ "epoch": 16.639097744360903,
+ "grad_norm": 0.2767561006132685,
+ "learning_rate": 1.503606167955107e-07,
+ "loss": 0.0955,
+ "step": 2213
+ },
+ {
+ "epoch": 16.646616541353385,
+ "grad_norm": 0.25664880340524465,
+ "learning_rate": 1.4970590782241643e-07,
+ "loss": 0.0846,
+ "step": 2214
+ },
+ {
+ "epoch": 16.654135338345863,
+ "grad_norm": 0.2801499766841642,
+ "learning_rate": 1.4905251200045254e-07,
+ "loss": 0.0839,
+ "step": 2215
+ },
+ {
+ "epoch": 16.661654135338345,
+ "grad_norm": 0.28644606564108066,
+ "learning_rate": 1.4840043033869076e-07,
+ "loss": 0.1012,
+ "step": 2216
+ },
+ {
+ "epoch": 16.669172932330827,
+ "grad_norm": 0.275457467534494,
+ "learning_rate": 1.4774966384417252e-07,
+ "loss": 0.088,
+ "step": 2217
+ },
+ {
+ "epoch": 16.67669172932331,
+ "grad_norm": 0.2976322759155441,
+ "learning_rate": 1.4710021352190916e-07,
+ "loss": 0.0961,
+ "step": 2218
+ },
+ {
+ "epoch": 16.68421052631579,
+ "grad_norm": 0.2776350271826587,
+ "learning_rate": 1.4645208037487843e-07,
+ "loss": 0.0981,
+ "step": 2219
+ },
+ {
+ "epoch": 16.69172932330827,
+ "grad_norm": 0.29412261484802504,
+ "learning_rate": 1.4580526540402461e-07,
+ "loss": 0.0834,
+ "step": 2220
+ },
+ {
+ "epoch": 16.69924812030075,
+ "grad_norm": 0.2719935989445136,
+ "learning_rate": 1.451597696082557e-07,
+ "loss": 0.0978,
+ "step": 2221
+ },
+ {
+ "epoch": 16.706766917293233,
+ "grad_norm": 0.273462927092327,
+ "learning_rate": 1.4451559398444313e-07,
+ "loss": 0.0977,
+ "step": 2222
+ },
+ {
+ "epoch": 16.714285714285715,
+ "grad_norm": 0.2911077964955262,
+ "learning_rate": 1.4387273952741863e-07,
+ "loss": 0.095,
+ "step": 2223
+ },
+ {
+ "epoch": 16.721804511278194,
+ "grad_norm": 0.27352505309408903,
+ "learning_rate": 1.432312072299746e-07,
+ "loss": 0.0957,
+ "step": 2224
+ },
+ {
+ "epoch": 16.729323308270676,
+ "grad_norm": 0.3438966020266421,
+ "learning_rate": 1.4259099808286047e-07,
+ "loss": 0.0973,
+ "step": 2225
+ },
+ {
+ "epoch": 16.736842105263158,
+ "grad_norm": 0.27965019727250856,
+ "learning_rate": 1.4195211307478328e-07,
+ "loss": 0.0871,
+ "step": 2226
+ },
+ {
+ "epoch": 16.74436090225564,
+ "grad_norm": 0.281605332001671,
+ "learning_rate": 1.4131455319240426e-07,
+ "loss": 0.0892,
+ "step": 2227
+ },
+ {
+ "epoch": 16.75187969924812,
+ "grad_norm": 0.2832762096006877,
+ "learning_rate": 1.4067831942033902e-07,
+ "loss": 0.0919,
+ "step": 2228
+ },
+ {
+ "epoch": 16.7593984962406,
+ "grad_norm": 0.30505713708414706,
+ "learning_rate": 1.4004341274115438e-07,
+ "loss": 0.0924,
+ "step": 2229
+ },
+ {
+ "epoch": 16.76691729323308,
+ "grad_norm": 0.27137585834730515,
+ "learning_rate": 1.3940983413536845e-07,
+ "loss": 0.0907,
+ "step": 2230
+ },
+ {
+ "epoch": 16.774436090225564,
+ "grad_norm": 0.3055203452233181,
+ "learning_rate": 1.3877758458144762e-07,
+ "loss": 0.1047,
+ "step": 2231
+ },
+ {
+ "epoch": 16.781954887218046,
+ "grad_norm": 0.2605154929062625,
+ "learning_rate": 1.381466650558063e-07,
+ "loss": 0.0789,
+ "step": 2232
+ },
+ {
+ "epoch": 16.789473684210527,
+ "grad_norm": 0.29625327974695254,
+ "learning_rate": 1.3751707653280443e-07,
+ "loss": 0.0952,
+ "step": 2233
+ },
+ {
+ "epoch": 16.796992481203006,
+ "grad_norm": 0.28622266011739306,
+ "learning_rate": 1.3688881998474699e-07,
+ "loss": 0.1045,
+ "step": 2234
+ },
+ {
+ "epoch": 16.804511278195488,
+ "grad_norm": 0.265573127015782,
+ "learning_rate": 1.3626189638188102e-07,
+ "loss": 0.0847,
+ "step": 2235
+ },
+ {
+ "epoch": 16.81203007518797,
+ "grad_norm": 0.31588465050113074,
+ "learning_rate": 1.3563630669239624e-07,
+ "loss": 0.0994,
+ "step": 2236
+ },
+ {
+ "epoch": 16.81954887218045,
+ "grad_norm": 0.7288540345964466,
+ "learning_rate": 1.3501205188242105e-07,
+ "loss": 0.0855,
+ "step": 2237
+ },
+ {
+ "epoch": 16.827067669172934,
+ "grad_norm": 0.29587141394439515,
+ "learning_rate": 1.343891329160235e-07,
+ "loss": 0.0895,
+ "step": 2238
+ },
+ {
+ "epoch": 16.834586466165412,
+ "grad_norm": 0.2786001538461217,
+ "learning_rate": 1.3376755075520785e-07,
+ "loss": 0.1018,
+ "step": 2239
+ },
+ {
+ "epoch": 16.842105263157894,
+ "grad_norm": 0.28494585411545353,
+ "learning_rate": 1.331473063599139e-07,
+ "loss": 0.0913,
+ "step": 2240
+ },
+ {
+ "epoch": 16.849624060150376,
+ "grad_norm": 0.30763072812527004,
+ "learning_rate": 1.3252840068801607e-07,
+ "loss": 0.0928,
+ "step": 2241
+ },
+ {
+ "epoch": 16.857142857142858,
+ "grad_norm": 0.3177592243923783,
+ "learning_rate": 1.3191083469532061e-07,
+ "loss": 0.0959,
+ "step": 2242
+ },
+ {
+ "epoch": 16.86466165413534,
+ "grad_norm": 0.28003516465359213,
+ "learning_rate": 1.3129460933556547e-07,
+ "loss": 0.0917,
+ "step": 2243
+ },
+ {
+ "epoch": 16.872180451127818,
+ "grad_norm": 0.257866574968607,
+ "learning_rate": 1.306797255604175e-07,
+ "loss": 0.0779,
+ "step": 2244
+ },
+ {
+ "epoch": 16.8796992481203,
+ "grad_norm": 0.3045339281723961,
+ "learning_rate": 1.3006618431947248e-07,
+ "loss": 0.0918,
+ "step": 2245
+ },
+ {
+ "epoch": 16.887218045112782,
+ "grad_norm": 0.278630044046379,
+ "learning_rate": 1.294539865602521e-07,
+ "loss": 0.087,
+ "step": 2246
+ },
+ {
+ "epoch": 16.894736842105264,
+ "grad_norm": 0.2792936704362007,
+ "learning_rate": 1.2884313322820385e-07,
+ "loss": 0.0817,
+ "step": 2247
+ },
+ {
+ "epoch": 16.902255639097746,
+ "grad_norm": 0.34281807914429935,
+ "learning_rate": 1.2823362526669822e-07,
+ "loss": 0.0888,
+ "step": 2248
+ },
+ {
+ "epoch": 16.909774436090224,
+ "grad_norm": 0.29487571890774555,
+ "learning_rate": 1.2762546361702908e-07,
+ "loss": 0.0889,
+ "step": 2249
+ },
+ {
+ "epoch": 16.917293233082706,
+ "grad_norm": 0.30652115177613537,
+ "learning_rate": 1.2701864921840989e-07,
+ "loss": 0.0964,
+ "step": 2250
+ },
+ {
+ "epoch": 16.924812030075188,
+ "grad_norm": 0.2675895722094987,
+ "learning_rate": 1.2641318300797453e-07,
+ "loss": 0.0893,
+ "step": 2251
+ },
+ {
+ "epoch": 16.93233082706767,
+ "grad_norm": 0.2735089005849926,
+ "learning_rate": 1.25809065920774e-07,
+ "loss": 0.0954,
+ "step": 2252
+ },
+ {
+ "epoch": 16.93984962406015,
+ "grad_norm": 0.2905843172376852,
+ "learning_rate": 1.252062988897764e-07,
+ "loss": 0.0963,
+ "step": 2253
+ },
+ {
+ "epoch": 16.94736842105263,
+ "grad_norm": 0.3419049803479409,
+ "learning_rate": 1.2460488284586435e-07,
+ "loss": 0.0897,
+ "step": 2254
+ },
+ {
+ "epoch": 16.954887218045112,
+ "grad_norm": 0.3145406733824775,
+ "learning_rate": 1.2400481871783465e-07,
+ "loss": 0.0899,
+ "step": 2255
+ },
+ {
+ "epoch": 16.962406015037594,
+ "grad_norm": 0.2685404367663736,
+ "learning_rate": 1.2340610743239542e-07,
+ "loss": 0.0906,
+ "step": 2256
+ },
+ {
+ "epoch": 16.969924812030076,
+ "grad_norm": 0.3063957828803379,
+ "learning_rate": 1.2280874991416668e-07,
+ "loss": 0.0925,
+ "step": 2257
+ },
+ {
+ "epoch": 16.977443609022558,
+ "grad_norm": 0.2980333097309084,
+ "learning_rate": 1.2221274708567663e-07,
+ "loss": 0.0969,
+ "step": 2258
+ },
+ {
+ "epoch": 16.984962406015036,
+ "grad_norm": 0.2761287116926986,
+ "learning_rate": 1.2161809986736228e-07,
+ "loss": 0.0878,
+ "step": 2259
+ },
+ {
+ "epoch": 16.992481203007518,
+ "grad_norm": 0.27636840085451553,
+ "learning_rate": 1.210248091775663e-07,
+ "loss": 0.0914,
+ "step": 2260
+ },
+ {
+ "epoch": 17.0,
+ "grad_norm": 0.26897901819566544,
+ "learning_rate": 1.2043287593253703e-07,
+ "loss": 0.0824,
+ "step": 2261
+ },
+ {
+ "epoch": 17.0,
+ "eval_loss": 0.35341760516166687,
+ "eval_runtime": 36.1769,
+ "eval_samples_per_second": 12.356,
+ "eval_steps_per_second": 0.193,
+ "step": 2261
+ },
+ {
+ "epoch": 17.007518796992482,
+ "grad_norm": 0.2746202594529986,
+ "learning_rate": 1.198423010464259e-07,
+ "loss": 0.0696,
+ "step": 2262
+ },
+ {
+ "epoch": 17.015037593984964,
+ "grad_norm": 0.2809622507239482,
+ "learning_rate": 1.1925308543128732e-07,
+ "loss": 0.0862,
+ "step": 2263
+ },
+ {
+ "epoch": 17.022556390977442,
+ "grad_norm": 0.28140207365078884,
+ "learning_rate": 1.1866522999707551e-07,
+ "loss": 0.0886,
+ "step": 2264
+ },
+ {
+ "epoch": 17.030075187969924,
+ "grad_norm": 0.29553302149056837,
+ "learning_rate": 1.1807873565164505e-07,
+ "loss": 0.0909,
+ "step": 2265
+ },
+ {
+ "epoch": 17.037593984962406,
+ "grad_norm": 0.26713933102240084,
+ "learning_rate": 1.1749360330074798e-07,
+ "loss": 0.0806,
+ "step": 2266
+ },
+ {
+ "epoch": 17.045112781954888,
+ "grad_norm": 0.25760787397691953,
+ "learning_rate": 1.1690983384803288e-07,
+ "loss": 0.0809,
+ "step": 2267
+ },
+ {
+ "epoch": 17.05263157894737,
+ "grad_norm": 0.26874720474790886,
+ "learning_rate": 1.1632742819504404e-07,
+ "loss": 0.0911,
+ "step": 2268
+ },
+ {
+ "epoch": 17.06015037593985,
+ "grad_norm": 0.2586903307510066,
+ "learning_rate": 1.1574638724121887e-07,
+ "loss": 0.0958,
+ "step": 2269
+ },
+ {
+ "epoch": 17.06766917293233,
+ "grad_norm": 0.26206738256002476,
+ "learning_rate": 1.1516671188388805e-07,
+ "loss": 0.0941,
+ "step": 2270
+ },
+ {
+ "epoch": 17.075187969924812,
+ "grad_norm": 0.2717552769893455,
+ "learning_rate": 1.1458840301827233e-07,
+ "loss": 0.0947,
+ "step": 2271
+ },
+ {
+ "epoch": 17.082706766917294,
+ "grad_norm": 0.2771427016120492,
+ "learning_rate": 1.140114615374831e-07,
+ "loss": 0.0909,
+ "step": 2272
+ },
+ {
+ "epoch": 17.090225563909776,
+ "grad_norm": 0.27346121235464294,
+ "learning_rate": 1.1343588833251928e-07,
+ "loss": 0.0915,
+ "step": 2273
+ },
+ {
+ "epoch": 17.097744360902254,
+ "grad_norm": 0.26410033688067636,
+ "learning_rate": 1.1286168429226717e-07,
+ "loss": 0.0941,
+ "step": 2274
+ },
+ {
+ "epoch": 17.105263157894736,
+ "grad_norm": 0.2962855757610137,
+ "learning_rate": 1.122888503034981e-07,
+ "loss": 0.0974,
+ "step": 2275
+ },
+ {
+ "epoch": 17.112781954887218,
+ "grad_norm": 0.3199449484964692,
+ "learning_rate": 1.1171738725086832e-07,
+ "loss": 0.0864,
+ "step": 2276
+ },
+ {
+ "epoch": 17.1203007518797,
+ "grad_norm": 0.32638143720776636,
+ "learning_rate": 1.1114729601691585e-07,
+ "loss": 0.0878,
+ "step": 2277
+ },
+ {
+ "epoch": 17.127819548872182,
+ "grad_norm": 0.2786149154022678,
+ "learning_rate": 1.1057857748206145e-07,
+ "loss": 0.0836,
+ "step": 2278
+ },
+ {
+ "epoch": 17.13533834586466,
+ "grad_norm": 0.2644364430783197,
+ "learning_rate": 1.1001123252460443e-07,
+ "loss": 0.0882,
+ "step": 2279
+ },
+ {
+ "epoch": 17.142857142857142,
+ "grad_norm": 0.2619785374381697,
+ "learning_rate": 1.0944526202072423e-07,
+ "loss": 0.0873,
+ "step": 2280
+ },
+ {
+ "epoch": 17.150375939849624,
+ "grad_norm": 0.2711281630566933,
+ "learning_rate": 1.0888066684447662e-07,
+ "loss": 0.0839,
+ "step": 2281
+ },
+ {
+ "epoch": 17.157894736842106,
+ "grad_norm": 0.26537886485299816,
+ "learning_rate": 1.0831744786779417e-07,
+ "loss": 0.0916,
+ "step": 2282
+ },
+ {
+ "epoch": 17.165413533834588,
+ "grad_norm": 0.2644289999532647,
+ "learning_rate": 1.0775560596048339e-07,
+ "loss": 0.0827,
+ "step": 2283
+ },
+ {
+ "epoch": 17.172932330827066,
+ "grad_norm": 0.2649538596185435,
+ "learning_rate": 1.0719514199022472e-07,
+ "loss": 0.089,
+ "step": 2284
+ },
+ {
+ "epoch": 17.18045112781955,
+ "grad_norm": 0.2863427316268458,
+ "learning_rate": 1.0663605682257005e-07,
+ "loss": 0.0806,
+ "step": 2285
+ },
+ {
+ "epoch": 17.18796992481203,
+ "grad_norm": 0.2769730559584381,
+ "learning_rate": 1.0607835132094257e-07,
+ "loss": 0.0923,
+ "step": 2286
+ },
+ {
+ "epoch": 17.195488721804512,
+ "grad_norm": 0.7312997330998975,
+ "learning_rate": 1.055220263466341e-07,
+ "loss": 0.0911,
+ "step": 2287
+ },
+ {
+ "epoch": 17.203007518796994,
+ "grad_norm": 0.2643141334456369,
+ "learning_rate": 1.0496708275880495e-07,
+ "loss": 0.0944,
+ "step": 2288
+ },
+ {
+ "epoch": 17.210526315789473,
+ "grad_norm": 0.30730161988699095,
+ "learning_rate": 1.0441352141448156e-07,
+ "loss": 0.0886,
+ "step": 2289
+ },
+ {
+ "epoch": 17.218045112781954,
+ "grad_norm": 0.2798575547670645,
+ "learning_rate": 1.0386134316855666e-07,
+ "loss": 0.0935,
+ "step": 2290
+ },
+ {
+ "epoch": 17.225563909774436,
+ "grad_norm": 0.2748482264239042,
+ "learning_rate": 1.0331054887378566e-07,
+ "loss": 0.0819,
+ "step": 2291
+ },
+ {
+ "epoch": 17.23308270676692,
+ "grad_norm": 0.2597862857966871,
+ "learning_rate": 1.0276113938078768e-07,
+ "loss": 0.0823,
+ "step": 2292
+ },
+ {
+ "epoch": 17.2406015037594,
+ "grad_norm": 0.4907957736440581,
+ "learning_rate": 1.0221311553804312e-07,
+ "loss": 0.0834,
+ "step": 2293
+ },
+ {
+ "epoch": 17.24812030075188,
+ "grad_norm": 0.34895058881424945,
+ "learning_rate": 1.01666478191892e-07,
+ "loss": 0.0889,
+ "step": 2294
+ },
+ {
+ "epoch": 17.25563909774436,
+ "grad_norm": 0.2807651085250727,
+ "learning_rate": 1.0112122818653345e-07,
+ "loss": 0.083,
+ "step": 2295
+ },
+ {
+ "epoch": 17.263157894736842,
+ "grad_norm": 0.33033565156179723,
+ "learning_rate": 1.0057736636402381e-07,
+ "loss": 0.0989,
+ "step": 2296
+ },
+ {
+ "epoch": 17.270676691729324,
+ "grad_norm": 0.26307392502344096,
+ "learning_rate": 1.0003489356427596e-07,
+ "loss": 0.0901,
+ "step": 2297
+ },
+ {
+ "epoch": 17.278195488721803,
+ "grad_norm": 0.28159972014133106,
+ "learning_rate": 9.949381062505723e-08,
+ "loss": 0.092,
+ "step": 2298
+ },
+ {
+ "epoch": 17.285714285714285,
+ "grad_norm": 0.2690700170455822,
+ "learning_rate": 9.895411838198886e-08,
+ "loss": 0.0859,
+ "step": 2299
+ },
+ {
+ "epoch": 17.293233082706767,
+ "grad_norm": 0.5561694380049415,
+ "learning_rate": 9.8415817668544e-08,
+ "loss": 0.0914,
+ "step": 2300
+ },
+ {
+ "epoch": 17.30075187969925,
+ "grad_norm": 0.27583509926646604,
+ "learning_rate": 9.787890931604737e-08,
+ "loss": 0.088,
+ "step": 2301
+ },
+ {
+ "epoch": 17.30827067669173,
+ "grad_norm": 0.2756648613236237,
+ "learning_rate": 9.734339415367254e-08,
+ "loss": 0.0891,
+ "step": 2302
+ },
+ {
+ "epoch": 17.31578947368421,
+ "grad_norm": 0.26594858665340093,
+ "learning_rate": 9.680927300844243e-08,
+ "loss": 0.0771,
+ "step": 2303
+ },
+ {
+ "epoch": 17.32330827067669,
+ "grad_norm": 0.3124219125005735,
+ "learning_rate": 9.627654670522645e-08,
+ "loss": 0.0913,
+ "step": 2304
+ },
+ {
+ "epoch": 17.330827067669173,
+ "grad_norm": 0.3095689703603318,
+ "learning_rate": 9.574521606674035e-08,
+ "loss": 0.0914,
+ "step": 2305
+ },
+ {
+ "epoch": 17.338345864661655,
+ "grad_norm": 0.2870268449327643,
+ "learning_rate": 9.521528191354389e-08,
+ "loss": 0.0885,
+ "step": 2306
+ },
+ {
+ "epoch": 17.345864661654137,
+ "grad_norm": 0.3155172473644976,
+ "learning_rate": 9.468674506404095e-08,
+ "loss": 0.093,
+ "step": 2307
+ },
+ {
+ "epoch": 17.353383458646615,
+ "grad_norm": 0.273997869075056,
+ "learning_rate": 9.415960633447673e-08,
+ "loss": 0.0916,
+ "step": 2308
+ },
+ {
+ "epoch": 17.360902255639097,
+ "grad_norm": 0.2712279089731928,
+ "learning_rate": 9.36338665389379e-08,
+ "loss": 0.0965,
+ "step": 2309
+ },
+ {
+ "epoch": 17.36842105263158,
+ "grad_norm": 0.28207901520071643,
+ "learning_rate": 9.310952648935e-08,
+ "loss": 0.0913,
+ "step": 2310
+ },
+ {
+ "epoch": 17.37593984962406,
+ "grad_norm": 0.30014752447683374,
+ "learning_rate": 9.258658699547762e-08,
+ "loss": 0.0929,
+ "step": 2311
+ },
+ {
+ "epoch": 17.383458646616543,
+ "grad_norm": 0.2591969576125931,
+ "learning_rate": 9.206504886492161e-08,
+ "loss": 0.0949,
+ "step": 2312
+ },
+ {
+ "epoch": 17.39097744360902,
+ "grad_norm": 0.2738772478764185,
+ "learning_rate": 9.15449129031196e-08,
+ "loss": 0.0854,
+ "step": 2313
+ },
+ {
+ "epoch": 17.398496240601503,
+ "grad_norm": 0.26749701365872813,
+ "learning_rate": 9.102617991334272e-08,
+ "loss": 0.0886,
+ "step": 2314
+ },
+ {
+ "epoch": 17.406015037593985,
+ "grad_norm": 0.27521713876992066,
+ "learning_rate": 9.050885069669622e-08,
+ "loss": 0.0902,
+ "step": 2315
+ },
+ {
+ "epoch": 17.413533834586467,
+ "grad_norm": 0.27987974683404776,
+ "learning_rate": 8.999292605211694e-08,
+ "loss": 0.0931,
+ "step": 2316
+ },
+ {
+ "epoch": 17.42105263157895,
+ "grad_norm": 0.30114951203977086,
+ "learning_rate": 8.947840677637298e-08,
+ "loss": 0.0961,
+ "step": 2317
+ },
+ {
+ "epoch": 17.428571428571427,
+ "grad_norm": 0.2635099379208972,
+ "learning_rate": 8.896529366406181e-08,
+ "loss": 0.0917,
+ "step": 2318
+ },
+ {
+ "epoch": 17.43609022556391,
+ "grad_norm": 0.27624585060707374,
+ "learning_rate": 8.845358750760901e-08,
+ "loss": 0.0846,
+ "step": 2319
+ },
+ {
+ "epoch": 17.44360902255639,
+ "grad_norm": 0.25356090162866607,
+ "learning_rate": 8.794328909726822e-08,
+ "loss": 0.0921,
+ "step": 2320
+ },
+ {
+ "epoch": 17.451127819548873,
+ "grad_norm": 0.2631740247312996,
+ "learning_rate": 8.743439922111784e-08,
+ "loss": 0.0861,
+ "step": 2321
+ },
+ {
+ "epoch": 17.458646616541355,
+ "grad_norm": 0.2672255926754012,
+ "learning_rate": 8.692691866506219e-08,
+ "loss": 0.0907,
+ "step": 2322
+ },
+ {
+ "epoch": 17.466165413533833,
+ "grad_norm": 0.2740214926329061,
+ "learning_rate": 8.642084821282802e-08,
+ "loss": 0.0812,
+ "step": 2323
+ },
+ {
+ "epoch": 17.473684210526315,
+ "grad_norm": 0.28396503401311035,
+ "learning_rate": 8.59161886459654e-08,
+ "loss": 0.0964,
+ "step": 2324
+ },
+ {
+ "epoch": 17.481203007518797,
+ "grad_norm": 0.2577684020073858,
+ "learning_rate": 8.541294074384465e-08,
+ "loss": 0.0874,
+ "step": 2325
+ },
+ {
+ "epoch": 17.48872180451128,
+ "grad_norm": 0.3089645523755116,
+ "learning_rate": 8.491110528365652e-08,
+ "loss": 0.0925,
+ "step": 2326
+ },
+ {
+ "epoch": 17.49624060150376,
+ "grad_norm": 0.28006859663879946,
+ "learning_rate": 8.44106830404101e-08,
+ "loss": 0.0874,
+ "step": 2327
+ },
+ {
+ "epoch": 17.50375939849624,
+ "grad_norm": 0.26399574892263294,
+ "learning_rate": 8.39116747869324e-08,
+ "loss": 0.091,
+ "step": 2328
+ },
+ {
+ "epoch": 17.51127819548872,
+ "grad_norm": 0.3047174365711635,
+ "learning_rate": 8.341408129386629e-08,
+ "loss": 0.0881,
+ "step": 2329
+ },
+ {
+ "epoch": 17.518796992481203,
+ "grad_norm": 0.26639875464469936,
+ "learning_rate": 8.291790332967007e-08,
+ "loss": 0.0859,
+ "step": 2330
+ },
+ {
+ "epoch": 17.526315789473685,
+ "grad_norm": 0.3491920636618503,
+ "learning_rate": 8.242314166061581e-08,
+ "loss": 0.0894,
+ "step": 2331
+ },
+ {
+ "epoch": 17.533834586466167,
+ "grad_norm": 0.279297525323214,
+ "learning_rate": 8.19297970507885e-08,
+ "loss": 0.0898,
+ "step": 2332
+ },
+ {
+ "epoch": 17.541353383458645,
+ "grad_norm": 0.4806614428417323,
+ "learning_rate": 8.143787026208426e-08,
+ "loss": 0.0937,
+ "step": 2333
+ },
+ {
+ "epoch": 17.548872180451127,
+ "grad_norm": 0.31350163842584655,
+ "learning_rate": 8.094736205421026e-08,
+ "loss": 0.0861,
+ "step": 2334
+ },
+ {
+ "epoch": 17.55639097744361,
+ "grad_norm": 0.2729729783860566,
+ "learning_rate": 8.045827318468224e-08,
+ "loss": 0.0929,
+ "step": 2335
+ },
+ {
+ "epoch": 17.56390977443609,
+ "grad_norm": 0.283321411878572,
+ "learning_rate": 7.997060440882453e-08,
+ "loss": 0.0939,
+ "step": 2336
+ },
+ {
+ "epoch": 17.571428571428573,
+ "grad_norm": 0.3298353058713564,
+ "learning_rate": 7.94843564797678e-08,
+ "loss": 0.0899,
+ "step": 2337
+ },
+ {
+ "epoch": 17.57894736842105,
+ "grad_norm": 0.2679704451072854,
+ "learning_rate": 7.899953014844918e-08,
+ "loss": 0.0884,
+ "step": 2338
+ },
+ {
+ "epoch": 17.586466165413533,
+ "grad_norm": 0.27726908193127664,
+ "learning_rate": 7.851612616360937e-08,
+ "loss": 0.0832,
+ "step": 2339
+ },
+ {
+ "epoch": 17.593984962406015,
+ "grad_norm": 0.3702450510440619,
+ "learning_rate": 7.803414527179342e-08,
+ "loss": 0.0911,
+ "step": 2340
+ },
+ {
+ "epoch": 17.601503759398497,
+ "grad_norm": 0.292378488897541,
+ "learning_rate": 7.755358821734782e-08,
+ "loss": 0.0909,
+ "step": 2341
+ },
+ {
+ "epoch": 17.60902255639098,
+ "grad_norm": 0.28906469802597806,
+ "learning_rate": 7.707445574242099e-08,
+ "loss": 0.0909,
+ "step": 2342
+ },
+ {
+ "epoch": 17.616541353383457,
+ "grad_norm": 0.27054194865979425,
+ "learning_rate": 7.659674858696041e-08,
+ "loss": 0.0941,
+ "step": 2343
+ },
+ {
+ "epoch": 17.62406015037594,
+ "grad_norm": 0.29192175176273094,
+ "learning_rate": 7.612046748871326e-08,
+ "loss": 0.0872,
+ "step": 2344
+ },
+ {
+ "epoch": 17.63157894736842,
+ "grad_norm": 0.28547117065211064,
+ "learning_rate": 7.564561318322371e-08,
+ "loss": 0.0938,
+ "step": 2345
+ },
+ {
+ "epoch": 17.639097744360903,
+ "grad_norm": 0.28492782730262334,
+ "learning_rate": 7.51721864038326e-08,
+ "loss": 0.0978,
+ "step": 2346
+ },
+ {
+ "epoch": 17.646616541353385,
+ "grad_norm": 0.27202077851680356,
+ "learning_rate": 7.470018788167643e-08,
+ "loss": 0.0869,
+ "step": 2347
+ },
+ {
+ "epoch": 17.654135338345863,
+ "grad_norm": 0.42027829319988025,
+ "learning_rate": 7.422961834568563e-08,
+ "loss": 0.0911,
+ "step": 2348
+ },
+ {
+ "epoch": 17.661654135338345,
+ "grad_norm": 0.26271352248269164,
+ "learning_rate": 7.376047852258426e-08,
+ "loss": 0.0922,
+ "step": 2349
+ },
+ {
+ "epoch": 17.669172932330827,
+ "grad_norm": 0.3388950936007761,
+ "learning_rate": 7.329276913688787e-08,
+ "loss": 0.0932,
+ "step": 2350
+ },
+ {
+ "epoch": 17.67669172932331,
+ "grad_norm": 0.27667524646570085,
+ "learning_rate": 7.282649091090332e-08,
+ "loss": 0.0791,
+ "step": 2351
+ },
+ {
+ "epoch": 17.68421052631579,
+ "grad_norm": 0.26485739703271843,
+ "learning_rate": 7.236164456472671e-08,
+ "loss": 0.0859,
+ "step": 2352
+ },
+ {
+ "epoch": 17.69172932330827,
+ "grad_norm": 0.2656983706036729,
+ "learning_rate": 7.189823081624368e-08,
+ "loss": 0.0875,
+ "step": 2353
+ },
+ {
+ "epoch": 17.69924812030075,
+ "grad_norm": 0.3013054724671779,
+ "learning_rate": 7.143625038112666e-08,
+ "loss": 0.0834,
+ "step": 2354
+ },
+ {
+ "epoch": 17.706766917293233,
+ "grad_norm": 0.3281438615269998,
+ "learning_rate": 7.097570397283492e-08,
+ "loss": 0.0954,
+ "step": 2355
+ },
+ {
+ "epoch": 17.714285714285715,
+ "grad_norm": 0.29022814222973004,
+ "learning_rate": 7.051659230261297e-08,
+ "loss": 0.0966,
+ "step": 2356
+ },
+ {
+ "epoch": 17.721804511278194,
+ "grad_norm": 0.2699134929516921,
+ "learning_rate": 7.005891607948977e-08,
+ "loss": 0.0879,
+ "step": 2357
+ },
+ {
+ "epoch": 17.729323308270676,
+ "grad_norm": 0.2692411851794957,
+ "learning_rate": 6.960267601027691e-08,
+ "loss": 0.094,
+ "step": 2358
+ },
+ {
+ "epoch": 17.736842105263158,
+ "grad_norm": 0.27746134889893775,
+ "learning_rate": 6.914787279956902e-08,
+ "loss": 0.0789,
+ "step": 2359
+ },
+ {
+ "epoch": 17.74436090225564,
+ "grad_norm": 0.27394641558035904,
+ "learning_rate": 6.869450714974057e-08,
+ "loss": 0.081,
+ "step": 2360
+ },
+ {
+ "epoch": 17.75187969924812,
+ "grad_norm": 0.34157598063045325,
+ "learning_rate": 6.824257976094694e-08,
+ "loss": 0.0824,
+ "step": 2361
+ },
+ {
+ "epoch": 17.7593984962406,
+ "grad_norm": 0.3004422961982896,
+ "learning_rate": 6.779209133112163e-08,
+ "loss": 0.0861,
+ "step": 2362
+ },
+ {
+ "epoch": 17.76691729323308,
+ "grad_norm": 0.279515497121471,
+ "learning_rate": 6.734304255597634e-08,
+ "loss": 0.0846,
+ "step": 2363
+ },
+ {
+ "epoch": 17.774436090225564,
+ "grad_norm": 0.28307900561889454,
+ "learning_rate": 6.689543412899911e-08,
+ "loss": 0.0913,
+ "step": 2364
+ },
+ {
+ "epoch": 17.781954887218046,
+ "grad_norm": 0.2649799600301815,
+ "learning_rate": 6.64492667414539e-08,
+ "loss": 0.0917,
+ "step": 2365
+ },
+ {
+ "epoch": 17.789473684210527,
+ "grad_norm": 0.2929995866567021,
+ "learning_rate": 6.600454108237874e-08,
+ "loss": 0.0918,
+ "step": 2366
+ },
+ {
+ "epoch": 17.796992481203006,
+ "grad_norm": 0.2675791525651904,
+ "learning_rate": 6.556125783858568e-08,
+ "loss": 0.087,
+ "step": 2367
+ },
+ {
+ "epoch": 17.804511278195488,
+ "grad_norm": 0.2685161627576881,
+ "learning_rate": 6.511941769465878e-08,
+ "loss": 0.0957,
+ "step": 2368
+ },
+ {
+ "epoch": 17.81203007518797,
+ "grad_norm": 0.31348092039436826,
+ "learning_rate": 6.467902133295366e-08,
+ "loss": 0.085,
+ "step": 2369
+ },
+ {
+ "epoch": 17.81954887218045,
+ "grad_norm": 0.27497905743886725,
+ "learning_rate": 6.424006943359606e-08,
+ "loss": 0.0839,
+ "step": 2370
+ },
+ {
+ "epoch": 17.827067669172934,
+ "grad_norm": 0.2787030979378094,
+ "learning_rate": 6.380256267448114e-08,
+ "loss": 0.0921,
+ "step": 2371
+ },
+ {
+ "epoch": 17.834586466165412,
+ "grad_norm": 0.3163708822754905,
+ "learning_rate": 6.336650173127223e-08,
+ "loss": 0.0914,
+ "step": 2372
+ },
+ {
+ "epoch": 17.842105263157894,
+ "grad_norm": 0.2724145674474142,
+ "learning_rate": 6.293188727739962e-08,
+ "loss": 0.0891,
+ "step": 2373
+ },
+ {
+ "epoch": 17.849624060150376,
+ "grad_norm": 0.276850226453316,
+ "learning_rate": 6.249871998405998e-08,
+ "loss": 0.092,
+ "step": 2374
+ },
+ {
+ "epoch": 17.857142857142858,
+ "grad_norm": 0.4435537654896566,
+ "learning_rate": 6.206700052021474e-08,
+ "loss": 0.08,
+ "step": 2375
+ },
+ {
+ "epoch": 17.86466165413534,
+ "grad_norm": 0.29556423727557424,
+ "learning_rate": 6.163672955258981e-08,
+ "loss": 0.0894,
+ "step": 2376
+ },
+ {
+ "epoch": 17.872180451127818,
+ "grad_norm": 0.2711846534621648,
+ "learning_rate": 6.120790774567375e-08,
+ "loss": 0.081,
+ "step": 2377
+ },
+ {
+ "epoch": 17.8796992481203,
+ "grad_norm": 0.28395890182480643,
+ "learning_rate": 6.078053576171738e-08,
+ "loss": 0.0932,
+ "step": 2378
+ },
+ {
+ "epoch": 17.887218045112782,
+ "grad_norm": 0.4338707665825218,
+ "learning_rate": 6.035461426073219e-08,
+ "loss": 0.0914,
+ "step": 2379
+ },
+ {
+ "epoch": 17.894736842105264,
+ "grad_norm": 0.3141116380825378,
+ "learning_rate": 5.99301439004899e-08,
+ "loss": 0.0826,
+ "step": 2380
+ },
+ {
+ "epoch": 17.902255639097746,
+ "grad_norm": 0.28751647126158086,
+ "learning_rate": 5.9507125336520805e-08,
+ "loss": 0.0788,
+ "step": 2381
+ },
+ {
+ "epoch": 17.909774436090224,
+ "grad_norm": 0.27412180677461184,
+ "learning_rate": 5.908555922211367e-08,
+ "loss": 0.0867,
+ "step": 2382
+ },
+ {
+ "epoch": 17.917293233082706,
+ "grad_norm": 0.266130269529754,
+ "learning_rate": 5.8665446208313486e-08,
+ "loss": 0.0887,
+ "step": 2383
+ },
+ {
+ "epoch": 17.924812030075188,
+ "grad_norm": 0.2736008685784562,
+ "learning_rate": 5.824678694392193e-08,
+ "loss": 0.0839,
+ "step": 2384
+ },
+ {
+ "epoch": 17.93233082706767,
+ "grad_norm": 0.2901551726265573,
+ "learning_rate": 5.782958207549482e-08,
+ "loss": 0.0795,
+ "step": 2385
+ },
+ {
+ "epoch": 17.93984962406015,
+ "grad_norm": 0.27282973795145254,
+ "learning_rate": 5.741383224734253e-08,
+ "loss": 0.0782,
+ "step": 2386
+ },
+ {
+ "epoch": 17.94736842105263,
+ "grad_norm": 0.2822518825060521,
+ "learning_rate": 5.699953810152769e-08,
+ "loss": 0.0921,
+ "step": 2387
+ },
+ {
+ "epoch": 17.954887218045112,
+ "grad_norm": 0.2789554443137904,
+ "learning_rate": 5.6586700277865604e-08,
+ "loss": 0.0951,
+ "step": 2388
+ },
+ {
+ "epoch": 17.962406015037594,
+ "grad_norm": 0.2916191328674473,
+ "learning_rate": 5.617531941392162e-08,
+ "loss": 0.0913,
+ "step": 2389
+ },
+ {
+ "epoch": 17.969924812030076,
+ "grad_norm": 0.29397172477534506,
+ "learning_rate": 5.5765396145011965e-08,
+ "loss": 0.094,
+ "step": 2390
+ },
+ {
+ "epoch": 17.977443609022558,
+ "grad_norm": 0.3158750463899334,
+ "learning_rate": 5.535693110420092e-08,
+ "loss": 0.0939,
+ "step": 2391
+ },
+ {
+ "epoch": 17.984962406015036,
+ "grad_norm": 0.27241197459971345,
+ "learning_rate": 5.494992492230166e-08,
+ "loss": 0.0841,
+ "step": 2392
+ },
+ {
+ "epoch": 17.992481203007518,
+ "grad_norm": 0.2738810124463407,
+ "learning_rate": 5.454437822787361e-08,
+ "loss": 0.0989,
+ "step": 2393
+ },
+ {
+ "epoch": 18.0,
+ "grad_norm": 0.2688594741193054,
+ "learning_rate": 5.414029164722278e-08,
+ "loss": 0.0885,
+ "step": 2394
+ },
+ {
+ "epoch": 18.0,
+ "eval_loss": 0.3576342761516571,
+ "eval_runtime": 35.9706,
+ "eval_samples_per_second": 12.427,
+ "eval_steps_per_second": 0.195,
+ "step": 2394
+ },
+ {
+ "epoch": 18.007518796992482,
+ "grad_norm": 0.26023162764793945,
+ "learning_rate": 5.373766580439976e-08,
+ "loss": 0.0868,
+ "step": 2395
+ },
+ {
+ "epoch": 18.015037593984964,
+ "grad_norm": 0.2582988360973098,
+ "learning_rate": 5.333650132119971e-08,
+ "loss": 0.0815,
+ "step": 2396
+ },
+ {
+ "epoch": 18.022556390977442,
+ "grad_norm": 0.2584915872352455,
+ "learning_rate": 5.293679881716051e-08,
+ "loss": 0.0851,
+ "step": 2397
+ },
+ {
+ "epoch": 18.030075187969924,
+ "grad_norm": 0.2679499585988047,
+ "learning_rate": 5.2538558909562716e-08,
+ "loss": 0.0839,
+ "step": 2398
+ },
+ {
+ "epoch": 18.037593984962406,
+ "grad_norm": 0.2617203985913774,
+ "learning_rate": 5.21417822134278e-08,
+ "loss": 0.0883,
+ "step": 2399
+ },
+ {
+ "epoch": 18.045112781954888,
+ "grad_norm": 0.25985780406914627,
+ "learning_rate": 5.1746469341517497e-08,
+ "loss": 0.0868,
+ "step": 2400
+ },
+ {
+ "epoch": 18.05263157894737,
+ "grad_norm": 0.3178912198978709,
+ "learning_rate": 5.135262090433323e-08,
+ "loss": 0.0946,
+ "step": 2401
+ },
+ {
+ "epoch": 18.06015037593985,
+ "grad_norm": 0.2791624976111889,
+ "learning_rate": 5.096023751011413e-08,
+ "loss": 0.0893,
+ "step": 2402
+ },
+ {
+ "epoch": 18.06766917293233,
+ "grad_norm": 0.26855346886620696,
+ "learning_rate": 5.05693197648378e-08,
+ "loss": 0.0861,
+ "step": 2403
+ },
+ {
+ "epoch": 18.075187969924812,
+ "grad_norm": 0.26485351088697195,
+ "learning_rate": 5.017986827221732e-08,
+ "loss": 0.0933,
+ "step": 2404
+ },
+ {
+ "epoch": 18.082706766917294,
+ "grad_norm": 0.2623126490580351,
+ "learning_rate": 4.979188363370213e-08,
+ "loss": 0.0833,
+ "step": 2405
+ },
+ {
+ "epoch": 18.090225563909776,
+ "grad_norm": 0.2942988639163744,
+ "learning_rate": 4.940536644847593e-08,
+ "loss": 0.085,
+ "step": 2406
+ },
+ {
+ "epoch": 18.097744360902254,
+ "grad_norm": 0.2849947103514226,
+ "learning_rate": 4.9020317313456463e-08,
+ "loss": 0.0787,
+ "step": 2407
+ },
+ {
+ "epoch": 18.105263157894736,
+ "grad_norm": 0.2714017943006747,
+ "learning_rate": 4.863673682329372e-08,
+ "loss": 0.0999,
+ "step": 2408
+ },
+ {
+ "epoch": 18.112781954887218,
+ "grad_norm": 0.28332131438218283,
+ "learning_rate": 4.825462557037052e-08,
+ "loss": 0.0814,
+ "step": 2409
+ },
+ {
+ "epoch": 18.1203007518797,
+ "grad_norm": 0.32504745070119123,
+ "learning_rate": 4.78739841447996e-08,
+ "loss": 0.1012,
+ "step": 2410
+ },
+ {
+ "epoch": 18.127819548872182,
+ "grad_norm": 0.26463705104858865,
+ "learning_rate": 4.749481313442483e-08,
+ "loss": 0.0952,
+ "step": 2411
+ },
+ {
+ "epoch": 18.13533834586466,
+ "grad_norm": 0.2619084800602278,
+ "learning_rate": 4.7117113124818144e-08,
+ "loss": 0.0867,
+ "step": 2412
+ },
+ {
+ "epoch": 18.142857142857142,
+ "grad_norm": 0.27082943225619055,
+ "learning_rate": 4.674088469928084e-08,
+ "loss": 0.0804,
+ "step": 2413
+ },
+ {
+ "epoch": 18.150375939849624,
+ "grad_norm": 0.25498350301298167,
+ "learning_rate": 4.636612843884058e-08,
+ "loss": 0.0817,
+ "step": 2414
+ },
+ {
+ "epoch": 18.157894736842106,
+ "grad_norm": 0.26001768122442404,
+ "learning_rate": 4.59928449222523e-08,
+ "loss": 0.0864,
+ "step": 2415
+ },
+ {
+ "epoch": 18.165413533834588,
+ "grad_norm": 0.2589847519075539,
+ "learning_rate": 4.562103472599599e-08,
+ "loss": 0.0875,
+ "step": 2416
+ },
+ {
+ "epoch": 18.172932330827066,
+ "grad_norm": 0.28595276061833397,
+ "learning_rate": 4.5250698424276536e-08,
+ "loss": 0.0886,
+ "step": 2417
+ },
+ {
+ "epoch": 18.18045112781955,
+ "grad_norm": 0.2720177937430171,
+ "learning_rate": 4.488183658902256e-08,
+ "loss": 0.0889,
+ "step": 2418
+ },
+ {
+ "epoch": 18.18796992481203,
+ "grad_norm": 0.2642758373560932,
+ "learning_rate": 4.451444978988561e-08,
+ "loss": 0.09,
+ "step": 2419
+ },
+ {
+ "epoch": 18.195488721804512,
+ "grad_norm": 0.26495797348069056,
+ "learning_rate": 4.414853859423917e-08,
+ "loss": 0.0881,
+ "step": 2420
+ },
+ {
+ "epoch": 18.203007518796994,
+ "grad_norm": 0.2809913441862084,
+ "learning_rate": 4.37841035671781e-08,
+ "loss": 0.0901,
+ "step": 2421
+ },
+ {
+ "epoch": 18.210526315789473,
+ "grad_norm": 0.25331597633682995,
+ "learning_rate": 4.342114527151719e-08,
+ "loss": 0.0853,
+ "step": 2422
+ },
+ {
+ "epoch": 18.218045112781954,
+ "grad_norm": 0.3565513068140586,
+ "learning_rate": 4.3059664267791175e-08,
+ "loss": 0.0903,
+ "step": 2423
+ },
+ {
+ "epoch": 18.225563909774436,
+ "grad_norm": 0.31287371531745417,
+ "learning_rate": 4.2699661114252714e-08,
+ "loss": 0.0855,
+ "step": 2424
+ },
+ {
+ "epoch": 18.23308270676692,
+ "grad_norm": 0.2954205526821148,
+ "learning_rate": 4.234113636687242e-08,
+ "loss": 0.0903,
+ "step": 2425
+ },
+ {
+ "epoch": 18.2406015037594,
+ "grad_norm": 0.257064475965973,
+ "learning_rate": 4.198409057933805e-08,
+ "loss": 0.0834,
+ "step": 2426
+ },
+ {
+ "epoch": 18.24812030075188,
+ "grad_norm": 0.2681305776486809,
+ "learning_rate": 4.162852430305275e-08,
+ "loss": 0.0925,
+ "step": 2427
+ },
+ {
+ "epoch": 18.25563909774436,
+ "grad_norm": 0.27395418540519306,
+ "learning_rate": 4.127443808713527e-08,
+ "loss": 0.0832,
+ "step": 2428
+ },
+ {
+ "epoch": 18.263157894736842,
+ "grad_norm": 0.27006724647588815,
+ "learning_rate": 4.09218324784183e-08,
+ "loss": 0.0989,
+ "step": 2429
+ },
+ {
+ "epoch": 18.270676691729324,
+ "grad_norm": 0.2800223113969112,
+ "learning_rate": 4.057070802144813e-08,
+ "loss": 0.0943,
+ "step": 2430
+ },
+ {
+ "epoch": 18.278195488721803,
+ "grad_norm": 0.3404142141217579,
+ "learning_rate": 4.022106525848346e-08,
+ "loss": 0.0787,
+ "step": 2431
+ },
+ {
+ "epoch": 18.285714285714285,
+ "grad_norm": 0.3066332274695883,
+ "learning_rate": 3.9872904729495113e-08,
+ "loss": 0.0894,
+ "step": 2432
+ },
+ {
+ "epoch": 18.293233082706767,
+ "grad_norm": 0.27075519320044117,
+ "learning_rate": 3.9526226972164455e-08,
+ "loss": 0.0754,
+ "step": 2433
+ },
+ {
+ "epoch": 18.30075187969925,
+ "grad_norm": 0.264141523351325,
+ "learning_rate": 3.918103252188298e-08,
+ "loss": 0.0849,
+ "step": 2434
+ },
+ {
+ "epoch": 18.30827067669173,
+ "grad_norm": 0.26210058011970655,
+ "learning_rate": 3.88373219117516e-08,
+ "loss": 0.0839,
+ "step": 2435
+ },
+ {
+ "epoch": 18.31578947368421,
+ "grad_norm": 0.26623021196767443,
+ "learning_rate": 3.849509567257958e-08,
+ "loss": 0.0885,
+ "step": 2436
+ },
+ {
+ "epoch": 18.32330827067669,
+ "grad_norm": 0.27513008274642137,
+ "learning_rate": 3.815435433288372e-08,
+ "loss": 0.0973,
+ "step": 2437
+ },
+ {
+ "epoch": 18.330827067669173,
+ "grad_norm": 0.33417491742155875,
+ "learning_rate": 3.7815098418887746e-08,
+ "loss": 0.0894,
+ "step": 2438
+ },
+ {
+ "epoch": 18.338345864661655,
+ "grad_norm": 0.2672776751046192,
+ "learning_rate": 3.747732845452134e-08,
+ "loss": 0.0922,
+ "step": 2439
+ },
+ {
+ "epoch": 18.345864661654137,
+ "grad_norm": 0.27572371518751027,
+ "learning_rate": 3.714104496141923e-08,
+ "loss": 0.0907,
+ "step": 2440
+ },
+ {
+ "epoch": 18.353383458646615,
+ "grad_norm": 0.3037016576935569,
+ "learning_rate": 3.680624845892066e-08,
+ "loss": 0.094,
+ "step": 2441
+ },
+ {
+ "epoch": 18.360902255639097,
+ "grad_norm": 0.26904740489603746,
+ "learning_rate": 3.647293946406849e-08,
+ "loss": 0.0883,
+ "step": 2442
+ },
+ {
+ "epoch": 18.36842105263158,
+ "grad_norm": 0.25243045864027897,
+ "learning_rate": 3.614111849160795e-08,
+ "loss": 0.0813,
+ "step": 2443
+ },
+ {
+ "epoch": 18.37593984962406,
+ "grad_norm": 0.2624197059918996,
+ "learning_rate": 3.581078605398702e-08,
+ "loss": 0.0928,
+ "step": 2444
+ },
+ {
+ "epoch": 18.383458646616543,
+ "grad_norm": 0.2536967720707937,
+ "learning_rate": 3.548194266135385e-08,
+ "loss": 0.0893,
+ "step": 2445
+ },
+ {
+ "epoch": 18.39097744360902,
+ "grad_norm": 0.27146039209029654,
+ "learning_rate": 3.5154588821557975e-08,
+ "loss": 0.0844,
+ "step": 2446
+ },
+ {
+ "epoch": 18.398496240601503,
+ "grad_norm": 0.34690309156200805,
+ "learning_rate": 3.4828725040147776e-08,
+ "loss": 0.0903,
+ "step": 2447
+ },
+ {
+ "epoch": 18.406015037593985,
+ "grad_norm": 0.25748201890831185,
+ "learning_rate": 3.4504351820371035e-08,
+ "loss": 0.0938,
+ "step": 2448
+ },
+ {
+ "epoch": 18.413533834586467,
+ "grad_norm": 0.319607485490621,
+ "learning_rate": 3.418146966317303e-08,
+ "loss": 0.0912,
+ "step": 2449
+ },
+ {
+ "epoch": 18.42105263157895,
+ "grad_norm": 0.2634471835012121,
+ "learning_rate": 3.38600790671969e-08,
+ "loss": 0.0803,
+ "step": 2450
+ },
+ {
+ "epoch": 18.428571428571427,
+ "grad_norm": 0.2706876174054403,
+ "learning_rate": 3.354018052878182e-08,
+ "loss": 0.0804,
+ "step": 2451
+ },
+ {
+ "epoch": 18.43609022556391,
+ "grad_norm": 0.31281179467657033,
+ "learning_rate": 3.3221774541962847e-08,
+ "loss": 0.0872,
+ "step": 2452
+ },
+ {
+ "epoch": 18.44360902255639,
+ "grad_norm": 0.2611337370128115,
+ "learning_rate": 3.2904861598470276e-08,
+ "loss": 0.0897,
+ "step": 2453
+ },
+ {
+ "epoch": 18.451127819548873,
+ "grad_norm": 0.270028652393061,
+ "learning_rate": 3.258944218772819e-08,
+ "loss": 0.0962,
+ "step": 2454
+ },
+ {
+ "epoch": 18.458646616541355,
+ "grad_norm": 0.2638434279400047,
+ "learning_rate": 3.2275516796854585e-08,
+ "loss": 0.0808,
+ "step": 2455
+ },
+ {
+ "epoch": 18.466165413533833,
+ "grad_norm": 0.27492073438431225,
+ "learning_rate": 3.196308591065966e-08,
+ "loss": 0.0906,
+ "step": 2456
+ },
+ {
+ "epoch": 18.473684210526315,
+ "grad_norm": 0.26010698777653213,
+ "learning_rate": 3.165215001164601e-08,
+ "loss": 0.0907,
+ "step": 2457
+ },
+ {
+ "epoch": 18.481203007518797,
+ "grad_norm": 0.2640449478560818,
+ "learning_rate": 3.1342709580007175e-08,
+ "loss": 0.0866,
+ "step": 2458
+ },
+ {
+ "epoch": 18.48872180451128,
+ "grad_norm": 0.28539716784654057,
+ "learning_rate": 3.103476509362757e-08,
+ "loss": 0.0853,
+ "step": 2459
+ },
+ {
+ "epoch": 18.49624060150376,
+ "grad_norm": 0.2905641639254359,
+ "learning_rate": 3.072831702808065e-08,
+ "loss": 0.0883,
+ "step": 2460
+ },
+ {
+ "epoch": 18.50375939849624,
+ "grad_norm": 0.2775316215341824,
+ "learning_rate": 3.0423365856629746e-08,
+ "loss": 0.0887,
+ "step": 2461
+ },
+ {
+ "epoch": 18.51127819548872,
+ "grad_norm": 0.28202855673301763,
+ "learning_rate": 3.011991205022557e-08,
+ "loss": 0.0867,
+ "step": 2462
+ },
+ {
+ "epoch": 18.518796992481203,
+ "grad_norm": 0.27421518327858946,
+ "learning_rate": 2.981795607750704e-08,
+ "loss": 0.0873,
+ "step": 2463
+ },
+ {
+ "epoch": 18.526315789473685,
+ "grad_norm": 0.2682810183442042,
+ "learning_rate": 2.9517498404799668e-08,
+ "loss": 0.09,
+ "step": 2464
+ },
+ {
+ "epoch": 18.533834586466167,
+ "grad_norm": 0.2569847050784203,
+ "learning_rate": 2.921853949611508e-08,
+ "loss": 0.0827,
+ "step": 2465
+ },
+ {
+ "epoch": 18.541353383458645,
+ "grad_norm": 0.25901328464701945,
+ "learning_rate": 2.892107981315006e-08,
+ "loss": 0.0907,
+ "step": 2466
+ },
+ {
+ "epoch": 18.548872180451127,
+ "grad_norm": 0.26336892007006657,
+ "learning_rate": 2.862511981528659e-08,
+ "loss": 0.0953,
+ "step": 2467
+ },
+ {
+ "epoch": 18.55639097744361,
+ "grad_norm": 0.25922291148690846,
+ "learning_rate": 2.8330659959589942e-08,
+ "loss": 0.0863,
+ "step": 2468
+ },
+ {
+ "epoch": 18.56390977443609,
+ "grad_norm": 0.25368488024878916,
+ "learning_rate": 2.8037700700809464e-08,
+ "loss": 0.0898,
+ "step": 2469
+ },
+ {
+ "epoch": 18.571428571428573,
+ "grad_norm": 0.2690385932457634,
+ "learning_rate": 2.7746242491376138e-08,
+ "loss": 0.0943,
+ "step": 2470
+ },
+ {
+ "epoch": 18.57894736842105,
+ "grad_norm": 0.45119316760229716,
+ "learning_rate": 2.7456285781403577e-08,
+ "loss": 0.0908,
+ "step": 2471
+ },
+ {
+ "epoch": 18.586466165413533,
+ "grad_norm": 0.26454950768754626,
+ "learning_rate": 2.7167831018686137e-08,
+ "loss": 0.0841,
+ "step": 2472
+ },
+ {
+ "epoch": 18.593984962406015,
+ "grad_norm": 0.2691856837806106,
+ "learning_rate": 2.6880878648698702e-08,
+ "loss": 0.0758,
+ "step": 2473
+ },
+ {
+ "epoch": 18.601503759398497,
+ "grad_norm": 0.2671923673555449,
+ "learning_rate": 2.659542911459589e-08,
+ "loss": 0.0802,
+ "step": 2474
+ },
+ {
+ "epoch": 18.60902255639098,
+ "grad_norm": 0.26880563007032415,
+ "learning_rate": 2.6311482857211853e-08,
+ "loss": 0.081,
+ "step": 2475
+ },
+ {
+ "epoch": 18.616541353383457,
+ "grad_norm": 0.2736395332334841,
+ "learning_rate": 2.602904031505848e-08,
+ "loss": 0.091,
+ "step": 2476
+ },
+ {
+ "epoch": 18.62406015037594,
+ "grad_norm": 0.28684469639578947,
+ "learning_rate": 2.574810192432575e-08,
+ "loss": 0.09,
+ "step": 2477
+ },
+ {
+ "epoch": 18.63157894736842,
+ "grad_norm": 0.27015587155255316,
+ "learning_rate": 2.5468668118880933e-08,
+ "loss": 0.0913,
+ "step": 2478
+ },
+ {
+ "epoch": 18.639097744360903,
+ "grad_norm": 0.26823861543122496,
+ "learning_rate": 2.5190739330267053e-08,
+ "loss": 0.0957,
+ "step": 2479
+ },
+ {
+ "epoch": 18.646616541353385,
+ "grad_norm": 0.29285419290748516,
+ "learning_rate": 2.491431598770366e-08,
+ "loss": 0.0807,
+ "step": 2480
+ },
+ {
+ "epoch": 18.654135338345863,
+ "grad_norm": 0.3329315265350039,
+ "learning_rate": 2.463939851808472e-08,
+ "loss": 0.0796,
+ "step": 2481
+ },
+ {
+ "epoch": 18.661654135338345,
+ "grad_norm": 0.26844763478663275,
+ "learning_rate": 2.4365987345978946e-08,
+ "loss": 0.0814,
+ "step": 2482
+ },
+ {
+ "epoch": 18.669172932330827,
+ "grad_norm": 0.2631573093471393,
+ "learning_rate": 2.4094082893628574e-08,
+ "loss": 0.0832,
+ "step": 2483
+ },
+ {
+ "epoch": 18.67669172932331,
+ "grad_norm": 0.2610397012455284,
+ "learning_rate": 2.382368558094927e-08,
+ "loss": 0.0884,
+ "step": 2484
+ },
+ {
+ "epoch": 18.68421052631579,
+ "grad_norm": 0.2679164878876356,
+ "learning_rate": 2.355479582552877e-08,
+ "loss": 0.0845,
+ "step": 2485
+ },
+ {
+ "epoch": 18.69172932330827,
+ "grad_norm": 0.26780291081470636,
+ "learning_rate": 2.3287414042626908e-08,
+ "loss": 0.0895,
+ "step": 2486
+ },
+ {
+ "epoch": 18.69924812030075,
+ "grad_norm": 0.26944520807100264,
+ "learning_rate": 2.3021540645174476e-08,
+ "loss": 0.0772,
+ "step": 2487
+ },
+ {
+ "epoch": 18.706766917293233,
+ "grad_norm": 0.2949905747111546,
+ "learning_rate": 2.275717604377292e-08,
+ "loss": 0.093,
+ "step": 2488
+ },
+ {
+ "epoch": 18.714285714285715,
+ "grad_norm": 0.3982211379013551,
+ "learning_rate": 2.2494320646693544e-08,
+ "loss": 0.0887,
+ "step": 2489
+ },
+ {
+ "epoch": 18.721804511278194,
+ "grad_norm": 0.2613325232845665,
+ "learning_rate": 2.2232974859877073e-08,
+ "loss": 0.0824,
+ "step": 2490
+ },
+ {
+ "epoch": 18.729323308270676,
+ "grad_norm": 0.2739275301912728,
+ "learning_rate": 2.1973139086932436e-08,
+ "loss": 0.0878,
+ "step": 2491
+ },
+ {
+ "epoch": 18.736842105263158,
+ "grad_norm": 0.26660886629169694,
+ "learning_rate": 2.1714813729136972e-08,
+ "loss": 0.0824,
+ "step": 2492
+ },
+ {
+ "epoch": 18.74436090225564,
+ "grad_norm": 0.2819558758387217,
+ "learning_rate": 2.1457999185435228e-08,
+ "loss": 0.0961,
+ "step": 2493
+ },
+ {
+ "epoch": 18.75187969924812,
+ "grad_norm": 0.296017248130513,
+ "learning_rate": 2.1202695852438725e-08,
+ "loss": 0.0719,
+ "step": 2494
+ },
+ {
+ "epoch": 18.7593984962406,
+ "grad_norm": 0.2772703830658941,
+ "learning_rate": 2.0948904124424736e-08,
+ "loss": 0.0831,
+ "step": 2495
+ },
+ {
+ "epoch": 18.76691729323308,
+ "grad_norm": 1.0780730775595422,
+ "learning_rate": 2.0696624393336636e-08,
+ "loss": 0.0813,
+ "step": 2496
+ },
+ {
+ "epoch": 18.774436090225564,
+ "grad_norm": 0.43728164383808726,
+ "learning_rate": 2.044585704878221e-08,
+ "loss": 0.0846,
+ "step": 2497
+ },
+ {
+ "epoch": 18.781954887218046,
+ "grad_norm": 0.2630662696520097,
+ "learning_rate": 2.019660247803401e-08,
+ "loss": 0.0801,
+ "step": 2498
+ },
+ {
+ "epoch": 18.789473684210527,
+ "grad_norm": 0.2834731581992547,
+ "learning_rate": 1.9948861066028112e-08,
+ "loss": 0.0861,
+ "step": 2499
+ },
+ {
+ "epoch": 18.796992481203006,
+ "grad_norm": 0.26820420461747774,
+ "learning_rate": 1.9702633195363917e-08,
+ "loss": 0.085,
+ "step": 2500
+ },
+ {
+ "epoch": 18.804511278195488,
+ "grad_norm": 0.27480584871724606,
+ "learning_rate": 1.9457919246303134e-08,
+ "loss": 0.1005,
+ "step": 2501
+ },
+ {
+ "epoch": 18.81203007518797,
+ "grad_norm": 0.2687974770326145,
+ "learning_rate": 1.921471959676957e-08,
+ "loss": 0.0913,
+ "step": 2502
+ },
+ {
+ "epoch": 18.81954887218045,
+ "grad_norm": 0.2716969395984612,
+ "learning_rate": 1.897303462234856e-08,
+ "loss": 0.0884,
+ "step": 2503
+ },
+ {
+ "epoch": 18.827067669172934,
+ "grad_norm": 0.3346896517917557,
+ "learning_rate": 1.87328646962861e-08,
+ "loss": 0.0867,
+ "step": 2504
+ },
+ {
+ "epoch": 18.834586466165412,
+ "grad_norm": 0.2648697548811433,
+ "learning_rate": 1.849421018948849e-08,
+ "loss": 0.0865,
+ "step": 2505
+ },
+ {
+ "epoch": 18.842105263157894,
+ "grad_norm": 0.2831795721248979,
+ "learning_rate": 1.8257071470521467e-08,
+ "loss": 0.0941,
+ "step": 2506
+ },
+ {
+ "epoch": 18.849624060150376,
+ "grad_norm": 0.29677110485370745,
+ "learning_rate": 1.8021448905610414e-08,
+ "loss": 0.0905,
+ "step": 2507
+ },
+ {
+ "epoch": 18.857142857142858,
+ "grad_norm": 0.2740532294665459,
+ "learning_rate": 1.7787342858638588e-08,
+ "loss": 0.0982,
+ "step": 2508
+ },
+ {
+ "epoch": 18.86466165413534,
+ "grad_norm": 0.2745344881665879,
+ "learning_rate": 1.7554753691147672e-08,
+ "loss": 0.0966,
+ "step": 2509
+ },
+ {
+ "epoch": 18.872180451127818,
+ "grad_norm": 0.29226071085281663,
+ "learning_rate": 1.732368176233645e-08,
+ "loss": 0.0835,
+ "step": 2510
+ },
+ {
+ "epoch": 18.8796992481203,
+ "grad_norm": 0.2699338487934352,
+ "learning_rate": 1.709412742906091e-08,
+ "loss": 0.0819,
+ "step": 2511
+ },
+ {
+ "epoch": 18.887218045112782,
+ "grad_norm": 0.26730788303842723,
+ "learning_rate": 1.686609104583292e-08,
+ "loss": 0.079,
+ "step": 2512
+ },
+ {
+ "epoch": 18.894736842105264,
+ "grad_norm": 0.26208673023520646,
+ "learning_rate": 1.6639572964820437e-08,
+ "loss": 0.0801,
+ "step": 2513
+ },
+ {
+ "epoch": 18.902255639097746,
+ "grad_norm": 0.2792736309043197,
+ "learning_rate": 1.641457353584652e-08,
+ "loss": 0.0903,
+ "step": 2514
+ },
+ {
+ "epoch": 18.909774436090224,
+ "grad_norm": 0.27436941595369213,
+ "learning_rate": 1.6191093106388886e-08,
+ "loss": 0.0777,
+ "step": 2515
+ },
+ {
+ "epoch": 18.917293233082706,
+ "grad_norm": 0.2796538766973517,
+ "learning_rate": 1.5969132021579347e-08,
+ "loss": 0.0953,
+ "step": 2516
+ },
+ {
+ "epoch": 18.924812030075188,
+ "grad_norm": 0.5029253714441433,
+ "learning_rate": 1.5748690624203366e-08,
+ "loss": 0.0911,
+ "step": 2517
+ },
+ {
+ "epoch": 18.93233082706767,
+ "grad_norm": 0.27055846172848647,
+ "learning_rate": 1.552976925469951e-08,
+ "loss": 0.0904,
+ "step": 2518
+ },
+ {
+ "epoch": 18.93984962406015,
+ "grad_norm": 0.28212718292289835,
+ "learning_rate": 1.531236825115889e-08,
+ "loss": 0.0886,
+ "step": 2519
+ },
+ {
+ "epoch": 18.94736842105263,
+ "grad_norm": 0.26700917859947804,
+ "learning_rate": 1.50964879493245e-08,
+ "loss": 0.0925,
+ "step": 2520
+ },
+ {
+ "epoch": 18.954887218045112,
+ "grad_norm": 0.2899818008134482,
+ "learning_rate": 1.4882128682590978e-08,
+ "loss": 0.0936,
+ "step": 2521
+ },
+ {
+ "epoch": 18.962406015037594,
+ "grad_norm": 0.26568105942912534,
+ "learning_rate": 1.4669290782003962e-08,
+ "loss": 0.0822,
+ "step": 2522
+ },
+ {
+ "epoch": 18.969924812030076,
+ "grad_norm": 0.27090972451936063,
+ "learning_rate": 1.4457974576259524e-08,
+ "loss": 0.0904,
+ "step": 2523
+ },
+ {
+ "epoch": 18.977443609022558,
+ "grad_norm": 0.27377680340425975,
+ "learning_rate": 1.4248180391703613e-08,
+ "loss": 0.0945,
+ "step": 2524
+ },
+ {
+ "epoch": 18.984962406015036,
+ "grad_norm": 0.29472112557771496,
+ "learning_rate": 1.4039908552331836e-08,
+ "loss": 0.0894,
+ "step": 2525
+ },
+ {
+ "epoch": 18.992481203007518,
+ "grad_norm": 0.33454218698643456,
+ "learning_rate": 1.3833159379788684e-08,
+ "loss": 0.0939,
+ "step": 2526
+ },
+ {
+ "epoch": 19.0,
+ "grad_norm": 0.26102964107162796,
+ "learning_rate": 1.362793319336708e-08,
+ "loss": 0.0809,
+ "step": 2527
+ },
+ {
+ "epoch": 19.0,
+ "eval_loss": 0.35997581481933594,
+ "eval_runtime": 36.0264,
+ "eval_samples_per_second": 12.408,
+ "eval_steps_per_second": 0.194,
+ "step": 2527
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 2660,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 20,
+ "save_steps": 133,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 8897060187144192.0,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/training_args.bin b/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..104c879b6c23187df427e2c93c9b596821dbb8a5
--- /dev/null
+++ b/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edcb45974df536269474e40ea9eb315dc4f5486f1cb316c1355fb208429176ca
+size 10616
diff --git a/zero_to_fp32.py b/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04
--- /dev/null
+++ b/zero_to_fp32.py
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
@dataclass
class zero_model_state:
    """Parsed contents of one rank's ``*_model_states.pt`` checkpoint file.

    Fix: the original annotated several fields with ``dict()`` — an *instance*
    call whose empty-dict value became the annotation — instead of the type
    ``dict``. Dataclasses do not enforce annotations, so runtime behavior is
    unchanged; the declaration is simply made correct and introspectable.
    """
    buffers: dict  # name -> fp32 tensor for registered (non-parameter) buffers
    param_shapes: dict  # per-group mapping of param name -> shape (stored under PARAM_SHAPES; iterated as a list of dicts by callers)
    shared_params: list  # [alias_name, source_name] pairs for tied parameters
    ds_version: int  # deepspeed version the checkpoint was written with (at runtime looks like a version string or None — annotation kept from original; TODO confirm)
    frozen_param_shapes: dict  # name -> shape for frozen params, or None
    frozen_param_fragments: dict  # name -> tensor fragment for frozen params, or None
+
+
# Module-level debug flag; flipped on by the -d/--debug CLI switch in __main__
# and consulted throughout the merge helpers below.
debug = 0

# load to cpu — every checkpoint shard is materialized on the host, never on GPU
device = torch.device('cpu')
+
+
def atoi(text):
    """Return *text* converted to int when it is all digits, else unchanged."""
    if text.isdigit():
        return int(text)
    return text
+
+
def natural_keys(text):
    """Sort key that orders strings in human ("natural") order.

    Splits *text* on runs of digits so that e.g. "step2" sorts before
    "step10". See http://nedbatchelder.com/blog/200712/human_sorting.html
    """
    return [int(chunk) if chunk.isdigit() else chunk for chunk in re.split(r'(\d+)', text)]
+
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path of rank 0's model-states file inside *checkpoint_dir*.

    Args:
        checkpoint_dir: directory holding the ``*_model_states.pt`` files
        zero_stage: the detected ZeRO stage (1, 2 or 3)

    Raises:
        FileNotFoundError: if the directory or the expected file is missing
        ValueError: if ``zero_stage`` is not 1, 2 or 3
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # fix: an unrecognized stage previously fell through and raised
        # UnboundLocalError on the os.path.exists(file) check below
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
+
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Glob *glob_pattern* under *checkpoint_dir*, naturally sorted.

    Natural (human) ordering makes rank 10 sort after rank 2. Raises
    FileNotFoundError when nothing matches.
    """
    # XXX: need to test that this simple glob rule works for multi-node setup too
    pattern = os.path.join(checkpoint_dir, glob_pattern)
    ckpt_files = sorted(
        glob.glob(pattern),
        key=lambda name: [int(c) if c.isdigit() else c for c in re.split(r'(\d+)', name)])

    if not ckpt_files:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files
+
+
def get_optim_files(checkpoint_dir):
    """Return the naturally-sorted list of per-rank ``*_optim_states.pt`` files."""
    pattern = "*_optim_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)
+
+
def get_model_state_files(checkpoint_dir):
    """Return the naturally-sorted list of per-rank ``*_model_states.pt`` files."""
    pattern = "*_model_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)
+
+
def parse_model_states(files):
    """Load every ``*_model_states.pt`` file into a ``zero_model_state``.

    Only buffers, param shapes, shared-param pairs and frozen-param info are
    retained — the full module weights are not needed for reconstruction.
    Raises ValueError when a file lacks the expected checkpoint keys.
    """
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device, weights_only=False)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        module_state = state_dict["module"]
        buffers = {name: tensor.float() for name, tensor in module_state.items() if name in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = [name for group in param_shapes for name in group.keys()]

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[alias, source] for alias, source in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)
        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        zero_model_states.append(
            zero_model_state(buffers=buffers,
                             param_shapes=param_shapes,
                             shared_params=shared_params,
                             ds_version=ds_version,
                             frozen_param_shapes=frozen_param_shapes,
                             frozen_param_fragments=frozen_param_fragments))

    return zero_model_states
+
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load all ``*_optim_states.pt`` shards and extract the merge inputs.

    Returns a ``(zero_stage, world_size, fp32_flat_groups)`` triple where
    ``fp32_flat_groups`` holds, per rank, the fp32 master-weight groups.
    Raises ValueError for non-ZeRO checkpoints, a rank-count mismatch, or an
    unknown stage.
    """
    total_files = len(files)
    state_dicts = []
    for f in tqdm(files, desc='Loading checkpoint shards'):
        state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    first_optim_sd = state_dicts[0][OPTIMIZER_STATE_DICT]
    if ZERO_STAGE not in first_optim_sd:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = first_optim_sd[ZERO_STAGE]
    world_size = first_optim_sd[PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.
    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    fp32_flat_groups = [sd[OPTIMIZER_STATE_DICT][fp32_groups_key] for sd in state_dicts]
    return zero_stage, world_size, fp32_flat_groups
+
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
        - ``exclude_frozen_parameters``: when True, frozen params are left out of the result
    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)
    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # dispatch on the detected stage; parse_optim_states has already rejected
    # anything other than 1, 2 or 3
    merge_fn = (_get_fp32_state_dict_from_zero2_checkpoint
                if zero_stage <= 2 else _get_fp32_state_dict_from_zero3_checkpoint)
    return merge_fn(world_size, fp32_flat_groups, zero_model_states, exclude_frozen_parameters)
+
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy frozen (untrained) parameters into *state_dict* for ZeRO-1/2.

    Frozen params are not partitioned under ZeRO-1/2, so rank 0's fragments
    are already the full tensors and can be inserted as-is.
    """
    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    if not frozen_param_shapes:  # None or empty -> nothing to do
        return

    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum(p.numel() for p in frozen_param_fragments.values())
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # rank 0's fragment is the whole tensor under ZeRO-1/2
        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reconstruct all trainable params from ZeRO-1/2 flat partitions into *state_dict*.

    Under ZeRO-1/2 each rank holds a contiguous slice of every param group's
    flattened fp32 master weights; concatenating the per-rank slices of each
    group and carving at every param's boundary recovers the full tensors.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    # concatenate each param group's per-rank partitions into one flat fp32 vector
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    # walk each (group shapes, merged flat vector) pair; offset tracks where the
    # next param's slice begins within the current group's flat vector
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shape may be a torch.Size (has .numel) or a plain tuple/list of ints
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            # carve this param's slice out of the flat vector and restore its shape
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        # round both to the alignment boundary so the equality check below
        # tolerates nccl padding
        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the full fp32 state_dict for a ZeRO-1/2 checkpoint."""
    state_dict = OrderedDict()

    # buffers are stored whole on every rank; take rank 0's copy
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for alias, source in zero_model_states[0].shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
+
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return ``(partitioned_numel, padding_numel)`` for one param under ZeRO-3.

    Each rank stores ``ceil(n / world_size)`` elements; when ``n`` is not an
    exact multiple of ``world_size`` the final shard carries ``padding_numel``
    filler elements.
    """
    remainder = unpartitioned_numel % world_size
    if remainder:
        padding_numel = world_size - remainder
    else:
        padding_numel = 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel
+
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Reassemble frozen (untrained) params from per-rank fragments (ZeRO-3).

    Each rank holds a slice of every frozen param; concatenating the slices
    across ranks and trimming the trailing padding recovers the full tensor.
    """
    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    if not frozen_param_shapes:  # None or empty -> nothing to do
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum(p.numel() for p in zero_model_states[0].frozen_param_fragments.values()) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # concatenate this param's fragment from every rank, then trim padding
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
class GatheredTensor:
    """
    A pseudo tensor that collects partitioned weights.
    It is more memory efficient when there are multiple groups.

    Exposes ``.shape`` and ``.dtype`` like a real tensor; the per-rank slices
    are only concatenated when ``contiguous()`` is called.
    """

    def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
        # flat_groups: indexed as [rank][group] -> flat 1-D fp32 tensor
        self.flat_groups = flat_groups
        # flat_groups_offset: cumulative element offsets of each group within a
        # rank's partition, i.e. [0, g0, g0+g1, ...] (len = num_groups + 1)
        self.flat_groups_offset = flat_groups_offset
        # offset: start of this param's slice within a rank's partition
        self.offset = offset
        # partitioned_numel: elements of this param held by each rank
        # (includes padding on the last rank)
        self.partitioned_numel = partitioned_numel
        # shape: the full (unpartitioned) shape to restore
        self.shape = shape
        self.dtype = self.flat_groups[0][0].dtype

    def contiguous(self):
        """
        Merge partitioned weights from flat_groups into a single tensor.
        """
        end_idx = self.offset + self.partitioned_numel
        world_size = len(self.flat_groups)
        pad_flat_param_chunks = []

        for rank_i in range(world_size):
            # for each rank, we need to collect weights from related group/groups
            flat_groups_at_rank_i = self.flat_groups[rank_i]
            start_group_id = None
            end_group_id = None
            # find the group containing the slice start and the group containing
            # the slice end (the break fires once the end group is located)
            for group_id in range(len(self.flat_groups_offset)):
                if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
                    start_group_id = group_id
                if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
                    end_group_id = group_id
                    break
            # collect weights from related group/groups
            # NOTE(review): if a slice ever spanned multiple groups, start_offset
            # would go negative for the later groups; in practice per-param
            # padding appears to keep every slice within a single group so
            # start_group_id == end_group_id — TODO confirm
            for group_id in range(start_group_id, end_group_id + 1):
                flat_tensor = flat_groups_at_rank_i[group_id]
                start_offset = self.offset - self.flat_groups_offset[group_id]
                end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
                pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])

        # collect weights from all ranks
        pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
        # drop trailing padding and restore the original shape
        param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
        return param
+
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Insert a lazy GatheredTensor into *state_dict* for every trainable param (ZeRO-3).

    ``fp32_flat_groups`` is indexed as ``[rank][param_group] -> 1-D fp32
    tensor``; actual gathering is deferred until ``.contiguous()`` is called
    on an entry (see GatheredTensor).
    """
    param_shapes = zero_model_states[0].param_shapes
    # total element count across all ranks, including any alignment padding
    avail_numel = sum(flat_group.numel() for flat_group in fp32_flat_groups[0]) * world_size

    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            # fix: fp32_flat_groups[i] is a *list* of per-group tensors, not a
            # tensor, so the original `.shape` access raised AttributeError
            print(f"{FP32_FLAT_GROUPS}[{i}].shapes={[t.shape for t in fp32_flat_groups[i]]}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    # fix: the original recomputed `avail_numel = fp32_flat_groups[0].numel() *
    # world_size` here, but fp32_flat_groups[0] is a list of tensors (see
    # parse_optim_states and GatheredTensor's [rank][group] indexing), so that
    # line raised AttributeError; the sum computed above is the intended value.
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    # cumulative start offset of each param group within a rank's partition
    flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
    for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # memory efficient tensor: defers the actual gather to .contiguous()
        tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
        state_dict[name] = tensor
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the full fp32 state_dict for a ZeRO-3 checkpoint (lazily for trainables)."""
    state_dict = OrderedDict()

    # buffers are stored whole on every rank; take rank 0's copy
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for alias, source in zero_model_states[0].shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
+
+
def to_torch_tensor(state_dict, return_empty_tensor=False):
    """
    Materialize a (possibly lazy) state_dict into real torch tensors.

    Lazy GatheredTensor values are merged via ``.contiguous()``. Values that
    are the same object (tied/shared weights) are converted once and re-used
    so the sharing survives conversion. With ``return_empty_tensor=True``
    only shape/dtype placeholders are allocated (used for shard planning).
    """
    torch_state_dict = {}
    seen = {}  # id(tensor) -> first name it was materialized under
    for name, tensor in state_dict.items():
        key = id(tensor)
        if key in seen:
            # shared tensors: alias the already-converted object
            torch_state_dict[name] = torch_state_dict[seen[key]]
            continue
        seen[key] = name
        if return_empty_tensor:
            torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
        else:
            torch_state_dict[name] = tensor.contiguous()
    return torch_state_dict
+
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                             tag=None,
                                             exclude_frozen_parameters=False,
                                             lazy_mode=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded
    with ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for
    example via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will
          attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
        - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead
          of torch tensors, which is more memory efficient. Convert a pseudo tensor to a torch
          tensor by calling ``.contiguous()`` on it.

    Returns:
        - pytorch ``state_dict``

    Raises:
        - ``ValueError``: ``tag`` is None and no ``latest`` file exists in ``checkpoint_dir``
        - ``FileNotFoundError``: the resolved ``checkpoint_dir/tag`` folder does not exist

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)  # already on cpu
        model = model.cpu()  # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. If the
    host lacks the CPU memory for a full materialization, pass ``lazy_mode=True`` and convert each
    pseudo tensor with ``.contiguous()`` as needed (or use the offline ``zero_to_fp32.py`` script
    saved with the checkpoint).
    """
    if tag is None:
        # resolve the tag from the 'latest' marker file written by deepspeed
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if not os.path.isfile(latest_path):
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
        with open(latest_path, 'r') as fd:
            tag = fd.read().strip()

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
    return state_dict if lazy_mode else to_torch_tensor(state_dict)
+
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
                                               output_dir,
                                               max_shard_size="5GB",
                                               safe_serialization=False,
                                               tag=None,
                                               exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    The state_dict is gathered lazily and written shard-by-shard, so peak host
    memory stays near the size of a single shard rather than the whole model.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_dir``: directory to the pytorch fp32 state_dict output files
        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Raises:
        - ``ImportError``: when ``safe_serialization`` or ``max_shard_size`` is requested but the
          corresponding optional dependency (``safetensors`` / ``huggingface_hub``) is missing
    """

    # Dependency pre-check: fail fast before any expensive checkpoint loading
    if safe_serialization:
        try:
            from safetensors.torch import save_file
        except ImportError:
            print('If you want to use `safe_serialization`, please `pip install safetensors`')
            raise
    if max_shard_size is not None:
        try:
            from huggingface_hub import split_torch_state_dict_into_shards
        except ImportError:
            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
            raise

    # Convert zero checkpoint to state_dict (lazy: values are pseudo tensors
    # that gather their shards only when materialized below)
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                                          tag,
                                                          exclude_frozen_parameters,
                                                          lazy_mode=True)

    # Shard the model if it is too big.
    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
    if max_shard_size is not None:
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        # an memory-efficient approach for sharding: plan shards from empty
        # placeholder tensors (shape/dtype only), not materialized weights
        empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
        state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
                                                              filename_pattern=filename_pattern,
                                                              max_shard_size=max_shard_size)
    else:
        from collections import namedtuple
        # mimic huggingface_hub's split result for the single-file case
        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
        state_dict_split = StateDictSplit(is_sharded=False,
                                          filename_to_tensors={weights_name: list(state_dict.keys())})

    # Save the model by shard
    os.makedirs(output_dir, exist_ok=True)
    filename_to_tensors = state_dict_split.filename_to_tensors.items()
    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
        # materialize only this shard's tensors
        shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
        shard_state_dict = to_torch_tensor(shard_state_dict)
        output_path = os.path.join(output_dir, shard_file)
        if safe_serialization:
            save_file(shard_state_dict, output_path, metadata={"format": "pt"})
        else:
            torch.save(shard_state_dict, output_path)
        # release the memory of current shard before gathering the next one
        for tensor_name in list(shard_state_dict.keys()):
            del state_dict[tensor_name]
            del shard_state_dict[tensor_name]
        del shard_state_dict
        gc.collect()

    # Save index if sharded
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }
        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
        save_index_file = os.path.join(output_dir, save_index_file)
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
+
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    Consolidate a ZeRO 2 or 3 checkpoint to fp32 and load it into *model*.

    Steps: move the model to cpu, reconstruct the fp32 consolidated
    ``state_dict`` from the checkpoint, then load it with ``strict=False``.

    Args:
        - ``model``: the model object to update (returned modified)
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
    """
    logger.info("Extracting fp32 weights")
    fp32_state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(fp32_state_dict, strict=False)

    return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+ help="directory to the pytorch fp32 state_dict output files"
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
+ "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)